virtualization.lists.linux-foundation.org archive mirror
* [PATCH V4 0/2] tcm_vhost hotplug
@ 2013-03-22  5:39 Asias He
  2013-03-22  5:39 ` [PATCH V4 1/2] tcm_vhost: Introduce tcm_vhost_check_feature() Asias He
                   ` (4 more replies)
  0 siblings, 5 replies; 15+ messages in thread
From: Asias He @ 2013-03-22  5:39 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

Asias He (2):
  tcm_vhost: Introduce tcm_vhost_check_feature()
  tcm_vhost: Add hotplug/hotunplug support

 drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
 drivers/vhost/tcm_vhost.h |  10 +++
 2 files changed, 229 insertions(+), 5 deletions(-)

-- 
1.8.1.4

* [PATCH V4 1/2] tcm_vhost: Introduce tcm_vhost_check_feature()
  2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
@ 2013-03-22  5:39 ` Asias He
  2013-03-22  5:39 ` [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support Asias He
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 15+ messages in thread
From: Asias He @ 2013-03-22  5:39 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

This helper is useful for checking whether a given feature bit has been
negotiated on the vhost device.
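
For illustration, patch 2/2 of this series gates hotplug event delivery
on the negotiated feature bit roughly like this (caller sketch only; the
actual user is tcm_vhost_do_plug() in the next patch):

	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
		return -EOPNOTSUPP;	/* guest did not ack hotplug */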

Signed-off-by: Asias He <asias@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 0524267..d81e3a9 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -90,6 +90,18 @@ static int iov_num_pages(struct iovec *iov)
 	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
 
+static bool tcm_vhost_check_feature(struct vhost_scsi *vs, int feature)
+{
+	bool ret = false;
+
+	mutex_lock(&vs->dev.mutex);
+	if (vhost_has_feature(&vs->dev, feature))
+		ret = true;
+	mutex_unlock(&vs->dev.mutex);
+
+	return ret;
+}
+
 static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
 {
 	bool ret = false;
-- 
1.8.1.4

* [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
  2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
  2013-03-22  5:39 ` [PATCH V4 1/2] tcm_vhost: Introduce tcm_vhost_check_feature() Asias He
@ 2013-03-22  5:39 ` Asias He
  2013-03-24 15:20   ` Michael S. Tsirkin
       [not found]   ` <20130324152009.GB8657@redhat.com>
  2013-03-24 15:20 ` [PATCH V4 0/2] tcm_vhost hotplug Michael S. Tsirkin
                   ` (2 subsequent siblings)
  4 siblings, 2 replies; 15+ messages in thread
From: Asias He @ 2013-03-22  5:39 UTC (permalink / raw)
  To: Nicholas Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, target-devel,
	Stefan Hajnoczi, Paolo Bonzini

In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
virtio-scsi), hotplug support is added to virtio-scsi.

This patch adds hotplug and hotunplug support to tcm_vhost.

You can create or delete a LUN in targetcli to hotplug or hotunplug a
LUN in guest.
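
For reference, what the guest sees on a LUN create/delete is a
virtio_scsi transport-reset event. A simplified sketch of the payload as
built by tcm_vhost_send_evt() below (illustrative only; field values are
taken from this patch, with tpg and lun standing for the portal group
and SE LUN being plugged or unplugged):

	struct virtio_scsi_event event = {
		.event  = VIRTIO_SCSI_T_TRANSPORT_RESET,
		.reason = VIRTIO_SCSI_EVT_RESET_RESCAN, /* _REMOVED on delete */
	};
	/* single-level LUN addressing, as in the TODO'd block below */
	event.lun[0] = 0x01;
	event.lun[1] = tpg->tport_tpgt & 0xFF;
	if (lun->unpacked_lun >= 256)
		event.lun[2] = (lun->unpacked_lun >> 8) | 0x40;
	event.lun[3] = lun->unpacked_lun & 0xFF;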

Changes in v4:
- Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
- Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick

Changes in v3:
- Separate the bug fix to another thread

Changes in v2:
- Remove code duplication in tcm_vhost_{hotplug,hotunplug}
- Fix racing of vs_events_nr
- Add flush fix patch to this series

Signed-off-by: Asias He <asias@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
 drivers/vhost/tcm_vhost.h |  10 +++
 2 files changed, 217 insertions(+), 5 deletions(-)

diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index d81e3a9..e734ead 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -62,6 +62,9 @@ enum {
 
 #define VHOST_SCSI_MAX_TARGET	256
 #define VHOST_SCSI_MAX_VQ	128
+#define VHOST_SCSI_MAX_EVENT	128
+
+#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
@@ -73,6 +76,12 @@ struct vhost_scsi {
 
 	struct vhost_work vs_completion_work; /* cmd completion work item */
 	struct llist_head vs_completion_list; /* cmd completion queue */
+
+	struct vhost_work vs_event_work; /* evt injection work item */
+	struct llist_head vs_event_list; /* evt injection queue */
+
+	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
+	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
@@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
 	return ret;
 }
 
+static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
+{
+	bool ret;
+
+	mutex_lock(&vs->dev.mutex);
+	ret = vs->vs_events_dropped;
+	mutex_unlock(&vs->dev.mutex);
+
+	return ret;
+}
 static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
 {
 	return 1;
@@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
 	return 0;
 }
 
+static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+{
+	mutex_lock(&vs->dev.mutex);
+	vs->vs_events_nr--;
+	kfree(evt);
+	mutex_unlock(&vs->dev.mutex);
+}
+
+static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+	u32 event, u32 reason)
+{
+	struct tcm_vhost_evt *evt;
+
+	mutex_lock(&vs->dev.mutex);
+	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
+		mutex_unlock(&vs->dev.mutex);
+		return NULL;
+	}
+
+	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (evt) {
+		evt->event.event = event;
+		evt->event.reason = reason;
+		vs->vs_events_nr++;
+	}
+	mutex_unlock(&vs->dev.mutex);
+
+	return evt;
+}
+
 static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 {
 	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
@@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
 	kfree(tv_cmd);
 }
 
+static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
+	struct virtio_scsi_event *event)
+{
+	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
+	struct virtio_scsi_event __user *eventp;
+	unsigned out, in;
+	int head, ret;
+
+	if (!tcm_vhost_check_endpoint(vq))
+		return;
+
+	mutex_lock(&vq->mutex);
+again:
+	vhost_disable_notify(&vs->dev, vq);
+	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
+			ARRAY_SIZE(vq->iov), &out, &in,
+			NULL, NULL);
+	if (head < 0) {
+		mutex_lock(&vs->dev.mutex);
+		vs->vs_events_dropped = true;
+		mutex_unlock(&vs->dev.mutex);
+		goto out;
+	}
+	if (head == vq->num) {
+		if (vhost_enable_notify(&vs->dev, vq))
+			goto again;
+		mutex_lock(&vs->dev.mutex);
+		vs->vs_events_dropped = true;
+		mutex_unlock(&vs->dev.mutex);
+		goto out;
+	}
+
+	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
+		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
+				vq->iov[out].iov_len);
+		goto out;
+	}
+
+	mutex_lock(&vs->dev.mutex);
+	if (vs->vs_events_dropped) {
+		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+		vs->vs_events_dropped = false;
+	}
+	mutex_unlock(&vs->dev.mutex);
+
+	eventp = vq->iov[out].iov_base;
+	ret = __copy_to_user(eventp, event, sizeof(*event));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+out:
+	mutex_unlock(&vq->mutex);
+}
+
+static void tcm_vhost_evt_work(struct vhost_work *work)
+{
+	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
+					vs_event_work);
+	struct tcm_vhost_evt *evt;
+	struct llist_node *llnode;
+
+	llnode = llist_del_all(&vs->vs_event_list);
+	while (llnode) {
+		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+		llnode = llist_next(llnode);
+		tcm_vhost_do_evt_work(vs, &evt->event);
+		tcm_vhost_free_evt(vs, evt);
+	}
+}
+
 /* Fill in status and signal that we are done processing this command
  *
  * This is scheduled in the vhost work queue so we are called with the owner
@@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 	pr_debug("%s: The handling func for control queue.\n", __func__);
 }
 
+static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, u32 event, u32 reason)
+{
+	struct tcm_vhost_evt *evt;
+
+	evt = tcm_vhost_allocate_evt(vs, event, reason);
+	if (!evt) {
+		mutex_lock(&vs->dev.mutex);
+		vs->vs_events_dropped = true;
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+
+	if (tpg && lun) {
+		/* TODO: share lun setup code with virtio-scsi.ko */
+		evt->event.lun[0] = 0x01;
+		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+		if (lun->unpacked_lun >= 256)
+			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
+		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
+	}
+
+	llist_add(&evt->list, &vs->vs_event_list);
+	vhost_work_queue(&vs->dev, &vs->vs_event_work);
+
+	return 0;
+}
+
 static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
 {
-	pr_debug("%s: The handling func for event queue.\n", __func__);
+	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+						poll.work);
+	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
+	if (!tcm_vhost_check_endpoint(vq))
+		return;
+
+	if (tcm_vhost_check_events_dropped(vs))
+		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+
 }
 
 static void vhost_scsi_handle_kick(struct vhost_work *work)
@@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
+			tv_tpg->vhost_scsi = vs;
 			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
@@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
 			goto err_tpg;
 		}
 		tv_tpg->tv_tpg_vhost_count--;
+		tv_tpg->vhost_scsi = NULL;
 		vs->vs_tpg[target] = NULL;
 		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
@@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
+	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
+
+	s->vs_events_nr = 0;
 
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
@@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 		vhost_scsi_flush_vq(vs, i);
 	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+	vhost_work_flush(&vs->dev, &vs->vs_event_work);
 }
 
 static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 {
-	if (features & ~VHOST_FEATURES)
+	if (features & ~VHOST_SCSI_FEATURES)
 		return -EOPNOTSUPP;
 
 	mutex_lock(&vs->dev.mutex);
@@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 			return -EFAULT;
 		return 0;
 	case VHOST_GET_FEATURES:
-		features = VHOST_FEATURES;
+		features = VHOST_SCSI_FEATURES;
 		if (copy_to_user(featurep, &features, sizeof features))
 			return -EFAULT;
 		return 0;
@@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 	return "Unknown";
 }
 
+static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+	struct se_lun *lun, bool plug)
+{
+
+	struct vhost_scsi *vs = tpg->vhost_scsi;
+	u32 reason;
+
+	mutex_lock(&tpg->tv_tpg_mutex);
+	vs = tpg->vhost_scsi;
+	mutex_unlock(&tpg->tv_tpg_mutex);
+	if (!vs)
+		return -EOPNOTSUPP;
+
+	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
+		return -EOPNOTSUPP;
+
+	if (plug)
+		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
+	else
+		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
+
+	return tcm_vhost_send_evt(vs, tpg, lun,
+			VIRTIO_SCSI_T_TRANSPORT_RESET,
+			reason);
+}
+
+static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	return tcm_vhost_do_plug(tpg, lun, true);
+}
+
+static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+{
+	return tcm_vhost_do_plug(tpg, lun, false);
+}
+
 static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 	struct se_lun *lun)
 {
@@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
 	tv_tpg->tv_tpg_port_count++;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
 
+	tcm_vhost_hotplug(tv_tpg, lun);
+
 	return 0;
 }
 
 static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
-	struct se_lun *se_lun)
+	struct se_lun *lun)
 {
 	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
 				struct tcm_vhost_tpg, se_tpg);
-
 	mutex_lock(&tv_tpg->tv_tpg_mutex);
 	tv_tpg->tv_tpg_port_count--;
 	mutex_unlock(&tv_tpg->tv_tpg_mutex);
+
+	tcm_vhost_hotunplug(tv_tpg, lun);
 }
 
 static struct se_node_acl *tcm_vhost_make_nodeacl(
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 1d2ae7a..94e9ee53 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
 	struct se_node_acl se_node_acl;
 };
 
+struct vhost_scsi;
 struct tcm_vhost_tpg {
 	/* Vhost port target portal group tag for TCM */
 	u16 tport_tpgt;
@@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
 	struct tcm_vhost_tport *tport;
 	/* Returned by tcm_vhost_make_tpg() */
 	struct se_portal_group se_tpg;
+	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
+	struct vhost_scsi *vhost_scsi;
 };
 
 struct tcm_vhost_tport {
@@ -83,6 +86,13 @@ struct tcm_vhost_tport {
 	struct se_wwn tport_wwn;
 };
 
+struct tcm_vhost_evt {
+	/* virtio_scsi event */
+	struct virtio_scsi_event event;
+	/* virtio_scsi event list, serviced from vhost worker thread */
+	struct llist_node list;
+};
+
 /*
  * As per request from MST, keep TCM_VHOST related ioctl defines out of
  * linux/vhost.h (user-space) for now..
-- 
1.8.1.4

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
  2013-03-22  5:39 ` [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support Asias He
@ 2013-03-24 15:20   ` Michael S. Tsirkin
       [not found]   ` <20130324152009.GB8657@redhat.com>
  1 sibling, 0 replies; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-03-24 15:20 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> virtio-scsi), hotplug support is added to virtio-scsi.
> 
> This patch adds hotplug and hotunplug support to tcm_vhost.
> 
> You can create or delete a LUN in targetcli to hotplug or hotunplug a
> LUN in guest.
> 
> Changes in v4:
> - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> 
> Changes in v3:
> - Separate the bug fix to another thread
> 
> Changes in v2:
> - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> - Fix racing of vs_events_nr
> - Add flush fix patch to this series
> 
> Signed-off-by: Asias He <asias@redhat.com>
> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> ---
>  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 217 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> index d81e3a9..e734ead 100644
> --- a/drivers/vhost/tcm_vhost.c
> +++ b/drivers/vhost/tcm_vhost.c
> @@ -62,6 +62,9 @@ enum {
>  
>  #define VHOST_SCSI_MAX_TARGET	256
>  #define VHOST_SCSI_MAX_VQ	128
> +#define VHOST_SCSI_MAX_EVENT	128
> +
> +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
>  
>  struct vhost_scsi {
>  	/* Protected by vhost_scsi->dev.mutex */
> @@ -73,6 +76,12 @@ struct vhost_scsi {
>  
>  	struct vhost_work vs_completion_work; /* cmd completion work item */
>  	struct llist_head vs_completion_list; /* cmd completion queue */
> +
> +	struct vhost_work vs_event_work; /* evt injection work item */
> +	struct llist_head vs_event_list; /* evt injection queue */
> +
> +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */

Woa u64. We allow up to 128 of these. int will do.

>  };
>  
>  /* Local pointer to allocated TCM configfs fabric module */
> @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
>  	return ret;
>  }
>  
> +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> +{
> +	bool ret;
> +
> +	mutex_lock(&vs->dev.mutex);
> +	ret = vs->vs_events_dropped;
> +	mutex_unlock(&vs->dev.mutex);
> +
> +	return ret;
> +}
>  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
>  {
>  	return 1;
> @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
>  	return 0;
>  }
>  
> +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> +{
> +	mutex_lock(&vs->dev.mutex);
> +	vs->vs_events_nr--;
> +	kfree(evt);
> +	mutex_unlock(&vs->dev.mutex);
> +}
> +
> +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> +	u32 event, u32 reason)
> +{
> +	struct tcm_vhost_evt *evt;
> +
> +	mutex_lock(&vs->dev.mutex);
> +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {


Set the events_dropped flag here and stop worrying about it in callers?

> +		mutex_unlock(&vs->dev.mutex);
> +		return NULL;
> +	}
> +
> +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> +	if (evt) {
> +		evt->event.event = event;
> +		evt->event.reason = reason;
> +		vs->vs_events_nr++;
> +	}
> +	mutex_unlock(&vs->dev.mutex);
> +
> +	return evt;
> +}
> +
>  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
>  {
>  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
>  	kfree(tv_cmd);
>  }
>  
> +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> +	struct virtio_scsi_event *event)
> +{
> +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> +	struct virtio_scsi_event __user *eventp;
> +	unsigned out, in;
> +	int head, ret;
> +
> +	if (!tcm_vhost_check_endpoint(vq))
> +		return;
> +
> +	mutex_lock(&vq->mutex);
> +again:
> +	vhost_disable_notify(&vs->dev, vq);
> +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> +			ARRAY_SIZE(vq->iov), &out, &in,
> +			NULL, NULL);
> +	if (head < 0) {
> +		mutex_lock(&vs->dev.mutex);

This nests dev mutex within vq mutex, can cause deadlock.
Should be the other way around.
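
A minimal sketch of the requested ordering (illustrative only, not the
actual follow-up fix):

	/* current patch: dev.mutex ends up nested inside vq->mutex */
	mutex_lock(&vq->mutex);
	mutex_lock(&vs->dev.mutex);	/* wrong way around */
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vq->mutex);

	/* requested ordering: dev.mutex (or a dedicated lock) first */
	mutex_lock(&vs->dev.mutex);
	mutex_lock(&vq->mutex);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);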

> +		vs->vs_events_dropped = true;
> +		mutex_unlock(&vs->dev.mutex);
> +		goto out;
> +	}
> +	if (head == vq->num) {
> +		if (vhost_enable_notify(&vs->dev, vq))
> +			goto again;
> +		mutex_lock(&vs->dev.mutex);
> +		vs->vs_events_dropped = true;
> +		mutex_unlock(&vs->dev.mutex);
> +		goto out;
> +	}
> +
> +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> +				vq->iov[out].iov_len);
> +		goto out;
> +	}
> +
> +	mutex_lock(&vs->dev.mutex);
> +	if (vs->vs_events_dropped) {
> +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> +		vs->vs_events_dropped = false;
> +	}
> +	mutex_unlock(&vs->dev.mutex);
> +
> +	eventp = vq->iov[out].iov_base;
> +	ret = __copy_to_user(eventp, event, sizeof(*event));

This assumes specific framing, please don't do this.

> +	if (!ret)
> +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> +	else
> +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> +out:
> +	mutex_unlock(&vq->mutex);
> +}
> +
> +static void tcm_vhost_evt_work(struct vhost_work *work)
> +{
> +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> +					vs_event_work);
> +	struct tcm_vhost_evt *evt;
> +	struct llist_node *llnode;
> +
> +	llnode = llist_del_all(&vs->vs_event_list);
> +	while (llnode) {
> +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> +		llnode = llist_next(llnode);
> +		tcm_vhost_do_evt_work(vs, &evt->event);
> +		tcm_vhost_free_evt(vs, evt);
> +	}
> +}
> +
>  /* Fill in status and signal that we are done processing this command
>   *
>   * This is scheduled in the vhost work queue so we are called with the owner
> @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
>  	pr_debug("%s: The handling func for control queue.\n", __func__);
>  }
>  
> +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> +	struct se_lun *lun, u32 event, u32 reason)
> +{
> +	struct tcm_vhost_evt *evt;
> +
> +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> +	if (!evt) {
> +		mutex_lock(&vs->dev.mutex);
> +		vs->vs_events_dropped = true;
> +		mutex_unlock(&vs->dev.mutex);
> +		return -ENOMEM;
> +	}
> +
> +	if (tpg && lun) {
> +		/* TODO: share lun setup code with virtio-scsi.ko */
> +		evt->event.lun[0] = 0x01;
> +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> +		if (lun->unpacked_lun >= 256)
> +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> +	}
> +
> +	llist_add(&evt->list, &vs->vs_event_list);
> +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> +
> +	return 0;
> +}
> +
>  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
>  {
> -	pr_debug("%s: The handling func for event queue.\n", __func__);
> +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> +						poll.work);
> +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> +
> +	if (!tcm_vhost_check_endpoint(vq))
> +		return;
> +
> +	if (tcm_vhost_check_events_dropped(vs))
> +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> +
>  }
>  
>  static void vhost_scsi_handle_kick(struct vhost_work *work)
> @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
>  				return -EEXIST;
>  			}
>  			tv_tpg->tv_tpg_vhost_count++;
> +			tv_tpg->vhost_scsi = vs;
>  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
>  			smp_mb__after_atomic_inc();
>  			match = true;
> @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
>  			goto err_tpg;
>  		}
>  		tv_tpg->tv_tpg_vhost_count--;
> +		tv_tpg->vhost_scsi = NULL;
>  		vs->vs_tpg[target] = NULL;
>  		match = true;
>  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
>  		return -ENOMEM;
>  
>  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> +
> +	s->vs_events_nr = 0;
>  
>  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
>  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
>  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
>  		vhost_scsi_flush_vq(vs, i);
>  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
>  }
>  
>  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
>  {
> -	if (features & ~VHOST_FEATURES)
> +	if (features & ~VHOST_SCSI_FEATURES)
>  		return -EOPNOTSUPP;
>  
>  	mutex_lock(&vs->dev.mutex);
> @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
>  			return -EFAULT;
>  		return 0;
>  	case VHOST_GET_FEATURES:
> -		features = VHOST_FEATURES;
> +		features = VHOST_SCSI_FEATURES;
>  		if (copy_to_user(featurep, &features, sizeof features))
>  			return -EFAULT;
>  		return 0;
> @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
>  	return "Unknown";
>  }
>  
> +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> +	struct se_lun *lun, bool plug)
> +{
> +
> +	struct vhost_scsi *vs = tpg->vhost_scsi;
> +	u32 reason;
> +
> +	mutex_lock(&tpg->tv_tpg_mutex);
> +	vs = tpg->vhost_scsi;
> +	mutex_unlock(&tpg->tv_tpg_mutex);
> +	if (!vs)
> +		return -EOPNOTSUPP;
> +
> +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> +		return -EOPNOTSUPP;
> +
> +	if (plug)
> +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> +	else
> +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> +
> +	return tcm_vhost_send_evt(vs, tpg, lun,
> +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> +			reason);
> +}
> +
> +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> +{
> +	return tcm_vhost_do_plug(tpg, lun, true);
> +}
> +
> +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> +{
> +	return tcm_vhost_do_plug(tpg, lun, false);
> +}
> +
>  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
>  	struct se_lun *lun)
>  {
> @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
>  	tv_tpg->tv_tpg_port_count++;
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
>  
> +	tcm_vhost_hotplug(tv_tpg, lun);
> +
>  	return 0;
>  }
>  
>  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> -	struct se_lun *se_lun)
> +	struct se_lun *lun)
>  {
>  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
>  				struct tcm_vhost_tpg, se_tpg);
> -
>  	mutex_lock(&tv_tpg->tv_tpg_mutex);
>  	tv_tpg->tv_tpg_port_count--;
>  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> +
> +	tcm_vhost_hotunplug(tv_tpg, lun);
>  }
>  
>  static struct se_node_acl *tcm_vhost_make_nodeacl(
> diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> index 1d2ae7a..94e9ee53 100644
> --- a/drivers/vhost/tcm_vhost.h
> +++ b/drivers/vhost/tcm_vhost.h
> @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
>  	struct se_node_acl se_node_acl;
>  };
>  
> +struct vhost_scsi;
>  struct tcm_vhost_tpg {
>  	/* Vhost port target portal group tag for TCM */
>  	u16 tport_tpgt;
> @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
>  	struct tcm_vhost_tport *tport;
>  	/* Returned by tcm_vhost_make_tpg() */
>  	struct se_portal_group se_tpg;
> +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> +	struct vhost_scsi *vhost_scsi;
>  };
>  
>  struct tcm_vhost_tport {
> @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
>  	struct se_wwn tport_wwn;
>  };
>  
> +struct tcm_vhost_evt {
> +	/* virtio_scsi event */
> +	struct virtio_scsi_event event;
> +	/* virtio_scsi event list, serviced from vhost worker thread */
> +	struct llist_node list;
> +};
> +
>  /*
>   * As per request from MST, keep TCM_VHOST related ioctl defines out of
>   * linux/vhost.h (user-space) for now..
> -- 
> 1.8.1.4

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
  2013-03-22  5:39 ` [PATCH V4 1/2] tcm_vhost: Introduce tcm_vhost_check_feature() Asias He
  2013-03-22  5:39 ` [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support Asias He
@ 2013-03-24 15:20 ` Michael S. Tsirkin
  2013-03-25  8:20   ` Asias He
  2013-04-08 21:31 ` Nicholas A. Bellinger
  2013-04-08 21:36 ` Nicholas A. Bellinger
  4 siblings, 1 reply; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-03-24 15:20 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Fri, Mar 22, 2013 at 01:39:03PM +0800, Asias He wrote:
> Asias He (2):
>   tcm_vhost: Introduce tcm_vhost_check_feature()
>   tcm_vhost: Add hotplug/hotunplug support

So this work should stay out of tree until we have
2 users for vhost-scsi, but I sent some comment to
help you make progress.

>  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 229 insertions(+), 5 deletions(-)
> 
> -- 
> 1.8.1.4

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
       [not found]   ` <20130324152009.GB8657@redhat.com>
@ 2013-03-25  7:48     ` Asias He
       [not found]     ` <20130325074806.GB20435@hj.localdomain>
  1 sibling, 0 replies; 15+ messages in thread
From: Asias He @ 2013-03-25  7:48 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > virtio-scsi), hotplug support is added to virtio-scsi.
> > 
> > This patch adds hotplug and hotunplug support to tcm_vhost.
> > 
> > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > LUN in guest.
> > 
> > Changes in v4:
> > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > 
> > Changes in v3:
> > - Separate the bug fix to another thread
> > 
> > Changes in v2:
> > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > - Fix racing of vs_events_nr
> > - Add flush fix patch to this series
> > 
> > Signed-off-by: Asias He <asias@redhat.com>
> > Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> >  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
> >  drivers/vhost/tcm_vhost.h |  10 +++
> >  2 files changed, 217 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > index d81e3a9..e734ead 100644
> > --- a/drivers/vhost/tcm_vhost.c
> > +++ b/drivers/vhost/tcm_vhost.c
> > @@ -62,6 +62,9 @@ enum {
> >  
> >  #define VHOST_SCSI_MAX_TARGET	256
> >  #define VHOST_SCSI_MAX_VQ	128
> > +#define VHOST_SCSI_MAX_EVENT	128
> > +
> > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
> >  
> >  struct vhost_scsi {
> >  	/* Protected by vhost_scsi->dev.mutex */
> > @@ -73,6 +76,12 @@ struct vhost_scsi {
> >  
> >  	struct vhost_work vs_completion_work; /* cmd completion work item */
> >  	struct llist_head vs_completion_list; /* cmd completion queue */
> > +
> > +	struct vhost_work vs_event_work; /* evt injection work item */
> > +	struct llist_head vs_event_list; /* evt injection queue */
> > +
> > +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> 
> Woa u64. We allow up to 128 of these. int will do.

u64 is used before we limit the total number of events, changed to int.

> >  };
> >  
> >  /* Local pointer to allocated TCM configfs fabric module */
> > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
> >  	return ret;
> >  }
> >  
> > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > +{
> > +	bool ret;
> > +
> > +	mutex_lock(&vs->dev.mutex);
> > +	ret = vs->vs_events_dropped;
> > +	mutex_unlock(&vs->dev.mutex);
> > +
> > +	return ret;
> > +}
> >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> >  {
> >  	return 1;
> > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
> >  	return 0;
> >  }
> >  
> > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> > +{
> > +	mutex_lock(&vs->dev.mutex);
> > +	vs->vs_events_nr--;
> > +	kfree(evt);
> > +	mutex_unlock(&vs->dev.mutex);
> > +}
> > +
> > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > +	u32 event, u32 reason)
> > +{
> > +	struct tcm_vhost_evt *evt;
> > +
> > +	mutex_lock(&vs->dev.mutex);
> > +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> 
> 
> Set the events_dropped flag here and stop worrying about it in callers?

Set the flag here now and do not bother the callers.
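
Something along these lines (a rough sketch of the reworked allocation
path; not the actual v5 code):

	mutex_lock(&vs->dev.mutex);
	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_dropped = true;	/* record the drop right here */
		mutex_unlock(&vs->dev.mutex);
		return NULL;
	}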

> > +		mutex_unlock(&vs->dev.mutex);
> > +		return NULL;
> > +	}
> > +
> > +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > +	if (evt) {
> > +		evt->event.event = event;
> > +		evt->event.reason = reason;
> > +		vs->vs_events_nr++;
> > +	}
> > +	mutex_unlock(&vs->dev.mutex);
> > +
> > +	return evt;
> > +}
> > +
> >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> >  {
> >  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> >  	kfree(tv_cmd);
> >  }
> >  
> > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > +	struct virtio_scsi_event *event)
> > +{
> > +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > +	struct virtio_scsi_event __user *eventp;
> > +	unsigned out, in;
> > +	int head, ret;
> > +
> > +	if (!tcm_vhost_check_endpoint(vq))
> > +		return;
> > +
> > +	mutex_lock(&vq->mutex);
> > +again:
> > +	vhost_disable_notify(&vs->dev, vq);
> > +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > +			ARRAY_SIZE(vq->iov), &out, &in,
> > +			NULL, NULL);
> > +	if (head < 0) {
> > +		mutex_lock(&vs->dev.mutex);
> 
> This nests dev mutex within vq mutex, can cause deadlock.
> Should be the other way around.

Changed the order.

> > +		vs->vs_events_dropped = true;
> > +		mutex_unlock(&vs->dev.mutex);
> > +		goto out;
> > +	}
> > +	if (head == vq->num) {
> > +		if (vhost_enable_notify(&vs->dev, vq))
> > +			goto again;
> > +		mutex_lock(&vs->dev.mutex);
> > +		vs->vs_events_dropped = true;
> > +		mutex_unlock(&vs->dev.mutex);
> > +		goto out;
> > +	}
> > +
> > +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> > +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> > +				vq->iov[out].iov_len);
> > +		goto out;
> > +	}
> > +
> > +	mutex_lock(&vs->dev.mutex);
> > +	if (vs->vs_events_dropped) {
> > +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> > +		vs->vs_events_dropped = false;
> > +	}
> > +	mutex_unlock(&vs->dev.mutex);
> > +
> > +	eventp = vq->iov[out].iov_base;
> > +	ret = __copy_to_user(eventp, event, sizeof(*event));
> 
> This assumes specific framing, please don't do this.

As I mentioned in the previous mail about this issue, let's do the
conversion to the no-framing-assumption mode in a separate patch which
converts all of them. Specific framing is assumed everywhere currently.
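
For context, dropping the framing assumption roughly means copying the
event across the full iovec instead of assuming it fits in iov[out]
alone; a sketch along the lines of what vhost-net did at the time
(illustrative only; memcpy_toiovecend() was the helper available then):

	/* copy across however many descriptors the guest supplied */
	ret = memcpy_toiovecend(&vq->iov[out], (unsigned char *)event,
				0, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");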

> > +	if (!ret)
> > +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> > +	else
> > +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> > +out:
> > +	mutex_unlock(&vq->mutex);
> > +}
> > +
> > +static void tcm_vhost_evt_work(struct vhost_work *work)
> > +{
> > +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> > +					vs_event_work);
> > +	struct tcm_vhost_evt *evt;
> > +	struct llist_node *llnode;
> > +
> > +	llnode = llist_del_all(&vs->vs_event_list);
> > +	while (llnode) {
> > +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> > +		llnode = llist_next(llnode);
> > +		tcm_vhost_do_evt_work(vs, &evt->event);
> > +		tcm_vhost_free_evt(vs, evt);
> > +	}
> > +}
> > +
> >  /* Fill in status and signal that we are done processing this command
> >   *
> >   * This is scheduled in the vhost work queue so we are called with the owner
> > @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
> >  	pr_debug("%s: The handling func for control queue.\n", __func__);
> >  }
> >  
> > +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> > +	struct se_lun *lun, u32 event, u32 reason)
> > +{
> > +	struct tcm_vhost_evt *evt;
> > +
> > +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> > +	if (!evt) {
> > +		mutex_lock(&vs->dev.mutex);
> > +		vs->vs_events_dropped = true;
> > +		mutex_unlock(&vs->dev.mutex);
> > +		return -ENOMEM;
> > +	}
> > +
> > +	if (tpg && lun) {
> > +		/* TODO: share lun setup code with virtio-scsi.ko */
> > +		evt->event.lun[0] = 0x01;
> > +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> > +		if (lun->unpacked_lun >= 256)
> > +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> > +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> > +	}
> > +
> > +	llist_add(&evt->list, &vs->vs_event_list);
> > +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> > +
> > +	return 0;
> > +}
> > +
> >  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
> >  {
> > -	pr_debug("%s: The handling func for event queue.\n", __func__);
> > +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> > +						poll.work);
> > +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> > +
> > +	if (!tcm_vhost_check_endpoint(vq))
> > +		return;
> > +
> > +	if (tcm_vhost_check_events_dropped(vs))
> > +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> > +
> >  }
> >  
> >  static void vhost_scsi_handle_kick(struct vhost_work *work)
> > @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
> >  				return -EEXIST;
> >  			}
> >  			tv_tpg->tv_tpg_vhost_count++;
> > +			tv_tpg->vhost_scsi = vs;
> >  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> >  			smp_mb__after_atomic_inc();
> >  			match = true;
> > @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
> >  			goto err_tpg;
> >  		}
> >  		tv_tpg->tv_tpg_vhost_count--;
> > +		tv_tpg->vhost_scsi = NULL;
> >  		vs->vs_tpg[target] = NULL;
> >  		match = true;
> >  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
> >  		return -ENOMEM;
> >  
> >  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> > +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> > +
> > +	s->vs_events_nr = 0;
> >  
> >  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
> >  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> > @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
> >  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
> >  		vhost_scsi_flush_vq(vs, i);
> >  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> > +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
> >  }
> >  
> >  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
> >  {
> > -	if (features & ~VHOST_FEATURES)
> > +	if (features & ~VHOST_SCSI_FEATURES)
> >  		return -EOPNOTSUPP;
> >  
> >  	mutex_lock(&vs->dev.mutex);
> > @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
> >  			return -EFAULT;
> >  		return 0;
> >  	case VHOST_GET_FEATURES:
> > -		features = VHOST_FEATURES;
> > +		features = VHOST_SCSI_FEATURES;
> >  		if (copy_to_user(featurep, &features, sizeof features))
> >  			return -EFAULT;
> >  		return 0;
> > @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
> >  	return "Unknown";
> >  }
> >  
> > +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> > +	struct se_lun *lun, bool plug)
> > +{
> > +
> > +	struct vhost_scsi *vs = tpg->vhost_scsi;
> > +	u32 reason;
> > +
> > +	mutex_lock(&tpg->tv_tpg_mutex);
> > +	vs = tpg->vhost_scsi;
> > +	mutex_unlock(&tpg->tv_tpg_mutex);
> > +	if (!vs)
> > +		return -EOPNOTSUPP;
> > +
> > +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> > +		return -EOPNOTSUPP;
> > +
> > +	if (plug)
> > +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> > +	else
> > +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> > +
> > +	return tcm_vhost_send_evt(vs, tpg, lun,
> > +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> > +			reason);
> > +}
> > +
> > +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > +{
> > +	return tcm_vhost_do_plug(tpg, lun, true);
> > +}
> > +
> > +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > +{
> > +	return tcm_vhost_do_plug(tpg, lun, false);
> > +}
> > +
> >  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> >  	struct se_lun *lun)
> >  {
> > @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> >  	tv_tpg->tv_tpg_port_count++;
> >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> >  
> > +	tcm_vhost_hotplug(tv_tpg, lun);
> > +
> >  	return 0;
> >  }
> >  
> >  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> > -	struct se_lun *se_lun)
> > +	struct se_lun *lun)
> >  {
> >  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
> >  				struct tcm_vhost_tpg, se_tpg);
> > -
> >  	mutex_lock(&tv_tpg->tv_tpg_mutex);
> >  	tv_tpg->tv_tpg_port_count--;
> >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > +
> > +	tcm_vhost_hotunplug(tv_tpg, lun);
> >  }
> >  
> >  static struct se_node_acl *tcm_vhost_make_nodeacl(
> > diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> > index 1d2ae7a..94e9ee53 100644
> > --- a/drivers/vhost/tcm_vhost.h
> > +++ b/drivers/vhost/tcm_vhost.h
> > @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
> >  	struct se_node_acl se_node_acl;
> >  };
> >  
> > +struct vhost_scsi;
> >  struct tcm_vhost_tpg {
> >  	/* Vhost port target portal group tag for TCM */
> >  	u16 tport_tpgt;
> > @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
> >  	struct tcm_vhost_tport *tport;
> >  	/* Returned by tcm_vhost_make_tpg() */
> >  	struct se_portal_group se_tpg;
> > +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> > +	struct vhost_scsi *vhost_scsi;
> >  };
> >  
> >  struct tcm_vhost_tport {
> > @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
> >  	struct se_wwn tport_wwn;
> >  };
> >  
> > +struct tcm_vhost_evt {
> > +	/* virtio_scsi event */
> > +	struct virtio_scsi_event event;
> > +	/* virtio_scsi event list, serviced from vhost worker thread */
> > +	struct llist_node list;
> > +};
> > +
> >  /*
> >   * As per request from MST, keep TCM_VHOST related ioctl defines out of
> >   * linux/vhost.h (user-space) for now..
> > -- 
> > 1.8.1.4

-- 
Asias

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-03-24 15:20 ` [PATCH V4 0/2] tcm_vhost hotplug Michael S. Tsirkin
@ 2013-03-25  8:20   ` Asias He
  0 siblings, 0 replies; 15+ messages in thread
From: Asias He @ 2013-03-25  8:20 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Sun, Mar 24, 2013 at 05:20:54PM +0200, Michael S. Tsirkin wrote:
> On Fri, Mar 22, 2013 at 01:39:03PM +0800, Asias He wrote:
> > Asias He (2):
> >   tcm_vhost: Introduce tcm_vhost_check_feature()
> >   tcm_vhost: Add hotplug/hotunplug support
> 
> So this work should stay out of tree until we have
> 2 users for vhost-scsi, but I sent some comment to
> help you make progress.

Actually, I do not think the real reason you want to block/delay this
is that there is only one user. I believe the real reason is simply that
qemu is not using it. If there were already 2 users other than qemu
(with qemu still not using it), what would you say, that you want 3
users of this? Would you say the same 2-user thing if a feature were
used only by qemu and by no one else?

Also, can you comment on Paolo's comment in the other thread?
"""
On 19/03/2013 14:45, Michael S. Tsirkin wrote:
> On Tue, Mar 19, 2013 at 10:36:42AM +0100, Paolo Bonzini wrote:
>> On 18/03/2013 22:53, Michael S. Tsirkin wrote:
>>> Sorry, no, I'd prefer we get userspace support in qemu first.
>>> If there's only a single user for this driver (kvmtool),
>>> then it was a mistake to merge it, the right thing would be to
>>> freeze it and look at whether we can drop it completely.
>>
>> I'm still not sure why this matters for this patch, since it does not
>> change the userspace ABI.
>
> It enables a new feature bit.

How does that matter if userspace is not supposed to do anything special
with that bit?

Paolo
"""

> >  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
> >  drivers/vhost/tcm_vhost.h |  10 +++
> >  2 files changed, 229 insertions(+), 5 deletions(-)
> > 
> > -- 
> > 1.8.1.4

-- 
Asias

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
       [not found]     ` <20130325074806.GB20435@hj.localdomain>
@ 2013-03-25 11:10       ` Michael S. Tsirkin
  2013-03-26  2:56         ` Asias He
  0 siblings, 1 reply; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-03-25 11:10 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > 
> > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > 
> > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > LUN in guest.
> > > 
> > > Changes in v4:
> > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > 
> > > Changes in v3:
> > > - Separate the bug fix to another thread
> > > 
> > > Changes in v2:
> > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > - Fix racing of vs_events_nr
> > > - Add flush fix patch to this series
> > > 
> > > Signed-off-by: Asias He <asias@redhat.com>
> > > Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > ---
> > >  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
> > >  drivers/vhost/tcm_vhost.h |  10 +++
> > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > 
> > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > index d81e3a9..e734ead 100644
> > > --- a/drivers/vhost/tcm_vhost.c
> > > +++ b/drivers/vhost/tcm_vhost.c
> > > @@ -62,6 +62,9 @@ enum {
> > >  
> > >  #define VHOST_SCSI_MAX_TARGET	256
> > >  #define VHOST_SCSI_MAX_VQ	128
> > > +#define VHOST_SCSI_MAX_EVENT	128
> > > +
> > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
> > >  
> > >  struct vhost_scsi {
> > >  	/* Protected by vhost_scsi->dev.mutex */
> > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > >  
> > >  	struct vhost_work vs_completion_work; /* cmd completion work item */
> > >  	struct llist_head vs_completion_list; /* cmd completion queue */
> > > +
> > > +	struct vhost_work vs_event_work; /* evt injection work item */
> > > +	struct llist_head vs_event_list; /* evt injection queue */
> > > +
> > > +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > > +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> > 
> > Woa u64. We allow up to 128 of these. int will do.
> 
> u64 is used before we limit the total number of events, changed to int.
> 
> > >  };
> > >  
> > >  /* Local pointer to allocated TCM configfs fabric module */
> > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
> > >  	return ret;
> > >  }
> > >  
> > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > +{
> > > +	bool ret;
> > > +
> > > +	mutex_lock(&vs->dev.mutex);
> > > +	ret = vs->vs_events_dropped;
> > > +	mutex_unlock(&vs->dev.mutex);
> > > +
> > > +	return ret;
> > > +}
> > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > >  {
> > >  	return 1;
> > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
> > >  	return 0;
> > >  }
> > >  
> > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> > > +{
> > > +	mutex_lock(&vs->dev.mutex);
> > > +	vs->vs_events_nr--;
> > > +	kfree(evt);
> > > +	mutex_unlock(&vs->dev.mutex);
> > > +}
> > > +
> > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > > +	u32 event, u32 reason)
> > > +{
> > > +	struct tcm_vhost_evt *evt;
> > > +
> > > +	mutex_lock(&vs->dev.mutex);
> > > +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > 
> > 
> > Set the events_dropped flag here and stop worrying about it in callers?
> 
> Set the flag here now and do not bother the callers.
> 
> > > +		mutex_unlock(&vs->dev.mutex);
> > > +		return NULL;
> > > +	}
> > > +
> > > +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > +	if (evt) {
> > > +		evt->event.event = event;
> > > +		evt->event.reason = reason;
> > > +		vs->vs_events_nr++;
> > > +	}
> > > +	mutex_unlock(&vs->dev.mutex);
> > > +
> > > +	return evt;
> > > +}
> > > +
> > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > >  {
> > >  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > >  	kfree(tv_cmd);
> > >  }
> > >  
> > > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > > +	struct virtio_scsi_event *event)
> > > +{
> > > +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > > +	struct virtio_scsi_event __user *eventp;
> > > +	unsigned out, in;
> > > +	int head, ret;
> > > +
> > > +	if (!tcm_vhost_check_endpoint(vq))
> > > +		return;
> > > +
> > > +	mutex_lock(&vq->mutex);
> > > +again:
> > > +	vhost_disable_notify(&vs->dev, vq);
> > > +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > > +			ARRAY_SIZE(vq->iov), &out, &in,
> > > +			NULL, NULL);
> > > +	if (head < 0) {
> > > +		mutex_lock(&vs->dev.mutex);
> > 
> > This nests dev mutex within vq mutex, can cause deadlock.
> > Should be the other way around.
> 
> Changed the order.
> 
> > > +		vs->vs_events_dropped = true;
> > > +		mutex_unlock(&vs->dev.mutex);
> > > +		goto out;
> > > +	}
> > > +	if (head == vq->num) {
> > > +		if (vhost_enable_notify(&vs->dev, vq))
> > > +			goto again;
> > > +		mutex_lock(&vs->dev.mutex);
> > > +		vs->vs_events_dropped = true;
> > > +		mutex_unlock(&vs->dev.mutex);
> > > +		goto out;
> > > +	}
> > > +
> > > +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> > > +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> > > +				vq->iov[out].iov_len);
> > > +		goto out;
> > > +	}
> > > +
> > > +	mutex_lock(&vs->dev.mutex);
> > > +	if (vs->vs_events_dropped) {
> > > +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> > > +		vs->vs_events_dropped = false;
> > > +	}
> > > +	mutex_unlock(&vs->dev.mutex);
> > > +
> > > +	eventp = vq->iov[out].iov_base;
> > > +	ret = __copy_to_user(eventp, event, sizeof(*event));
> > 
> > This assumes specific framing, please don't do this.
> 
> As I mentioned in the previous mail about this issue, let's do the
> conversion to the no-framing-assumption mode in a separate patch which
> converts all of them. Specific framing is assumed everywhere currently.

Yes this is a bug, let's fix it.  I don't see any reason to add
buggy code, then rework it.

> > > +	if (!ret)
> > > +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> > > +	else
> > > +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> > > +out:
> > > +	mutex_unlock(&vq->mutex);
> > > +}
> > > +
> > > +static void tcm_vhost_evt_work(struct vhost_work *work)
> > > +{
> > > +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> > > +					vs_event_work);
> > > +	struct tcm_vhost_evt *evt;
> > > +	struct llist_node *llnode;
> > > +
> > > +	llnode = llist_del_all(&vs->vs_event_list);
> > > +	while (llnode) {
> > > +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> > > +		llnode = llist_next(llnode);
> > > +		tcm_vhost_do_evt_work(vs, &evt->event);
> > > +		tcm_vhost_free_evt(vs, evt);
> > > +	}
> > > +}
> > > +
> > >  /* Fill in status and signal that we are done processing this command
> > >   *
> > >   * This is scheduled in the vhost work queue so we are called with the owner
> > > @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
> > >  	pr_debug("%s: The handling func for control queue.\n", __func__);
> > >  }
> > >  
> > > +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> > > +	struct se_lun *lun, u32 event, u32 reason)
> > > +{
> > > +	struct tcm_vhost_evt *evt;
> > > +
> > > +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> > > +	if (!evt) {
> > > +		mutex_lock(&vs->dev.mutex);
> > > +		vs->vs_events_dropped = true;
> > > +		mutex_unlock(&vs->dev.mutex);
> > > +		return -ENOMEM;
> > > +	}
> > > +
> > > +	if (tpg && lun) {
> > > +		/* TODO: share lun setup code with virtio-scsi.ko */
> > > +		evt->event.lun[0] = 0x01;
> > > +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> > > +		if (lun->unpacked_lun >= 256)
> > > +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> > > +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> > > +	}
> > > +
> > > +	llist_add(&evt->list, &vs->vs_event_list);
> > > +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> > > +
> > > +	return 0;
> > > +}
> > > +
> > >  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
> > >  {
> > > -	pr_debug("%s: The handling func for event queue.\n", __func__);
> > > +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> > > +						poll.work);
> > > +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> > > +
> > > +	if (!tcm_vhost_check_endpoint(vq))
> > > +		return;
> > > +
> > > +	if (tcm_vhost_check_events_dropped(vs))
> > > +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> > > +
> > >  }
> > >  
> > >  static void vhost_scsi_handle_kick(struct vhost_work *work)
> > > @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
> > >  				return -EEXIST;
> > >  			}
> > >  			tv_tpg->tv_tpg_vhost_count++;
> > > +			tv_tpg->vhost_scsi = vs;
> > >  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> > >  			smp_mb__after_atomic_inc();
> > >  			match = true;
> > > @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
> > >  			goto err_tpg;
> > >  		}
> > >  		tv_tpg->tv_tpg_vhost_count--;
> > > +		tv_tpg->vhost_scsi = NULL;
> > >  		vs->vs_tpg[target] = NULL;
> > >  		match = true;
> > >  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
> > >  		return -ENOMEM;
> > >  
> > >  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> > > +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> > > +
> > > +	s->vs_events_nr = 0;
> > >  
> > >  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
> > >  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> > > @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
> > >  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
> > >  		vhost_scsi_flush_vq(vs, i);
> > >  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> > > +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
> > >  }
> > >  
> > >  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
> > >  {
> > > -	if (features & ~VHOST_FEATURES)
> > > +	if (features & ~VHOST_SCSI_FEATURES)
> > >  		return -EOPNOTSUPP;
> > >  
> > >  	mutex_lock(&vs->dev.mutex);
> > > @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
> > >  			return -EFAULT;
> > >  		return 0;
> > >  	case VHOST_GET_FEATURES:
> > > -		features = VHOST_FEATURES;
> > > +		features = VHOST_SCSI_FEATURES;
> > >  		if (copy_to_user(featurep, &features, sizeof features))
> > >  			return -EFAULT;
> > >  		return 0;
> > > @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
> > >  	return "Unknown";
> > >  }
> > >  
> > > +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> > > +	struct se_lun *lun, bool plug)
> > > +{
> > > +
> > > +	struct vhost_scsi *vs = tpg->vhost_scsi;
> > > +	u32 reason;
> > > +
> > > +	mutex_lock(&tpg->tv_tpg_mutex);
> > > +	vs = tpg->vhost_scsi;
> > > +	mutex_unlock(&tpg->tv_tpg_mutex);
> > > +	if (!vs)
> > > +		return -EOPNOTSUPP;
> > > +
> > > +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> > > +		return -EOPNOTSUPP;
> > > +
> > > +	if (plug)
> > > +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> > > +	else
> > > +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> > > +
> > > +	return tcm_vhost_send_evt(vs, tpg, lun,
> > > +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> > > +			reason);
> > > +}
> > > +
> > > +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > +{
> > > +	return tcm_vhost_do_plug(tpg, lun, true);
> > > +}
> > > +
> > > +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > +{
> > > +	return tcm_vhost_do_plug(tpg, lun, false);
> > > +}
> > > +
> > >  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > >  	struct se_lun *lun)
> > >  {
> > > @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > >  	tv_tpg->tv_tpg_port_count++;
> > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > >  
> > > +	tcm_vhost_hotplug(tv_tpg, lun);
> > > +
> > >  	return 0;
> > >  }
> > >  
> > >  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> > > -	struct se_lun *se_lun)
> > > +	struct se_lun *lun)
> > >  {
> > >  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
> > >  				struct tcm_vhost_tpg, se_tpg);
> > > -
> > >  	mutex_lock(&tv_tpg->tv_tpg_mutex);
> > >  	tv_tpg->tv_tpg_port_count--;
> > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > +
> > > +	tcm_vhost_hotunplug(tv_tpg, lun);
> > >  }
> > >  
> > >  static struct se_node_acl *tcm_vhost_make_nodeacl(
> > > diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> > > index 1d2ae7a..94e9ee53 100644
> > > --- a/drivers/vhost/tcm_vhost.h
> > > +++ b/drivers/vhost/tcm_vhost.h
> > > @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
> > >  	struct se_node_acl se_node_acl;
> > >  };
> > >  
> > > +struct vhost_scsi;
> > >  struct tcm_vhost_tpg {
> > >  	/* Vhost port target portal group tag for TCM */
> > >  	u16 tport_tpgt;
> > > @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
> > >  	struct tcm_vhost_tport *tport;
> > >  	/* Returned by tcm_vhost_make_tpg() */
> > >  	struct se_portal_group se_tpg;
> > > +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> > > +	struct vhost_scsi *vhost_scsi;
> > >  };
> > >  
> > >  struct tcm_vhost_tport {
> > > @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
> > >  	struct se_wwn tport_wwn;
> > >  };
> > >  
> > > +struct tcm_vhost_evt {
> > > +	/* virtio_scsi event */
> > > +	struct virtio_scsi_event event;
> > > +	/* virtio_scsi event list, serviced from vhost worker thread */
> > > +	struct llist_node list;
> > > +};
> > > +
> > >  /*
> > >   * As per request from MST, keep TCM_VHOST related ioctl defines out of
> > >   * linux/vhost.h (user-space) for now..
> > > -- 
> > > 1.8.1.4
> 
> -- 
> Asias

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
  2013-03-25 11:10       ` Michael S. Tsirkin
@ 2013-03-26  2:56         ` Asias He
  2013-03-26 20:46           ` Michael S. Tsirkin
  0 siblings, 1 reply; 15+ messages in thread
From: Asias He @ 2013-03-26  2:56 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > 
> > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > 
> > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > LUN in guest.
> > > > 
> > > > Changes in v4:
> > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > 
> > > > Changes in v3:
> > > > - Separate the bug fix to another thread
> > > > 
> > > > Changes in v2:
> > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > - Fix racing of vs_events_nr
> > > > - Add flush fix patch to this series
> > > > 
> > > > Signed-off-by: Asias He <asias@redhat.com>
> > > > Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > > ---
> > > >  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
> > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > 
> > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > index d81e3a9..e734ead 100644
> > > > --- a/drivers/vhost/tcm_vhost.c
> > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > @@ -62,6 +62,9 @@ enum {
> > > >  
> > > >  #define VHOST_SCSI_MAX_TARGET	256
> > > >  #define VHOST_SCSI_MAX_VQ	128
> > > > +#define VHOST_SCSI_MAX_EVENT	128
> > > > +
> > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
> > > >  
> > > >  struct vhost_scsi {
> > > >  	/* Protected by vhost_scsi->dev.mutex */
> > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > >  
> > > >  	struct vhost_work vs_completion_work; /* cmd completion work item */
> > > >  	struct llist_head vs_completion_list; /* cmd completion queue */
> > > > +
> > > > +	struct vhost_work vs_event_work; /* evt injection work item */
> > > > +	struct llist_head vs_event_list; /* evt injection queue */
> > > > +
> > > > +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > > > +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> > > 
> > > Woa u64. We allow up to 128 of these. int will do.
> > 
> > u64 is used before we limit the total number of events, changed to int.
> > 
> > > >  };
> > > >  
> > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
> > > >  	return ret;
> > > >  }
> > > >  
> > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > +{
> > > > +	bool ret;
> > > > +
> > > > +	mutex_lock(&vs->dev.mutex);
> > > > +	ret = vs->vs_events_dropped;
> > > > +	mutex_unlock(&vs->dev.mutex);
> > > > +
> > > > +	return ret;
> > > > +}
> > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > >  {
> > > >  	return 1;
> > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
> > > >  	return 0;
> > > >  }
> > > >  
> > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> > > > +{
> > > > +	mutex_lock(&vs->dev.mutex);
> > > > +	vs->vs_events_nr--;
> > > > +	kfree(evt);
> > > > +	mutex_unlock(&vs->dev.mutex);
> > > > +}
> > > > +
> > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > > > +	u32 event, u32 reason)
> > > > +{
> > > > +	struct tcm_vhost_evt *evt;
> > > > +
> > > > +	mutex_lock(&vs->dev.mutex);
> > > > +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > 
> > > 
> > > Set events dropped flag here and stop worrying about it in callers?
> > 
> > Set the flag here now and do not bother the callers.
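
For reference, moving the flag into the allocator presumably ends up
looking something like this; a rough sketch of that v5 direction, not
the code that was actually posted:

	static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
		u32 event, u32 reason)
	{
		struct tcm_vhost_evt *evt;

		mutex_lock(&vs->dev.mutex);
		/* Hitting the limit counts as a dropped event */
		if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
			vs->vs_events_dropped = true;
			mutex_unlock(&vs->dev.mutex);
			return NULL;
		}

		evt = kzalloc(sizeof(*evt), GFP_KERNEL);
		if (evt) {
			evt->event.event = event;
			evt->event.reason = reason;
			vs->vs_events_nr++;
		} else {
			/* So does an allocation failure */
			vs->vs_events_dropped = true;
		}
		mutex_unlock(&vs->dev.mutex);

		return evt;
	}
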
> > 
> > > > +		mutex_unlock(&vs->dev.mutex);
> > > > +		return NULL;
> > > > +	}
> > > > +
> > > > +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > +	if (evt) {
> > > > +		evt->event.event = event;
> > > > +		evt->event.reason = reason;
> > > > +		vs->vs_events_nr++;
> > > > +	}
> > > > +	mutex_unlock(&vs->dev.mutex);
> > > > +
> > > > +	return evt;
> > > > +}
> > > > +
> > > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > >  {
> > > >  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > >  	kfree(tv_cmd);
> > > >  }
> > > >  
> > > > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > > > +	struct virtio_scsi_event *event)
> > > > +{
> > > > +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > > > +	struct virtio_scsi_event __user *eventp;
> > > > +	unsigned out, in;
> > > > +	int head, ret;
> > > > +
> > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > +		return;
> > > > +
> > > > +	mutex_lock(&vq->mutex);
> > > > +again:
> > > > +	vhost_disable_notify(&vs->dev, vq);
> > > > +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > > > +			ARRAY_SIZE(vq->iov), &out, &in,
> > > > +			NULL, NULL);
> > > > +	if (head < 0) {
> > > > +		mutex_lock(&vs->dev.mutex);
> > > 
> > > This nests dev mutex within vq mutex, can cause deadlock.
> > > Should be the other way around.
> > 
> > Changed the order.
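
The reordering amounts to taking dev.mutex before vq->mutex, so the
nesting matches the ioctl paths that already hold dev.mutex. Roughly,
as a sketch rather than the exact v5 hunk:

	mutex_lock(&vs->dev.mutex);	/* outer lock: vs_events_dropped etc. */
	mutex_lock(&vq->mutex);		/* inner lock: the event virtqueue */

	/* ... vhost_get_vq_desc() and the copy to the guest buffer ... */

	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
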
> > 
> > > > +		vs->vs_events_dropped = true;
> > > > +		mutex_unlock(&vs->dev.mutex);
> > > > +		goto out;
> > > > +	}
> > > > +	if (head == vq->num) {
> > > > +		if (vhost_enable_notify(&vs->dev, vq))
> > > > +			goto again;
> > > > +		mutex_lock(&vs->dev.mutex);
> > > > +		vs->vs_events_dropped = true;
> > > > +		mutex_unlock(&vs->dev.mutex);
> > > > +		goto out;
> > > > +	}
> > > > +
> > > > +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> > > > +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> > > > +				vq->iov[out].iov_len);
> > > > +		goto out;
> > > > +	}
> > > > +
> > > > +	mutex_lock(&vs->dev.mutex);
> > > > +	if (vs->vs_events_dropped) {
> > > > +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> > > > +		vs->vs_events_dropped = false;
> > > > +	}
> > > > +	mutex_unlock(&vs->dev.mutex);
> > > > +
> > > > +	eventp = vq->iov[out].iov_base;
> > > > +	ret = __copy_to_user(eventp, event, sizeof(*event));
> > > 
> > > This assumes specific framing, please don't do this.
> > 
> > As I mentioned in the previous mail about this issue. Let's do the
> > conversion to the no framing assumption mode in separate patch which
> > converts all of them. Specific framing is assumed everywhere currently.
> 
> Yes this is a bug, let's fix it.  I don't see any reason to add
> buggy code, then rework it.

Firstly, I agree that we need the conversion. But I do not think this
is a bug. The virtio-scsi guest side is always using this framing
currently. Until we modify the guest side code, this is not a bug.
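
For context, the current guest driver posts each event buffer as a
single scatterlist entry, roughly like the following (an approximation
of drivers/scsi/virtio_scsi.c from memory, not a verbatim quote):

	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
	virtqueue_add_buf(vq, &sg, 0, 1, event_node, GFP_ATOMIC);

so the whole event does land in a single writable entry today.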

This is just a matter of how we want to do the refactoring.

> > > > +	if (!ret)
> > > > +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> > > > +	else
> > > > +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> > > > +out:
> > > > +	mutex_unlock(&vq->mutex);
> > > > +}
> > > > +
> > > > +static void tcm_vhost_evt_work(struct vhost_work *work)
> > > > +{
> > > > +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> > > > +					vs_event_work);
> > > > +	struct tcm_vhost_evt *evt;
> > > > +	struct llist_node *llnode;
> > > > +
> > > > +	llnode = llist_del_all(&vs->vs_event_list);
> > > > +	while (llnode) {
> > > > +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> > > > +		llnode = llist_next(llnode);
> > > > +		tcm_vhost_do_evt_work(vs, &evt->event);
> > > > +		tcm_vhost_free_evt(vs, evt);
> > > > +	}
> > > > +}
> > > > +
> > > >  /* Fill in status and signal that we are done processing this command
> > > >   *
> > > >   * This is scheduled in the vhost work queue so we are called with the owner
> > > > @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
> > > >  	pr_debug("%s: The handling func for control queue.\n", __func__);
> > > >  }
> > > >  
> > > > +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> > > > +	struct se_lun *lun, u32 event, u32 reason)
> > > > +{
> > > > +	struct tcm_vhost_evt *evt;
> > > > +
> > > > +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> > > > +	if (!evt) {
> > > > +		mutex_lock(&vs->dev.mutex);
> > > > +		vs->vs_events_dropped = true;
> > > > +		mutex_unlock(&vs->dev.mutex);
> > > > +		return -ENOMEM;
> > > > +	}
> > > > +
> > > > +	if (tpg && lun) {
> > > > +		/* TODO: share lun setup code with virtio-scsi.ko */
> > > > +		evt->event.lun[0] = 0x01;
> > > > +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> > > > +		if (lun->unpacked_lun >= 256)
> > > > +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> > > > +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> > > > +	}
> > > > +
> > > > +	llist_add(&evt->list, &vs->vs_event_list);
> > > > +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +
> > > >  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
> > > >  {
> > > > -	pr_debug("%s: The handling func for event queue.\n", __func__);
> > > > +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> > > > +						poll.work);
> > > > +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> > > > +
> > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > +		return;
> > > > +
> > > > +	if (tcm_vhost_check_events_dropped(vs))
> > > > +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> > > > +
> > > >  }
> > > >  
> > > >  static void vhost_scsi_handle_kick(struct vhost_work *work)
> > > > @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
> > > >  				return -EEXIST;
> > > >  			}
> > > >  			tv_tpg->tv_tpg_vhost_count++;
> > > > +			tv_tpg->vhost_scsi = vs;
> > > >  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> > > >  			smp_mb__after_atomic_inc();
> > > >  			match = true;
> > > > @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
> > > >  			goto err_tpg;
> > > >  		}
> > > >  		tv_tpg->tv_tpg_vhost_count--;
> > > > +		tv_tpg->vhost_scsi = NULL;
> > > >  		vs->vs_tpg[target] = NULL;
> > > >  		match = true;
> > > >  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
> > > >  		return -ENOMEM;
> > > >  
> > > >  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> > > > +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> > > > +
> > > > +	s->vs_events_nr = 0;
> > > >  
> > > >  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
> > > >  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> > > > @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
> > > >  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
> > > >  		vhost_scsi_flush_vq(vs, i);
> > > >  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> > > > +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
> > > >  }
> > > >  
> > > >  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
> > > >  {
> > > > -	if (features & ~VHOST_FEATURES)
> > > > +	if (features & ~VHOST_SCSI_FEATURES)
> > > >  		return -EOPNOTSUPP;
> > > >  
> > > >  	mutex_lock(&vs->dev.mutex);
> > > > @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
> > > >  			return -EFAULT;
> > > >  		return 0;
> > > >  	case VHOST_GET_FEATURES:
> > > > -		features = VHOST_FEATURES;
> > > > +		features = VHOST_SCSI_FEATURES;
> > > >  		if (copy_to_user(featurep, &features, sizeof features))
> > > >  			return -EFAULT;
> > > >  		return 0;
> > > > @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
> > > >  	return "Unknown";
> > > >  }
> > > >  
> > > > +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> > > > +	struct se_lun *lun, bool plug)
> > > > +{
> > > > +
> > > > +	struct vhost_scsi *vs = tpg->vhost_scsi;
> > > > +	u32 reason;
> > > > +
> > > > +	mutex_lock(&tpg->tv_tpg_mutex);
> > > > +	vs = tpg->vhost_scsi;
> > > > +	mutex_unlock(&tpg->tv_tpg_mutex);
> > > > +	if (!vs)
> > > > +		return -EOPNOTSUPP;
> > > > +
> > > > +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> > > > +		return -EOPNOTSUPP;
> > > > +
> > > > +	if (plug)
> > > > +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> > > > +	else
> > > > +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> > > > +
> > > > +	return tcm_vhost_send_evt(vs, tpg, lun,
> > > > +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> > > > +			reason);
> > > > +}
> > > > +
> > > > +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > +{
> > > > +	return tcm_vhost_do_plug(tpg, lun, true);
> > > > +}
> > > > +
> > > > +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > +{
> > > > +	return tcm_vhost_do_plug(tpg, lun, false);
> > > > +}
> > > > +
> > > >  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > >  	struct se_lun *lun)
> > > >  {
> > > > @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > >  	tv_tpg->tv_tpg_port_count++;
> > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > >  
> > > > +	tcm_vhost_hotplug(tv_tpg, lun);
> > > > +
> > > >  	return 0;
> > > >  }
> > > >  
> > > >  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> > > > -	struct se_lun *se_lun)
> > > > +	struct se_lun *lun)
> > > >  {
> > > >  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
> > > >  				struct tcm_vhost_tpg, se_tpg);
> > > > -
> > > >  	mutex_lock(&tv_tpg->tv_tpg_mutex);
> > > >  	tv_tpg->tv_tpg_port_count--;
> > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > +
> > > > +	tcm_vhost_hotunplug(tv_tpg, lun);
> > > >  }
> > > >  
> > > >  static struct se_node_acl *tcm_vhost_make_nodeacl(
> > > > diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> > > > index 1d2ae7a..94e9ee53 100644
> > > > --- a/drivers/vhost/tcm_vhost.h
> > > > +++ b/drivers/vhost/tcm_vhost.h
> > > > @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
> > > >  	struct se_node_acl se_node_acl;
> > > >  };
> > > >  
> > > > +struct vhost_scsi;
> > > >  struct tcm_vhost_tpg {
> > > >  	/* Vhost port target portal group tag for TCM */
> > > >  	u16 tport_tpgt;
> > > > @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
> > > >  	struct tcm_vhost_tport *tport;
> > > >  	/* Returned by tcm_vhost_make_tpg() */
> > > >  	struct se_portal_group se_tpg;
> > > > +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> > > > +	struct vhost_scsi *vhost_scsi;
> > > >  };
> > > >  
> > > >  struct tcm_vhost_tport {
> > > > @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
> > > >  	struct se_wwn tport_wwn;
> > > >  };
> > > >  
> > > > +struct tcm_vhost_evt {
> > > > +	/* virtio_scsi event */
> > > > +	struct virtio_scsi_event event;
> > > > +	/* virtio_scsi event list, serviced from vhost worker thread */
> > > > +	struct llist_node list;
> > > > +};
> > > > +
> > > >  /*
> > > >   * As per request from MST, keep TCM_VHOST related ioctl defines out of
> > > >   * linux/vhost.h (user-space) for now..
> > > > -- 
> > > > 1.8.1.4
> > 
> > -- 
> > Asias

-- 
Asias

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
  2013-03-26  2:56         ` Asias He
@ 2013-03-26 20:46           ` Michael S. Tsirkin
  2013-03-27  3:23             ` Asias He
  0 siblings, 1 reply; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-03-26 20:46 UTC (permalink / raw)
  To: Asias He
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Tue, Mar 26, 2013 at 10:56:33AM +0800, Asias He wrote:
> On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> > On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > > 
> > > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > > 
> > > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > > LUN in guest.
> > > > > 
> > > > > Changes in v4:
> > > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > > 
> > > > > Changes in v3:
> > > > > - Separate the bug fix to another thread
> > > > > 
> > > > > Changes in v2:
> > > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > > - Fix racing of vs_events_nr
> > > > > - Add flush fix patch to this series
> > > > > 
> > > > > Signed-off-by: Asias He <asias@redhat.com>
> > > > > Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > > > ---
> > > > >  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
> > > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > > 
> > > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > > index d81e3a9..e734ead 100644
> > > > > --- a/drivers/vhost/tcm_vhost.c
> > > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > > @@ -62,6 +62,9 @@ enum {
> > > > >  
> > > > >  #define VHOST_SCSI_MAX_TARGET	256
> > > > >  #define VHOST_SCSI_MAX_VQ	128
> > > > > +#define VHOST_SCSI_MAX_EVENT	128
> > > > > +
> > > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
> > > > >  
> > > > >  struct vhost_scsi {
> > > > >  	/* Protected by vhost_scsi->dev.mutex */
> > > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > > >  
> > > > >  	struct vhost_work vs_completion_work; /* cmd completion work item */
> > > > >  	struct llist_head vs_completion_list; /* cmd completion queue */
> > > > > +
> > > > > +	struct vhost_work vs_event_work; /* evt injection work item */
> > > > > +	struct llist_head vs_event_list; /* evt injection queue */
> > > > > +
> > > > > +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > > > > +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> > > > 
> > > > Woa u64. We allow up to 128 of these. int will do.
> > > 
> > > u64 is used before we limit the total number of events, changed to int.
> > > 
> > > > >  };
> > > > >  
> > > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
> > > > >  	return ret;
> > > > >  }
> > > > >  
> > > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > > +{
> > > > > +	bool ret;
> > > > > +
> > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > +	ret = vs->vs_events_dropped;
> > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > +
> > > > > +	return ret;
> > > > > +}
> > > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > > >  {
> > > > >  	return 1;
> > > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
> > > > >  	return 0;
> > > > >  }
> > > > >  
> > > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> > > > > +{
> > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > +	vs->vs_events_nr--;
> > > > > +	kfree(evt);
> > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > +}
> > > > > +
> > > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > > > > +	u32 event, u32 reason)
> > > > > +{
> > > > > +	struct tcm_vhost_evt *evt;
> > > > > +
> > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > > 
> > > > 
> > > > Set events dropped flag here and stop worrying about it in callers?
> > > 
> > > Set the flag here now and do not bother the callers.
> > > 
> > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > +		return NULL;
> > > > > +	}
> > > > > +
> > > > > +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > > +	if (evt) {
> > > > > +		evt->event.event = event;
> > > > > +		evt->event.reason = reason;
> > > > > +		vs->vs_events_nr++;
> > > > > +	}
> > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > +
> > > > > +	return evt;
> > > > > +}
> > > > > +
> > > > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > > >  {
> > > > >  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > > >  	kfree(tv_cmd);
> > > > >  }
> > > > >  
> > > > > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > > > > +	struct virtio_scsi_event *event)
> > > > > +{
> > > > > +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > > > > +	struct virtio_scsi_event __user *eventp;
> > > > > +	unsigned out, in;
> > > > > +	int head, ret;
> > > > > +
> > > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > > +		return;
> > > > > +
> > > > > +	mutex_lock(&vq->mutex);
> > > > > +again:
> > > > > +	vhost_disable_notify(&vs->dev, vq);
> > > > > +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > > > > +			ARRAY_SIZE(vq->iov), &out, &in,
> > > > > +			NULL, NULL);
> > > > > +	if (head < 0) {
> > > > > +		mutex_lock(&vs->dev.mutex);
> > > > 
> > > > This nests dev mutex within vq mutex, can cause deadlock.
> > > > Should be the other way around.
> > > 
> > > Changed the order.
> > > 
> > > > > +		vs->vs_events_dropped = true;
> > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > +		goto out;
> > > > > +	}
> > > > > +	if (head == vq->num) {
> > > > > +		if (vhost_enable_notify(&vs->dev, vq))
> > > > > +			goto again;
> > > > > +		mutex_lock(&vs->dev.mutex);
> > > > > +		vs->vs_events_dropped = true;
> > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > +		goto out;
> > > > > +	}
> > > > > +
> > > > > +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> > > > > +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> > > > > +				vq->iov[out].iov_len);
> > > > > +		goto out;
> > > > > +	}
> > > > > +
> > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > +	if (vs->vs_events_dropped) {
> > > > > +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> > > > > +		vs->vs_events_dropped = false;
> > > > > +	}
> > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > +
> > > > > +	eventp = vq->iov[out].iov_base;
> > > > > +	ret = __copy_to_user(eventp, event, sizeof(*event));
> > > > 
> > > > This assumes specific framing, please don't do this.
> > > 
> > > As I mentioned in the previous mail about this issue. Let's do the
> > > conversion to the no framing assumption mode in separate patch which
> > > converts all of them. Specific framing is assumed everywhere currently.
> > 
> > Yes this is a bug, let's fix it.  I don't see any reason to add
> > buggy code, then rework it.
> 
> Firstly, I agree that we need the conversion. But I do not think this
> is a bug. The virtio-scsi guest side is always using this framing
> currently. Until we modify the guest side code, this is not a bug.

It's making assumptions not in the spec. It's a bug that the current
Linux guest does not trigger, so it works when the guest is Linux 3.9
but will break with a future Linux X.Y.

> This is just a matter of how we want to do the refactoring.

IMO we should fix bugs.
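
Concretely, dropping the assumption means copying the event across
however many writable iovec entries the guest supplied, instead of
assuming vq->iov[out] alone covers the whole structure. A rough sketch
(the helper name here is invented for illustration):

	static int tcm_vhost_copy_event(struct iovec *iov, unsigned int count,
			struct virtio_scsi_event *event)
	{
		unsigned char *src = (unsigned char *)event;
		size_t left = sizeof(*event);
		unsigned int i;

		/* Walk the writable descriptors the guest posted */
		for (i = 0; i < count && left; i++) {
			size_t len = min_t(size_t, left, iov[i].iov_len);

			if (copy_to_user(iov[i].iov_base, src, len))
				return -EFAULT;
			src += len;
			left -= len;
		}

		/* The guest buffers were too small for a full event */
		return left ? -EINVAL : 0;
	}

called as tcm_vhost_copy_event(&vq->iov[out], in, event) from
tcm_vhost_do_evt_work().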

> > > > > +	if (!ret)
> > > > > +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> > > > > +	else
> > > > > +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> > > > > +out:
> > > > > +	mutex_unlock(&vq->mutex);
> > > > > +}
> > > > > +
> > > > > +static void tcm_vhost_evt_work(struct vhost_work *work)
> > > > > +{
> > > > > +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> > > > > +					vs_event_work);
> > > > > +	struct tcm_vhost_evt *evt;
> > > > > +	struct llist_node *llnode;
> > > > > +
> > > > > +	llnode = llist_del_all(&vs->vs_event_list);
> > > > > +	while (llnode) {
> > > > > +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> > > > > +		llnode = llist_next(llnode);
> > > > > +		tcm_vhost_do_evt_work(vs, &evt->event);
> > > > > +		tcm_vhost_free_evt(vs, evt);
> > > > > +	}
> > > > > +}
> > > > > +
> > > > >  /* Fill in status and signal that we are done processing this command
> > > > >   *
> > > > >   * This is scheduled in the vhost work queue so we are called with the owner
> > > > > @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
> > > > >  	pr_debug("%s: The handling func for control queue.\n", __func__);
> > > > >  }
> > > > >  
> > > > > +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> > > > > +	struct se_lun *lun, u32 event, u32 reason)
> > > > > +{
> > > > > +	struct tcm_vhost_evt *evt;
> > > > > +
> > > > > +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> > > > > +	if (!evt) {
> > > > > +		mutex_lock(&vs->dev.mutex);
> > > > > +		vs->vs_events_dropped = true;
> > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > +		return -ENOMEM;
> > > > > +	}
> > > > > +
> > > > > +	if (tpg && lun) {
> > > > > +		/* TODO: share lun setup code with virtio-scsi.ko */
> > > > > +		evt->event.lun[0] = 0x01;
> > > > > +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> > > > > +		if (lun->unpacked_lun >= 256)
> > > > > +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> > > > > +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> > > > > +	}
> > > > > +
> > > > > +	llist_add(&evt->list, &vs->vs_event_list);
> > > > > +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> > > > > +
> > > > > +	return 0;
> > > > > +}
> > > > > +
> > > > >  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
> > > > >  {
> > > > > -	pr_debug("%s: The handling func for event queue.\n", __func__);
> > > > > +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> > > > > +						poll.work);
> > > > > +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> > > > > +
> > > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > > +		return;
> > > > > +
> > > > > +	if (tcm_vhost_check_events_dropped(vs))
> > > > > +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> > > > > +
> > > > >  }
> > > > >  
> > > > >  static void vhost_scsi_handle_kick(struct vhost_work *work)
> > > > > @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
> > > > >  				return -EEXIST;
> > > > >  			}
> > > > >  			tv_tpg->tv_tpg_vhost_count++;
> > > > > +			tv_tpg->vhost_scsi = vs;
> > > > >  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> > > > >  			smp_mb__after_atomic_inc();
> > > > >  			match = true;
> > > > > @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
> > > > >  			goto err_tpg;
> > > > >  		}
> > > > >  		tv_tpg->tv_tpg_vhost_count--;
> > > > > +		tv_tpg->vhost_scsi = NULL;
> > > > >  		vs->vs_tpg[target] = NULL;
> > > > >  		match = true;
> > > > >  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > > @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
> > > > >  		return -ENOMEM;
> > > > >  
> > > > >  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> > > > > +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> > > > > +
> > > > > +	s->vs_events_nr = 0;
> > > > >  
> > > > >  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
> > > > >  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> > > > > @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
> > > > >  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
> > > > >  		vhost_scsi_flush_vq(vs, i);
> > > > >  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> > > > > +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
> > > > >  }
> > > > >  
> > > > >  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
> > > > >  {
> > > > > -	if (features & ~VHOST_FEATURES)
> > > > > +	if (features & ~VHOST_SCSI_FEATURES)
> > > > >  		return -EOPNOTSUPP;
> > > > >  
> > > > >  	mutex_lock(&vs->dev.mutex);
> > > > > @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
> > > > >  			return -EFAULT;
> > > > >  		return 0;
> > > > >  	case VHOST_GET_FEATURES:
> > > > > -		features = VHOST_FEATURES;
> > > > > +		features = VHOST_SCSI_FEATURES;
> > > > >  		if (copy_to_user(featurep, &features, sizeof features))
> > > > >  			return -EFAULT;
> > > > >  		return 0;
> > > > > @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
> > > > >  	return "Unknown";
> > > > >  }
> > > > >  
> > > > > +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> > > > > +	struct se_lun *lun, bool plug)
> > > > > +{
> > > > > +
> > > > > +	struct vhost_scsi *vs = tpg->vhost_scsi;
> > > > > +	u32 reason;
> > > > > +
> > > > > +	mutex_lock(&tpg->tv_tpg_mutex);
> > > > > +	vs = tpg->vhost_scsi;
> > > > > +	mutex_unlock(&tpg->tv_tpg_mutex);
> > > > > +	if (!vs)
> > > > > +		return -EOPNOTSUPP;
> > > > > +
> > > > > +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> > > > > +		return -EOPNOTSUPP;
> > > > > +
> > > > > +	if (plug)
> > > > > +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> > > > > +	else
> > > > > +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> > > > > +
> > > > > +	return tcm_vhost_send_evt(vs, tpg, lun,
> > > > > +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> > > > > +			reason);
> > > > > +}
> > > > > +
> > > > > +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > > +{
> > > > > +	return tcm_vhost_do_plug(tpg, lun, true);
> > > > > +}
> > > > > +
> > > > > +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > > +{
> > > > > +	return tcm_vhost_do_plug(tpg, lun, false);
> > > > > +}
> > > > > +
> > > > >  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > > >  	struct se_lun *lun)
> > > > >  {
> > > > > @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > > >  	tv_tpg->tv_tpg_port_count++;
> > > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > >  
> > > > > +	tcm_vhost_hotplug(tv_tpg, lun);
> > > > > +
> > > > >  	return 0;
> > > > >  }
> > > > >  
> > > > >  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> > > > > -	struct se_lun *se_lun)
> > > > > +	struct se_lun *lun)
> > > > >  {
> > > > >  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
> > > > >  				struct tcm_vhost_tpg, se_tpg);
> > > > > -
> > > > >  	mutex_lock(&tv_tpg->tv_tpg_mutex);
> > > > >  	tv_tpg->tv_tpg_port_count--;
> > > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > > +
> > > > > +	tcm_vhost_hotunplug(tv_tpg, lun);
> > > > >  }
> > > > >  
> > > > >  static struct se_node_acl *tcm_vhost_make_nodeacl(
> > > > > diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> > > > > index 1d2ae7a..94e9ee53 100644
> > > > > --- a/drivers/vhost/tcm_vhost.h
> > > > > +++ b/drivers/vhost/tcm_vhost.h
> > > > > @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
> > > > >  	struct se_node_acl se_node_acl;
> > > > >  };
> > > > >  
> > > > > +struct vhost_scsi;
> > > > >  struct tcm_vhost_tpg {
> > > > >  	/* Vhost port target portal group tag for TCM */
> > > > >  	u16 tport_tpgt;
> > > > > @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
> > > > >  	struct tcm_vhost_tport *tport;
> > > > >  	/* Returned by tcm_vhost_make_tpg() */
> > > > >  	struct se_portal_group se_tpg;
> > > > > +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> > > > > +	struct vhost_scsi *vhost_scsi;
> > > > >  };
> > > > >  
> > > > >  struct tcm_vhost_tport {
> > > > > @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
> > > > >  	struct se_wwn tport_wwn;
> > > > >  };
> > > > >  
> > > > > +struct tcm_vhost_evt {
> > > > > +	/* virtio_scsi event */
> > > > > +	struct virtio_scsi_event event;
> > > > > +	/* virtio_scsi event list, serviced from vhost worker thread */
> > > > > +	struct llist_node list;
> > > > > +};
> > > > > +
> > > > >  /*
> > > > >   * As per request from MST, keep TCM_VHOST related ioctl defines out of
> > > > >   * linux/vhost.h (user-space) for now..
> > > > > -- 
> > > > > 1.8.1.4
> > > 
> > > -- 
> > > Asias
> 
> -- 
> Asias

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support
  2013-03-26 20:46           ` Michael S. Tsirkin
@ 2013-03-27  3:23             ` Asias He
  0 siblings, 0 replies; 15+ messages in thread
From: Asias He @ 2013-03-27  3:23 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

On Tue, Mar 26, 2013 at 10:46:28PM +0200, Michael S. Tsirkin wrote:
> On Tue, Mar 26, 2013 at 10:56:33AM +0800, Asias He wrote:
> > On Mon, Mar 25, 2013 at 01:10:33PM +0200, Michael S. Tsirkin wrote:
> > > On Mon, Mar 25, 2013 at 03:48:06PM +0800, Asias He wrote:
> > > > On Sun, Mar 24, 2013 at 05:20:09PM +0200, Michael S. Tsirkin wrote:
> > > > > On Fri, Mar 22, 2013 at 01:39:05PM +0800, Asias He wrote:
> > > > > > In commit 365a7150094 ([SCSI] virtio-scsi: hotplug support for
> > > > > > virtio-scsi), hotplug support is added to virtio-scsi.
> > > > > > 
> > > > > > This patch adds hotplug and hotunplug support to tcm_vhost.
> > > > > > 
> > > > > > You can create or delete a LUN in targetcli to hotplug or hotunplug a
> > > > > > LUN in guest.
> > > > > > 
> > > > > > Changes in v4:
> > > > > > - Drop tcm_vhost_check_endpoint in tcm_vhost_send_evt
> > > > > > - Add tcm_vhost_check_endpoint in vhost_scsi_evt_handle_kick
> > > > > > 
> > > > > > Changes in v3:
> > > > > > - Separate the bug fix to another thread
> > > > > > 
> > > > > > Changes in v2:
> > > > > > - Remove code duplication in tcm_vhost_{hotplug,hotunplug}
> > > > > > - Fix racing of vs_events_nr
> > > > > > - Add flush fix patch to this series
> > > > > > 
> > > > > > Signed-off-by: Asias He <asias@redhat.com>
> > > > > > Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
> > > > > > ---
> > > > > >  drivers/vhost/tcm_vhost.c | 212 ++++++++++++++++++++++++++++++++++++++++++++--
> > > > > >  drivers/vhost/tcm_vhost.h |  10 +++
> > > > > >  2 files changed, 217 insertions(+), 5 deletions(-)
> > > > > > 
> > > > > > diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
> > > > > > index d81e3a9..e734ead 100644
> > > > > > --- a/drivers/vhost/tcm_vhost.c
> > > > > > +++ b/drivers/vhost/tcm_vhost.c
> > > > > > @@ -62,6 +62,9 @@ enum {
> > > > > >  
> > > > > >  #define VHOST_SCSI_MAX_TARGET	256
> > > > > >  #define VHOST_SCSI_MAX_VQ	128
> > > > > > +#define VHOST_SCSI_MAX_EVENT	128
> > > > > > +
> > > > > > +#define VHOST_SCSI_FEATURES (VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG))
> > > > > >  
> > > > > >  struct vhost_scsi {
> > > > > >  	/* Protected by vhost_scsi->dev.mutex */
> > > > > > @@ -73,6 +76,12 @@ struct vhost_scsi {
> > > > > >  
> > > > > >  	struct vhost_work vs_completion_work; /* cmd completion work item */
> > > > > >  	struct llist_head vs_completion_list; /* cmd completion queue */
> > > > > > +
> > > > > > +	struct vhost_work vs_event_work; /* evt injection work item */
> > > > > > +	struct llist_head vs_event_list; /* evt injection queue */
> > > > > > +
> > > > > > +	bool vs_events_dropped; /* any missed events, protected by dev.mutex */
> > > > > > +	u64 vs_events_nr; /* num of pending events, protected by dev.mutex */
> > > > > 
> > > > > Woa u64. We allow up to 128 of these. int will do.
> > > > 
> > > > u64 is used before we limit the total number of events, changed to int.
> > > > 
> > > > > >  };
> > > > > >  
> > > > > >  /* Local pointer to allocated TCM configfs fabric module */
> > > > > > @@ -120,6 +129,16 @@ static bool tcm_vhost_check_endpoint(struct vhost_virtqueue *vq)
> > > > > >  	return ret;
> > > > > >  }
> > > > > >  
> > > > > > +static bool tcm_vhost_check_events_dropped(struct vhost_scsi *vs)
> > > > > > +{
> > > > > > +	bool ret;
> > > > > > +
> > > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > > +	ret = vs->vs_events_dropped;
> > > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > > +
> > > > > > +	return ret;
> > > > > > +}
> > > > > >  static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
> > > > > >  {
> > > > > >  	return 1;
> > > > > > @@ -370,6 +389,36 @@ static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
> > > > > >  	return 0;
> > > > > >  }
> > > > > >  
> > > > > > +static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
> > > > > > +{
> > > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > > +	vs->vs_events_nr--;
> > > > > > +	kfree(evt);
> > > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > > +}
> > > > > > +
> > > > > > +static struct tcm_vhost_evt *tcm_vhost_allocate_evt(struct vhost_scsi *vs,
> > > > > > +	u32 event, u32 reason)
> > > > > > +{
> > > > > > +	struct tcm_vhost_evt *evt;
> > > > > > +
> > > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > > +	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
> > > > > 
> > > > > 
> > > > > Set events dropped flag here and stop worrying about it in callers?
> > > > 
> > > > Set the flag here now and do not bother the callers.
> > > > 
> > > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > > +		return NULL;
> > > > > > +	}
> > > > > > +
> > > > > > +	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
> > > > > > +	if (evt) {
> > > > > > +		evt->event.event = event;
> > > > > > +		evt->event.reason = reason;
> > > > > > +		vs->vs_events_nr++;
> > > > > > +	}
> > > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > > +
> > > > > > +	return evt;
> > > > > > +}
> > > > > > +
> > > > > >  static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > > > >  {
> > > > > >  	struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd;
> > > > > > @@ -388,6 +437,77 @@ static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd)
> > > > > >  	kfree(tv_cmd);
> > > > > >  }
> > > > > >  
> > > > > > +static void tcm_vhost_do_evt_work(struct vhost_scsi *vs,
> > > > > > +	struct virtio_scsi_event *event)
> > > > > > +{
> > > > > > +	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT];
> > > > > > +	struct virtio_scsi_event __user *eventp;
> > > > > > +	unsigned out, in;
> > > > > > +	int head, ret;
> > > > > > +
> > > > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > > > +		return;
> > > > > > +
> > > > > > +	mutex_lock(&vq->mutex);
> > > > > > +again:
> > > > > > +	vhost_disable_notify(&vs->dev, vq);
> > > > > > +	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
> > > > > > +			ARRAY_SIZE(vq->iov), &out, &in,
> > > > > > +			NULL, NULL);
> > > > > > +	if (head < 0) {
> > > > > > +		mutex_lock(&vs->dev.mutex);
> > > > > 
> > > > > This nests dev mutex within vq mutex, can cause deadlock.
> > > > > Should be the other way around.
> > > > 
> > > > Changed the order.
> > > > 
> > > > > > +		vs->vs_events_dropped = true;
> > > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > > +		goto out;
> > > > > > +	}
> > > > > > +	if (head == vq->num) {
> > > > > > +		if (vhost_enable_notify(&vs->dev, vq))
> > > > > > +			goto again;
> > > > > > +		mutex_lock(&vs->dev.mutex);
> > > > > > +		vs->vs_events_dropped = true;
> > > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > > +		goto out;
> > > > > > +	}
> > > > > > +
> > > > > > +	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
> > > > > > +		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
> > > > > > +				vq->iov[out].iov_len);
> > > > > > +		goto out;
> > > > > > +	}
> > > > > > +
> > > > > > +	mutex_lock(&vs->dev.mutex);
> > > > > > +	if (vs->vs_events_dropped) {
> > > > > > +		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
> > > > > > +		vs->vs_events_dropped = false;
> > > > > > +	}
> > > > > > +	mutex_unlock(&vs->dev.mutex);
> > > > > > +
> > > > > > +	eventp = vq->iov[out].iov_base;
> > > > > > +	ret = __copy_to_user(eventp, event, sizeof(*event));
> > > > > 
> > > > > This assumes specific framing, please don't do this.
> > > > 
> > > > As I mentioned in the previous mail about this issue. Let's do the
> > > > conversion to the no framing assumption mode in separate patch which
> > > > converts all of them. Specific framing is assumed everywhere currently.
> > > 
> > > Yes this is a bug, let's fix it.  I don't see any reason to add
> > > buggy code, then rework it.
> > 
> > Firstly, I agree that we need the conversion. But I do not think this
> > is a bug. The virtio-scsi guest side is always using this framing
> > currently. Until we modify the guest side code, this is not a bug.
> 
> It's making assumptions not in the spec. It's a bug that the current
> Linux guest does not trigger, so it works when the guest is Linux 3.9
> but will break with a future Linux X.Y.

In any case, with or without this patch, if the guest changes the
assumption, it will not work anyway. This patch does not introduce a
new issue.

> > This is just a matter of how we want to do the refactoring.
> 
> IMO we should fix bugs.

> > > > > > +	if (!ret)
> > > > > > +		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
> > > > > > +	else
> > > > > > +		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
> > > > > > +out:
> > > > > > +	mutex_unlock(&vq->mutex);
> > > > > > +}
> > > > > > +
> > > > > > +static void tcm_vhost_evt_work(struct vhost_work *work)
> > > > > > +{
> > > > > > +	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
> > > > > > +					vs_event_work);
> > > > > > +	struct tcm_vhost_evt *evt;
> > > > > > +	struct llist_node *llnode;
> > > > > > +
> > > > > > +	llnode = llist_del_all(&vs->vs_event_list);
> > > > > > +	while (llnode) {
> > > > > > +		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
> > > > > > +		llnode = llist_next(llnode);
> > > > > > +		tcm_vhost_do_evt_work(vs, &evt->event);
> > > > > > +		tcm_vhost_free_evt(vs, evt);
> > > > > > +	}
> > > > > > +}
> > > > > > +
> > > > > >  /* Fill in status and signal that we are done processing this command
> > > > > >   *
> > > > > >   * This is scheduled in the vhost work queue so we are called with the owner
> > > > > > @@ -785,9 +905,46 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
> > > > > >  	pr_debug("%s: The handling func for control queue.\n", __func__);
> > > > > >  }
> > > > > >  
> > > > > > +static int tcm_vhost_send_evt(struct vhost_scsi *vs, struct tcm_vhost_tpg *tpg,
> > > > > > +	struct se_lun *lun, u32 event, u32 reason)
> > > > > > +{
> > > > > > +	struct tcm_vhost_evt *evt;
> > > > > > +
> > > > > > +	evt = tcm_vhost_allocate_evt(vs, event, reason);
> > > > > > +	if (!evt) {
> > > > > > +		mutex_lock(&vs->dev.mutex);
> > > > > > +		vs->vs_events_dropped = true;
> > > > > > +		mutex_unlock(&vs->dev.mutex);
> > > > > > +		return -ENOMEM;
> > > > > > +	}
> > > > > > +
> > > > > > +	if (tpg && lun) {
> > > > > > +		/* TODO: share lun setup code with virtio-scsi.ko */
> > > > > > +		evt->event.lun[0] = 0x01;
> > > > > > +		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
> > > > > > +		if (lun->unpacked_lun >= 256)
> > > > > > +			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ;
> > > > > > +		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
> > > > > > +	}
> > > > > > +
> > > > > > +	llist_add(&evt->list, &vs->vs_event_list);
> > > > > > +	vhost_work_queue(&vs->dev, &vs->vs_event_work);
> > > > > > +
> > > > > > +	return 0;
> > > > > > +}
> > > > > > +
> > > > > >  static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
> > > > > >  {
> > > > > > -	pr_debug("%s: The handling func for event queue.\n", __func__);
> > > > > > +	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
> > > > > > +						poll.work);
> > > > > > +	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
> > > > > > +
> > > > > > +	if (!tcm_vhost_check_endpoint(vq))
> > > > > > +		return;
> > > > > > +
> > > > > > +	if (tcm_vhost_check_events_dropped(vs))
> > > > > > +		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
> > > > > > +
> > > > > >  }
> > > > > >  
> > > > > >  static void vhost_scsi_handle_kick(struct vhost_work *work)
> > > > > > @@ -844,6 +1001,7 @@ static int vhost_scsi_set_endpoint(
> > > > > >  				return -EEXIST;
> > > > > >  			}
> > > > > >  			tv_tpg->tv_tpg_vhost_count++;
> > > > > > +			tv_tpg->vhost_scsi = vs;
> > > > > >  			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
> > > > > >  			smp_mb__after_atomic_inc();
> > > > > >  			match = true;
> > > > > > @@ -914,6 +1072,7 @@ static int vhost_scsi_clear_endpoint(
> > > > > >  			goto err_tpg;
> > > > > >  		}
> > > > > >  		tv_tpg->tv_tpg_vhost_count--;
> > > > > > +		tv_tpg->vhost_scsi = NULL;
> > > > > >  		vs->vs_tpg[target] = NULL;
> > > > > >  		match = true;
> > > > > >  		mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > > > @@ -947,6 +1106,9 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
> > > > > >  		return -ENOMEM;
> > > > > >  
> > > > > >  	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
> > > > > > +	vhost_work_init(&s->vs_event_work, tcm_vhost_evt_work);
> > > > > > +
> > > > > > +	s->vs_events_nr = 0;
> > > > > >  
> > > > > >  	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
> > > > > >  	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
> > > > > > @@ -989,11 +1151,12 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
> > > > > >  	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
> > > > > >  		vhost_scsi_flush_vq(vs, i);
> > > > > >  	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
> > > > > > +	vhost_work_flush(&vs->dev, &vs->vs_event_work);
> > > > > >  }
> > > > > >  
> > > > > >  static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
> > > > > >  {
> > > > > > -	if (features & ~VHOST_FEATURES)
> > > > > > +	if (features & ~VHOST_SCSI_FEATURES)
> > > > > >  		return -EOPNOTSUPP;
> > > > > >  
> > > > > >  	mutex_lock(&vs->dev.mutex);
> > > > > > @@ -1039,7 +1202,7 @@ static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
> > > > > >  			return -EFAULT;
> > > > > >  		return 0;
> > > > > >  	case VHOST_GET_FEATURES:
> > > > > > -		features = VHOST_FEATURES;
> > > > > > +		features = VHOST_SCSI_FEATURES;
> > > > > >  		if (copy_to_user(featurep, &features, sizeof features))
> > > > > >  			return -EFAULT;
> > > > > >  		return 0;
> > > > > > @@ -1109,6 +1272,42 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
> > > > > >  	return "Unknown";
> > > > > >  }
> > > > > >  
> > > > > > +static int tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
> > > > > > +	struct se_lun *lun, bool plug)
> > > > > > +{
> > > > > > +
> > > > > > +	struct vhost_scsi *vs = tpg->vhost_scsi;
> > > > > > +	u32 reason;
> > > > > > +
> > > > > > +	mutex_lock(&tpg->tv_tpg_mutex);
> > > > > > +	vs = tpg->vhost_scsi;
> > > > > > +	mutex_unlock(&tpg->tv_tpg_mutex);
> > > > > > +	if (!vs)
> > > > > > +		return -EOPNOTSUPP;
> > > > > > +
> > > > > > +	if (!tcm_vhost_check_feature(vs, VIRTIO_SCSI_F_HOTPLUG))
> > > > > > +		return -EOPNOTSUPP;
> > > > > > +
> > > > > > +	if (plug)
> > > > > > +		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
> > > > > > +	else
> > > > > > +		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;
> > > > > > +
> > > > > > +	return tcm_vhost_send_evt(vs, tpg, lun,
> > > > > > +			VIRTIO_SCSI_T_TRANSPORT_RESET,
> > > > > > +			reason);
> > > > > > +}
> > > > > > +
> > > > > > +static int tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > > > +{
> > > > > > +	return tcm_vhost_do_plug(tpg, lun, true);
> > > > > > +}
> > > > > > +
> > > > > > +static int tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
> > > > > > +{
> > > > > > +	return tcm_vhost_do_plug(tpg, lun, false);
> > > > > > +}
> > > > > > +
> > > > > >  static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > > > >  	struct se_lun *lun)
> > > > > >  {
> > > > > > @@ -1119,18 +1318,21 @@ static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
> > > > > >  	tv_tpg->tv_tpg_port_count++;
> > > > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > > >  
> > > > > > +	tcm_vhost_hotplug(tv_tpg, lun);
> > > > > > +
> > > > > >  	return 0;
> > > > > >  }
> > > > > >  
> > > > > >  static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
> > > > > > -	struct se_lun *se_lun)
> > > > > > +	struct se_lun *lun)
> > > > > >  {
> > > > > >  	struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg,
> > > > > >  				struct tcm_vhost_tpg, se_tpg);
> > > > > > -
> > > > > >  	mutex_lock(&tv_tpg->tv_tpg_mutex);
> > > > > >  	tv_tpg->tv_tpg_port_count--;
> > > > > >  	mutex_unlock(&tv_tpg->tv_tpg_mutex);
> > > > > > +
> > > > > > +	tcm_vhost_hotunplug(tv_tpg, lun);
> > > > > >  }
> > > > > >  
> > > > > >  static struct se_node_acl *tcm_vhost_make_nodeacl(
> > > > > > diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
> > > > > > index 1d2ae7a..94e9ee53 100644
> > > > > > --- a/drivers/vhost/tcm_vhost.h
> > > > > > +++ b/drivers/vhost/tcm_vhost.h
> > > > > > @@ -53,6 +53,7 @@ struct tcm_vhost_nacl {
> > > > > >  	struct se_node_acl se_node_acl;
> > > > > >  };
> > > > > >  
> > > > > > +struct vhost_scsi;
> > > > > >  struct tcm_vhost_tpg {
> > > > > >  	/* Vhost port target portal group tag for TCM */
> > > > > >  	u16 tport_tpgt;
> > > > > > @@ -70,6 +71,8 @@ struct tcm_vhost_tpg {
> > > > > >  	struct tcm_vhost_tport *tport;
> > > > > >  	/* Returned by tcm_vhost_make_tpg() */
> > > > > >  	struct se_portal_group se_tpg;
> > > > > > +	/* Pointer back to struct vhost_scsi, protected by tv_tpg_mutex */
> > > > > > +	struct vhost_scsi *vhost_scsi;
> > > > > >  };
> > > > > >  
> > > > > >  struct tcm_vhost_tport {
> > > > > > @@ -83,6 +86,13 @@ struct tcm_vhost_tport {
> > > > > >  	struct se_wwn tport_wwn;
> > > > > >  };
> > > > > >  
> > > > > > +struct tcm_vhost_evt {
> > > > > > +	/* virtio_scsi event */
> > > > > > +	struct virtio_scsi_event event;
> > > > > > +	/* virtio_scsi event list, serviced from vhost worker thread */
> > > > > > +	struct llist_node list;
> > > > > > +};
> > > > > > +
> > > > > >  /*
> > > > > >   * As per request from MST, keep TCM_VHOST related ioctl defines out of
> > > > > >   * linux/vhost.h (user-space) for now..
> > > > > > -- 
> > > > > > 1.8.1.4
> > > > 
> > > > -- 
> > > > Asias
> > 
> > -- 
> > Asias

-- 
Asias

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-04-08 21:31 ` Nicholas A. Bellinger
@ 2013-04-08 20:41   ` Michael S. Tsirkin
  2013-04-09  0:57   ` Asias He
  1 sibling, 0 replies; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-04-08 20:41 UTC (permalink / raw)
  To: Nicholas A. Bellinger
  Cc: aliguori, kvm, virtualization, Stefan Hajnoczi, Paolo Bonzini

On Mon, Apr 08, 2013 at 02:31:27PM -0700, Nicholas A. Bellinger wrote:
> On Fri, 2013-03-22 at 13:39 +0800, Asias He wrote:
> > Asias He (2):
> >   tcm_vhost: Introduce tcm_vhost_check_feature()
> >   tcm_vhost: Add hotplug/hotunplug support
> > 
> >  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
> >  drivers/vhost/tcm_vhost.h |  10 +++
> >  2 files changed, 229 insertions(+), 5 deletions(-)
> > 
> 
> Hi MST,
> 
> With vhost-scsi-pci now on the right track for upstream QEMU, will you
> consider this code as a v3.10 item..?

I'd prefer to see it merged, really. We still have a week or two
until the merge window, don't we? Let's decide then.

> Asias, I assume this will need to be rebased against your latest patches
> in target-pending/master, yes..?
> 
> --nab

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
                   ` (2 preceding siblings ...)
  2013-03-24 15:20 ` [PATCH V4 0/2] tcm_vhost hotplug Michael S. Tsirkin
@ 2013-04-08 21:31 ` Nicholas A. Bellinger
  2013-04-08 20:41   ` Michael S. Tsirkin
  2013-04-09  0:57   ` Asias He
  2013-04-08 21:36 ` Nicholas A. Bellinger
  4 siblings, 2 replies; 15+ messages in thread
From: Nicholas A. Bellinger @ 2013-04-08 21:31 UTC (permalink / raw)
  To: Michael S. Tsirkin; +Cc: kvm, virtualization, Stefan Hajnoczi, Paolo Bonzini

On Fri, 2013-03-22 at 13:39 +0800, Asias He wrote:
> Asias He (2):
>   tcm_vhost: Introduce tcm_vhost_check_feature()
>   tcm_vhost: Add hotplug/hotunplug support
> 
>  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 229 insertions(+), 5 deletions(-)
> 

Hi MST,

With vhost-scsi-pci now on the right track for upstream QEMU, will you
consider this code as a v3.10 item..?

Asias, I assume this will need to be rebased against your latest patches
in target-pending/master, yes..?

--nab

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
                   ` (3 preceding siblings ...)
  2013-04-08 21:31 ` Nicholas A. Bellinger
@ 2013-04-08 21:36 ` Nicholas A. Bellinger
  4 siblings, 0 replies; 15+ messages in thread
From: Nicholas A. Bellinger @ 2013-04-08 21:36 UTC (permalink / raw)
  To: Michael S. Tsirkin
  Cc: kvm, virtualization, target-devel, Stefan Hajnoczi, Paolo Bonzini

(Re-send, as target-devel CC' managed to get dropped)

On Fri, 2013-03-22 at 13:39 +0800, Asias He wrote:
> Asias He (2):
>   tcm_vhost: Introduce tcm_vhost_check_feature()
>   tcm_vhost: Add hotplug/hotunplug support
> 
>  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
>  drivers/vhost/tcm_vhost.h |  10 +++
>  2 files changed, 229 insertions(+), 5 deletions(-)
> 

Hi MST,

With vhost-scsi-pci now on the right track for upstream QEMU, will you
consider this code as a v3.10 item..?

Asias, I assume this will need to be rebased against your latest patches
in target-pending/master, yes..?

--nab

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH V4 0/2] tcm_vhost hotplug
  2013-04-08 21:31 ` Nicholas A. Bellinger
  2013-04-08 20:41   ` Michael S. Tsirkin
@ 2013-04-09  0:57   ` Asias He
  1 sibling, 0 replies; 15+ messages in thread
From: Asias He @ 2013-04-09  0:57 UTC (permalink / raw)
  To: Nicholas A. Bellinger
  Cc: kvm, Michael S. Tsirkin, virtualization, Stefan Hajnoczi,
	Paolo Bonzini

On Mon, Apr 08, 2013 at 02:31:27PM -0700, Nicholas A. Bellinger wrote:
> On Fri, 2013-03-22 at 13:39 +0800, Asias He wrote:
> > Asias He (2):
> >   tcm_vhost: Introduce tcm_vhost_check_feature()
> >   tcm_vhost: Add hotplug/hotunplug support
> > 
> >  drivers/vhost/tcm_vhost.c | 224 ++++++++++++++++++++++++++++++++++++++++++++--
> >  drivers/vhost/tcm_vhost.h |  10 +++
> >  2 files changed, 229 insertions(+), 5 deletions(-)
> > 
> 
> Hi MST,
> 
> With vhost-scsi-pci now on the right track for upstream QEMU, will you
> consider this code as a v3.10 item..?
> 
> Asias, I assume this will need to be rebased against your latest patches
> in target-pending/master, yes..?

I will rebase this one and the flush one shortly.

> --nab
> 

-- 
Asias

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2013-04-09  0:57 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-03-22  5:39 [PATCH V4 0/2] tcm_vhost hotplug Asias He
2013-03-22  5:39 ` [PATCH V4 1/2] tcm_vhost: Introduce tcm_vhost_check_feature() Asias He
2013-03-22  5:39 ` [PATCH V4 2/2] tcm_vhost: Add hotplug/hotunplug support Asias He
2013-03-24 15:20   ` Michael S. Tsirkin
     [not found]   ` <20130324152009.GB8657@redhat.com>
2013-03-25  7:48     ` Asias He
     [not found]     ` <20130325074806.GB20435@hj.localdomain>
2013-03-25 11:10       ` Michael S. Tsirkin
2013-03-26  2:56         ` Asias He
2013-03-26 20:46           ` Michael S. Tsirkin
2013-03-27  3:23             ` Asias He
2013-03-24 15:20 ` [PATCH V4 0/2] tcm_vhost hotplug Michael S. Tsirkin
2013-03-25  8:20   ` Asias He
2013-04-08 21:31 ` Nicholas A. Bellinger
2013-04-08 20:41   ` Michael S. Tsirkin
2013-04-09  0:57   ` Asias He
2013-04-08 21:36 ` Nicholas A. Bellinger
