* [PATCH] kvm tools: Add initial virtio-scsi support
From: Asias He @ 2012-08-09 0:51 UTC
To: Pekka Enberg
Cc: Sasha Levin, Ingo Molnar, Cyrill Gorcunov, kvm,
Nicholas A. Bellinger, Stefan Hajnoczi, Paolo Bonzini
This patch brings virtio-scsi support to kvm tool.
With the introduction of tcm_vhost (vhost-scsi)
tcm_vhost: Initial merge for vhost level target fabric driver
we can implement virtio-scsi simply by having vhost-scsi handle the
SCSI commands.
How to use:
1) Set up the tcm_vhost target through /sys/kernel/config
[Thanks to Stefan Hajnoczi for the script to set up tcm_vhost]
** Setup wwpn and tpgt
$ wwpn="naa.0"
$ tpgt=/sys/kernel/config/target/vhost/$wwpn/tpgt_0
$ nexus=$tpgt/nexus
$ mkdir -p $tpgt
$ echo -n $wwpn > $nexus
** Setup lun using /dev/ram
$ n=0
$ lun=$tpgt/lun/lun_${n}
$ data=/sys/kernel/config/target/core/iblock_0/data_${n}
$ ram=/dev/ram${n}
$ mkdir -p $lun
$ mkdir -p $data
$ echo -n udev_path=${ram} > $data/control
$ echo -n 1 > $data/enable
$ ln -s $data $lun
2) Run kvm tool with the new disk option '-d scsi:$wwpn:$tpgt', e.g.
$ lkvm run -k /boot/bzImage -d ~/img/sid.img -d scsi:naa.0:0
Signed-off-by: Asias He <asias.hejun@gmail.com>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Cc: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
---
tools/kvm/Makefile | 1 +
tools/kvm/builtin-run.c | 25 +++
tools/kvm/disk/core.c | 14 ++
tools/kvm/include/kvm/disk-image.h | 4 +
tools/kvm/include/kvm/virtio-pci-dev.h | 1 +
tools/kvm/include/kvm/virtio-scsi.h | 11 ++
tools/kvm/include/linux/compiler.h | 1 +
tools/kvm/virtio/blk.c | 2 +
tools/kvm/virtio/scsi.c | 332 +++++++++++++++++++++++++++++++++
9 files changed, 391 insertions(+)
create mode 100644 tools/kvm/include/kvm/virtio-scsi.h
create mode 100644 tools/kvm/virtio/scsi.c
diff --git a/tools/kvm/Makefile b/tools/kvm/Makefile
index f9e1ec1..749956a 100644
--- a/tools/kvm/Makefile
+++ b/tools/kvm/Makefile
@@ -55,6 +55,7 @@ OBJS += mmio.o
OBJS += pci.o
OBJS += term.o
OBJS += virtio/blk.o
+OBJS += virtio/scsi.o
OBJS += virtio/console.o
OBJS += virtio/core.o
OBJS += virtio/net.o
diff --git a/tools/kvm/builtin-run.c b/tools/kvm/builtin-run.c
index a36bd00..c3ec469 100644
--- a/tools/kvm/builtin-run.c
+++ b/tools/kvm/builtin-run.c
@@ -8,6 +8,7 @@
#include "kvm/framebuffer.h"
#include "kvm/disk-image.h"
#include "kvm/threadpool.h"
+#include "kvm/virtio-scsi.h"
#include "kvm/virtio-blk.h"
#include "kvm/virtio-net.h"
#include "kvm/virtio-rng.h"
@@ -171,6 +172,19 @@ static int img_name_parser(const struct option *opt, const char *arg, int unset)
disk_image[image_count].filename = arg;
cur = arg;
+
+ if (strncmp(arg, "scsi:", 5) == 0) {
+ sep = strstr(arg, ":");
+ if (sep)
+ disk_image[image_count].wwpn = sep + 1;
+ sep = strstr(sep + 1, ":");
+ if (sep) {
+ *sep = 0;
+ disk_image[image_count].tpgt = sep + 1;
+ }
+ cur = sep + 1;
+ }
+
do {
sep = strstr(cur, ",");
if (sep) {
@@ -1184,6 +1198,13 @@ static int kvm_cmd_run_init(int argc, const char **argv)
goto fail;
}
+ r = virtio_scsi_init(kvm);
+ if (r < 0) {
+ pr_err("virtio_scsi_init() failed with error %d\n", r);
+ goto fail;
+ }
+
+
if (active_console == CONSOLE_VIRTIO)
virtio_console__init(kvm);
@@ -1332,6 +1353,10 @@ static void kvm_cmd_run_exit(int guest_ret)
fb__stop();
+ r = virtio_scsi_exit(kvm);
+ if (r < 0)
+ pr_warning("virtio_scsi_exit() failed with error %d\n", r);
+
r = virtio_blk__exit(kvm);
if (r < 0)
pr_warning("virtio_blk__exit() failed with error %d\n", r);
diff --git a/tools/kvm/disk/core.c b/tools/kvm/disk/core.c
index 621c940..f7e2c7f 100644
--- a/tools/kvm/disk/core.c
+++ b/tools/kvm/disk/core.c
@@ -122,6 +122,8 @@ struct disk_image **disk_image__open_all(struct disk_image_params *params, int c
{
struct disk_image **disks;
const char *filename;
+ const char *wwpn;
+ const char *tpgt;
bool readonly;
bool direct;
void *err;
@@ -140,6 +142,18 @@ struct disk_image **disk_image__open_all(struct disk_image_params *params, int c
filename = params[i].filename;
readonly = params[i].readonly;
direct = params[i].direct;
+ wwpn = params[i].wwpn;
+ tpgt = params[i].tpgt;
+
+ if (wwpn) {
+ disks[i] = malloc(sizeof(struct disk_image));
+ if (!disks[i])
+ return ERR_PTR(-ENOMEM);
+ disks[i]->wwpn = wwpn;
+ disks[i]->tpgt = tpgt;
+ continue;
+ }
+
if (!filename)
continue;
diff --git a/tools/kvm/include/kvm/disk-image.h b/tools/kvm/include/kvm/disk-image.h
index 7ae17f8..54e4047 100644
--- a/tools/kvm/include/kvm/disk-image.h
+++ b/tools/kvm/include/kvm/disk-image.h
@@ -41,6 +41,8 @@ struct disk_image_operations {
struct disk_image_params {
const char *filename;
+ const char *wwpn;
+ const char *tpgt;
bool readonly;
bool direct;
};
@@ -57,6 +59,8 @@ struct disk_image {
#ifdef CONFIG_HAS_AIO
io_context_t ctx;
#endif
+ const char *wwpn;
+ const char *tpgt;
};
struct disk_image *disk_image__open(const char *filename, bool readonly, bool direct);
diff --git a/tools/kvm/include/kvm/virtio-pci-dev.h b/tools/kvm/include/kvm/virtio-pci-dev.h
index 7ceb125..48ae018 100644
--- a/tools/kvm/include/kvm/virtio-pci-dev.h
+++ b/tools/kvm/include/kvm/virtio-pci-dev.h
@@ -13,6 +13,7 @@
#define PCI_DEVICE_ID_VIRTIO_CONSOLE 0x1003
#define PCI_DEVICE_ID_VIRTIO_RNG 0x1004
#define PCI_DEVICE_ID_VIRTIO_BLN 0x1005
+#define PCI_DEVICE_ID_VIRTIO_SCSI 0x1008
#define PCI_DEVICE_ID_VIRTIO_9P 0x1009
#define PCI_DEVICE_ID_VESA 0x2000
#define PCI_DEVICE_ID_PCI_SHMEM 0x0001
diff --git a/tools/kvm/include/kvm/virtio-scsi.h b/tools/kvm/include/kvm/virtio-scsi.h
new file mode 100644
index 0000000..d64aa7e
--- /dev/null
+++ b/tools/kvm/include/kvm/virtio-scsi.h
@@ -0,0 +1,11 @@
+#ifndef KVM__SCSI_VIRTIO_H
+#define KVM__SCSI_VIRTIO_H
+
+#include "kvm/disk-image.h"
+
+struct kvm;
+
+int virtio_scsi_init(struct kvm *kvm);
+int virtio_scsi_exit(struct kvm *kvm);
+
+#endif /* KVM__SCSI_VIRTIO_H */
diff --git a/tools/kvm/include/linux/compiler.h b/tools/kvm/include/linux/compiler.h
index b9c5346..898420b 100644
--- a/tools/kvm/include/linux/compiler.h
+++ b/tools/kvm/include/linux/compiler.h
@@ -11,6 +11,7 @@
#endif
#define __used __attribute__((__unused__))
+#define __packed __attribute__((packed))
#define __iomem
#define __force
#define __must_check
diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
index 1fb969f..740442a 100644
--- a/tools/kvm/virtio/blk.c
+++ b/tools/kvm/virtio/blk.c
@@ -290,6 +290,8 @@ int virtio_blk__init(struct kvm *kvm)
int i, r = 0;
for (i = 0; i < kvm->nr_disks; i++) {
+ if (kvm->disks[i]->wwpn)
+ continue;
r = virtio_blk__init_one(kvm, kvm->disks[i]);
if (r < 0)
goto cleanup;
diff --git a/tools/kvm/virtio/scsi.c b/tools/kvm/virtio/scsi.c
new file mode 100644
index 0000000..5bcb00c
--- /dev/null
+++ b/tools/kvm/virtio/scsi.c
@@ -0,0 +1,332 @@
+#include "kvm/virtio-scsi.h"
+#include "kvm/virtio-pci-dev.h"
+#include "kvm/disk-image.h"
+#include "kvm/kvm.h"
+#include "kvm/pci.h"
+#include "kvm/ioeventfd.h"
+#include "kvm/guest_compat.h"
+#include "kvm/virtio-pci.h"
+#include "kvm/virtio.h"
+
+#include <linux/kernel.h>
+#include <linux/virtio_scsi.h>
+#include <linux/vhost.h>
+
+/*----------------------------------------------------*/
+/* TODO: Remove this when tcm_vhost goes upstream */
+#define TRANSPORT_IQN_LEN 224
+#define VHOST_SCSI_ABI_VERSION 0
+struct vhost_scsi_target {
+ int abi_version;
+ unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
+ unsigned short vhost_tpgt;
+};
+/* VHOST_SCSI specific defines */
+#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
+#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
+#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
+/*----------------------------------------------------*/
+
+
+#define VIRTIO_SCSI_QUEUE_SIZE 128
+#define NUM_VIRT_QUEUES 3
+
+static LIST_HEAD(sdevs);
+static int compat_id = -1;
+
+struct scsi_dev {
+ struct virt_queue vqs[NUM_VIRT_QUEUES];
+ struct virtio_scsi_config scsi_config;
+ struct vhost_scsi_target target;
+ u32 features;
+ int vhost_fd;
+ struct virtio_device vdev;
+ struct list_head list;
+ struct kvm *kvm;
+};
+
+static void set_config(struct kvm *kvm, void *dev, u8 data, u32 offset)
+{
+ struct scsi_dev *sdev = dev;
+
+ ((u8 *)(&sdev->scsi_config))[offset] = data;
+}
+
+static u8 get_config(struct kvm *kvm, void *dev, u32 offset)
+{
+ struct scsi_dev *sdev = dev;
+
+ return ((u8 *)(&sdev->scsi_config))[offset];
+}
+
+static u32 get_host_features(struct kvm *kvm, void *dev)
+{
+ return 1UL << VIRTIO_RING_F_EVENT_IDX |
+ 1UL << VIRTIO_RING_F_INDIRECT_DESC;
+}
+
+static void set_guest_features(struct kvm *kvm, void *dev, u32 features)
+{
+ struct scsi_dev *sdev = dev;
+
+ sdev->features = features;
+}
+
+static int init_vq(struct kvm *kvm, void *dev, u32 vq, u32 pfn)
+{
+ struct vhost_vring_state state = { .index = vq };
+ struct vhost_vring_addr addr;
+ struct scsi_dev *sdev = dev;
+ struct virt_queue *queue;
+ void *p;
+ int r;
+
+ compat__remove_message(compat_id);
+
+ queue = &sdev->vqs[vq];
+ queue->pfn = pfn;
+ p = guest_pfn_to_host(kvm, queue->pfn);
+
+ vring_init(&queue->vring, VIRTIO_SCSI_QUEUE_SIZE, p, VIRTIO_PCI_VRING_ALIGN);
+
+ if (sdev->vhost_fd == 0)
+ return 0;
+
+ state.num = queue->vring.num;
+ r = ioctl(sdev->vhost_fd, VHOST_SET_VRING_NUM, &state);
+ if (r < 0)
+ die_perror("VHOST_SET_VRING_NUM failed");
+ state.num = 0;
+ r = ioctl(sdev->vhost_fd, VHOST_SET_VRING_BASE, &state);
+ if (r < 0)
+ die_perror("VHOST_SET_VRING_BASE failed");
+
+ addr = (struct vhost_vring_addr) {
+ .index = vq,
+ .desc_user_addr = (u64)(unsigned long)queue->vring.desc,
+ .avail_user_addr = (u64)(unsigned long)queue->vring.avail,
+ .used_user_addr = (u64)(unsigned long)queue->vring.used,
+ };
+
+ r = ioctl(sdev->vhost_fd, VHOST_SET_VRING_ADDR, &addr);
+ if (r < 0)
+ die_perror("VHOST_SET_VRING_ADDR failed");
+
+ return 0;
+}
+
+static void notify_vq_gsi(struct kvm *kvm, void *dev, u32 vq, u32 gsi)
+{
+ struct vhost_vring_file file;
+ struct scsi_dev *sdev = dev;
+ struct kvm_irqfd irq;
+ int r;
+
+ if (sdev->vhost_fd == 0)
+ return;
+
+ irq = (struct kvm_irqfd) {
+ .gsi = gsi,
+ .fd = eventfd(0, 0),
+ };
+ file = (struct vhost_vring_file) {
+ .index = vq,
+ .fd = irq.fd,
+ };
+
+ r = ioctl(kvm->vm_fd, KVM_IRQFD, &irq);
+ if (r < 0)
+ die_perror("KVM_IRQFD failed");
+
+ r = ioctl(sdev->vhost_fd, VHOST_SET_VRING_CALL, &file);
+ if (r < 0)
+ die_perror("VHOST_SET_VRING_CALL failed");
+
+ if (vq > 0)
+ return;
+
+ r = ioctl(sdev->vhost_fd, VHOST_SCSI_SET_ENDPOINT, &sdev->target);
+ if (r != 0)
+ die("VHOST_SCSI_SET_ENDPOINT failed %d", errno);
+}
+
+static void notify_vq_eventfd(struct kvm *kvm, void *dev, u32 vq, u32 efd)
+{
+ struct scsi_dev *sdev = dev;
+ struct vhost_vring_file file = {
+ .index = vq,
+ .fd = efd,
+ };
+ int r;
+
+ if (sdev->vhost_fd == 0)
+ return;
+
+ r = ioctl(sdev->vhost_fd, VHOST_SET_VRING_KICK, &file);
+ if (r < 0)
+ die_perror("VHOST_SET_VRING_KICK failed");
+}
+
+static int notify_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ return 0;
+}
+
+static int get_pfn_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ struct scsi_dev *sdev = dev;
+
+ return sdev->vqs[vq].pfn;
+}
+
+static int get_size_vq(struct kvm *kvm, void *dev, u32 vq)
+{
+ return VIRTIO_SCSI_QUEUE_SIZE;
+}
+
+static int set_size_vq(struct kvm *kvm, void *dev, u32 vq, int size)
+{
+ return size;
+}
+
+static struct virtio_ops scsi_dev_virtio_ops = (struct virtio_ops) {
+ .set_config = set_config,
+ .get_config = get_config,
+ .get_host_features = get_host_features,
+ .set_guest_features = set_guest_features,
+ .init_vq = init_vq,
+ .get_pfn_vq = get_pfn_vq,
+ .get_size_vq = get_size_vq,
+ .set_size_vq = set_size_vq,
+ .notify_vq = notify_vq,
+ .notify_vq_gsi = notify_vq_gsi,
+ .notify_vq_eventfd = notify_vq_eventfd,
+};
+
+static void virtio_scsi_vhost_init(struct kvm *kvm, struct scsi_dev *sdev)
+{
+ struct vhost_memory *mem;
+ u64 features;
+ int r;
+
+ sdev->vhost_fd = open("/dev/vhost-scsi", O_RDWR);
+ if (sdev->vhost_fd < 0)
+ die_perror("Failed opening vhost-scsi device");
+
+ mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
+ if (mem == NULL)
+ die("Failed allocating memory for vhost memory map");
+
+ mem->nregions = 1;
+ mem->regions[0] = (struct vhost_memory_region) {
+ .guest_phys_addr = 0,
+ .memory_size = kvm->ram_size,
+ .userspace_addr = (unsigned long)kvm->ram_start,
+ };
+
+ r = ioctl(sdev->vhost_fd, VHOST_SET_OWNER);
+ if (r != 0)
+ die_perror("VHOST_SET_OWNER failed");
+
+ r = ioctl(sdev->vhost_fd, VHOST_GET_FEATURES, &features);
+ if (r != 0)
+ die_perror("VHOST_GET_FEATURES failed");
+
+ r = ioctl(sdev->vhost_fd, VHOST_SET_FEATURES, &features);
+ if (r != 0)
+ die_perror("VHOST_SET_FEATURES failed");
+ r = ioctl(sdev->vhost_fd, VHOST_SET_MEM_TABLE, mem);
+ if (r != 0)
+ die_perror("VHOST_SET_MEM_TABLE failed");
+
+ sdev->vdev.use_vhost = true;
+
+ free(mem);
+}
+
+
+static int virtio_scsi_init_one(struct kvm *kvm, struct disk_image *disk)
+{
+ struct scsi_dev *sdev;
+
+ if (!disk)
+ return -EINVAL;
+
+ sdev = calloc(1, sizeof(struct scsi_dev));
+ if (sdev == NULL)
+ return -ENOMEM;
+
+ *sdev = (struct scsi_dev) {
+ .scsi_config = (struct virtio_scsi_config) {
+ .num_queues = NUM_VIRT_QUEUES - 2,
+ .seg_max = VIRTIO_SCSI_CDB_SIZE - 2,
+ .max_sectors = 65535,
+ .cmd_per_lun = 128,
+ .sense_size = VIRTIO_SCSI_SENSE_SIZE,
+ .cdb_size = VIRTIO_SCSI_CDB_SIZE,
+ .max_channel = 0,
+ .max_target = 0,
+ .max_lun = 16383,
+ .event_info_size = sizeof(struct virtio_scsi_event),
+ },
+ .kvm = kvm,
+ };
+
+ strncpy((char *)&sdev->target.vhost_wwpn, disk->wwpn, sizeof(sdev->target.vhost_wwpn));
+ sdev->target.vhost_tpgt = strtol(disk->tpgt, NULL, 0);
+
+ virtio_init(kvm, sdev, &sdev->vdev, &scsi_dev_virtio_ops,
+ VIRTIO_PCI, PCI_DEVICE_ID_VIRTIO_SCSI, VIRTIO_ID_SCSI, PCI_CLASS_BLK);
+
+ list_add_tail(&sdev->list, &sdevs);
+
+ virtio_scsi_vhost_init(kvm, sdev);
+
+ if (compat_id == -1)
+ compat_id = virtio_compat_add_message("virtio-scsi", "CONFIG_VIRTIO_SCSI");
+
+ return 0;
+}
+
+static int virtio_scsi_exit_one(struct kvm *kvm, struct scsi_dev *sdev)
+{
+ int r;
+
+ r = ioctl(sdev->vhost_fd, VHOST_SCSI_CLEAR_ENDPOINT, &sdev->target);
+ if (r != 0)
+ die("VHOST_SCSI_CLEAR_ENDPOINT failed %d", errno);
+
+ list_del(&sdev->list);
+ free(sdev);
+
+ return 0;
+}
+
+int virtio_scsi_init(struct kvm *kvm)
+{
+ int i, r = 0;
+
+ for (i = 0; i < kvm->nr_disks; i++) {
+ if (!kvm->disks[i]->wwpn)
+ continue;
+ r = virtio_scsi_init_one(kvm, kvm->disks[i]);
+ if (r < 0)
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ return virtio_scsi_exit(kvm);
+}
+
+int virtio_scsi_exit(struct kvm *kvm)
+{
+ while (!list_empty(&sdevs)) {
+ struct scsi_dev *sdev;
+
+ sdev = list_first_entry(&sdevs, struct scsi_dev, list);
+ virtio_scsi_exit_one(kvm, sdev);
+ }
+
+ return 0;
+}
--
1.7.11.2
* Re: [PATCH] kvm tools: Add initial virtio-scsi support
From: Pekka Enberg @ 2012-08-13 7:24 UTC
To: Asias He
Cc: Sasha Levin, Ingo Molnar, Cyrill Gorcunov, kvm,
Nicholas A. Bellinger, Stefan Hajnoczi, Paolo Bonzini
On Thu, Aug 9, 2012 at 3:51 AM, Asias He <asias.hejun@gmail.com> wrote:
> This patch brings virtio-scsi support to kvm tool.
>
> With the introduction of tcm_vhost (vhost-scsi)
>
> tcm_vhost: Initial merge for vhost level target fabric driver
>
> we can implement virtio-scsi simply by having vhost-scsi handle the
> SCSI commands.
>
> How to use:
> 1) Set up the tcm_vhost target through /sys/kernel/config
>
> [Thanks to Stefan Hajnoczi for the script to set up tcm_vhost]
>
> ** Setup wwpn and tpgt
> $ wwpn="naa.0"
> $ tpgt=/sys/kernel/config/target/vhost/$wwpn/tpgt_0
> $ nexus=$tpgt/nexus
> $ mkdir -p $tpgt
> $ echo -n $wwpn > $nexus
>
> ** Setup lun using /dev/ram
> $ n=0
> $ lun=$tpgt/lun/lun_${n}
> $ data=/sys/kernel/config/target/core/iblock_0/data_${n}
> $ ram=/dev/ram${n}
> $ mkdir -p $lun
> $ mkdir -p $data
> $ echo -n udev_path=${ram} > $data/control
> $ echo -n 1 > $data/enable
> $ ln -s $data $lun
>
> 2) Run kvm tool with the new disk option '-d scsi:$wwpn:$tpgt', e.g
> $ lkvm run -k /boot/bzImage -d ~/img/sid.img -d scsi:naa.0:0
>
> Signed-off-by: Asias He <asias.hejun@gmail.com>
> Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
> Cc: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
I've included some comments below but overall, looks good to me. Sasha?
> ---
> diff --git a/tools/kvm/include/kvm/disk-image.h b/tools/kvm/include/kvm/disk-image.h
> index 7ae17f8..54e4047 100644
> --- a/tools/kvm/include/kvm/disk-image.h
> +++ b/tools/kvm/include/kvm/disk-image.h
> @@ -41,6 +41,8 @@ struct disk_image_operations {
>
> struct disk_image_params {
> const char *filename;
> + const char *wwpn;
> + const char *tpgt;
Maybe it's just me but "wwpn" and "tpgt" really could use a comment
explaining what they are...
> bool readonly;
> bool direct;
> };
> @@ -57,6 +59,8 @@ struct disk_image {
> #ifdef CONFIG_HAS_AIO
> io_context_t ctx;
> #endif
> + const char *wwpn;
> + const char *tpgt;
> };
>
> struct disk_image *disk_image__open(const char *filename, bool readonly, bool direct);
> diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
> index 1fb969f..740442a 100644
> --- a/tools/kvm/virtio/blk.c
> +++ b/tools/kvm/virtio/blk.c
> @@ -290,6 +290,8 @@ int virtio_blk__init(struct kvm *kvm)
> int i, r = 0;
>
> for (i = 0; i < kvm->nr_disks; i++) {
> + if (kvm->disks[i]->wwpn)
> + continue;
> r = virtio_blk__init_one(kvm, kvm->disks[i]);
> if (r < 0)
> goto cleanup;
> diff --git a/tools/kvm/virtio/scsi.c b/tools/kvm/virtio/scsi.c
> new file mode 100644
> index 0000000..5bcb00c
> --- /dev/null
> +++ b/tools/kvm/virtio/scsi.c
> @@ -0,0 +1,332 @@
> +#include "kvm/virtio-scsi.h"
> +#include "kvm/virtio-pci-dev.h"
> +#include "kvm/disk-image.h"
> +#include "kvm/kvm.h"
> +#include "kvm/pci.h"
> +#include "kvm/ioeventfd.h"
> +#include "kvm/guest_compat.h"
> +#include "kvm/virtio-pci.h"
> +#include "kvm/virtio.h"
> +
> +#include <linux/kernel.h>
> +#include <linux/virtio_scsi.h>
> +#include <linux/vhost.h>
> +
> +/*----------------------------------------------------*/
> +/* TODO: Remove this when tcm_vhost goes upstream */
> +#define TRANSPORT_IQN_LEN 224
> +#define VHOST_SCSI_ABI_VERSION 0
> +struct vhost_scsi_target {
> + int abi_version;
> + unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
> + unsigned short vhost_tpgt;
> +};
> +/* VHOST_SCSI specific defines */
> +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
> +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
> +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
> +/*----------------------------------------------------*/
You might as well move the above block into a small helper header file.
> +
> +
> +#define VIRTIO_SCSI_QUEUE_SIZE 128
> +#define NUM_VIRT_QUEUES 3
> +
> +static LIST_HEAD(sdevs);
> +static int compat_id = -1;
> +
> +struct scsi_dev {
> + struct virt_queue vqs[NUM_VIRT_QUEUES];
> + struct virtio_scsi_config scsi_config;
> + struct vhost_scsi_target target;
> + u32 features;
> + int vhost_fd;
> + struct virtio_device vdev;
> + struct list_head list;
> + struct kvm *kvm;
> +};
> +
> +static void set_config(struct kvm *kvm, void *dev, u8 data, u32 offset)
> +{
> + struct scsi_dev *sdev = dev;
> +
> + ((u8 *)(&sdev->scsi_config))[offset] = data;
Can you introduce a helper function for this, please?
> +}
> +
> +static u8 get_config(struct kvm *kvm, void *dev, u32 offset)
> +{
> + struct scsi_dev *sdev = dev;
> +
> + return ((u8 *)(&sdev->scsi_config))[offset];
Ditto.
> +}
> +
* Re: [PATCH] kvm tools: Add initial virtio-scsi support
From: Asias He @ 2012-08-14 1:52 UTC
To: Pekka Enberg
Cc: Sasha Levin, Ingo Molnar, Cyrill Gorcunov, kvm,
Nicholas A. Bellinger, Stefan Hajnoczi, Paolo Bonzini
On Mon, Aug 13, 2012 at 3:24 PM, Pekka Enberg <penberg@kernel.org> wrote:
> On Thu, Aug 9, 2012 at 3:51 AM, Asias He <asias.hejun@gmail.com> wrote:
>> This patch brings virtio-scsi support to kvm tool.
>>
>> With the introduction of tcm_vhost (vhost-scsi)
>>
>> tcm_vhost: Initial merge for vhost level target fabric driver
>>
>> we can implement virtio-scsi simply by having vhost-scsi handle the
>> SCSI commands.
>>
>> How to use:
>> 1) Set up the tcm_vhost target through /sys/kernel/config
>>
>> [Thanks to Stefan Hajnoczi for the script to set up tcm_vhost]
>>
>> ** Setup wwpn and tpgt
>> $ wwpn="naa.0"
>> $ tpgt=/sys/kernel/config/target/vhost/$wwpn/tpgt_0
>> $ nexus=$tpgt/nexus
>> $ mkdir -p $tpgt
>> $ echo -n $wwpn > $nexus
>>
>> ** Setup lun using /dev/ram
>> $ n=0
>> $ lun=$tpgt/lun/lun_${n}
>> $ data=/sys/kernel/config/target/core/iblock_0/data_${n}
>> $ ram=/dev/ram${n}
>> $ mkdir -p $lun
>> $ mkdir -p $data
>> $ echo -n udev_path=${ram} > $data/control
>> $ echo -n 1 > $data/enable
>> $ ln -s $data $lun
>>
>> 2) Run kvm tool with the new disk option '-d scsi:$wwpn:$tpgt', e.g
>> $ lkvm run -k /boot/bzImage -d ~/img/sid.img -d scsi:naa.0:0
>>
>> Signed-off-by: Asias He <asias.hejun@gmail.com>
>> Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
>> Cc: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
>> Cc: Paolo Bonzini <pbonzini@redhat.com>
>
> I've included some comments below but overall, looks good to me. Sasha?
>
>> ---
>> diff --git a/tools/kvm/include/kvm/disk-image.h b/tools/kvm/include/kvm/disk-image.h
>> index 7ae17f8..54e4047 100644
>> --- a/tools/kvm/include/kvm/disk-image.h
>> +++ b/tools/kvm/include/kvm/disk-image.h
>> @@ -41,6 +41,8 @@ struct disk_image_operations {
>>
>> struct disk_image_params {
>> const char *filename;
>> + const char *wwpn;
>> + const char *tpgt;
>
> Maybe it's just me but "wwpn" and "tpgt" really could use a comment
> explaining what they are...
OK. They are terminology from the Linux target subsystem.
wwpn == world wide port number
tpgt == target port group tag
Nicholas, is this right?
>> bool readonly;
>> bool direct;
>> };
>> @@ -57,6 +59,8 @@ struct disk_image {
>> #ifdef CONFIG_HAS_AIO
>> io_context_t ctx;
>> #endif
>> + const char *wwpn;
>> + const char *tpgt;
>> };
>>
>> struct disk_image *disk_image__open(const char *filename, bool readonly, bool direct);
>> diff --git a/tools/kvm/virtio/blk.c b/tools/kvm/virtio/blk.c
>> index 1fb969f..740442a 100644
>> --- a/tools/kvm/virtio/blk.c
>> +++ b/tools/kvm/virtio/blk.c
>> @@ -290,6 +290,8 @@ int virtio_blk__init(struct kvm *kvm)
>> int i, r = 0;
>>
>> for (i = 0; i < kvm->nr_disks; i++) {
>> + if (kvm->disks[i]->wwpn)
>> + continue;
>> r = virtio_blk__init_one(kvm, kvm->disks[i]);
>> if (r < 0)
>> goto cleanup;
>> diff --git a/tools/kvm/virtio/scsi.c b/tools/kvm/virtio/scsi.c
>> new file mode 100644
>> index 0000000..5bcb00c
>> --- /dev/null
>> +++ b/tools/kvm/virtio/scsi.c
>> @@ -0,0 +1,332 @@
>> +#include "kvm/virtio-scsi.h"
>> +#include "kvm/virtio-pci-dev.h"
>> +#include "kvm/disk-image.h"
>> +#include "kvm/kvm.h"
>> +#include "kvm/pci.h"
>> +#include "kvm/ioeventfd.h"
>> +#include "kvm/guest_compat.h"
>> +#include "kvm/virtio-pci.h"
>> +#include "kvm/virtio.h"
>> +
>> +#include <linux/kernel.h>
>> +#include <linux/virtio_scsi.h>
>> +#include <linux/vhost.h>
>> +
>> +/*----------------------------------------------------*/
>> +/* TODO: Remove this when tcm_vhost goes upstream */
>> +#define TRANSPORT_IQN_LEN 224
>> +#define VHOST_SCSI_ABI_VERSION 0
>> +struct vhost_scsi_target {
>> + int abi_version;
>> + unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
>> + unsigned short vhost_tpgt;
>> +};
>> +/* VHOST_SCSI specific defines */
>> +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
>> +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
>> +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)
>> +/*----------------------------------------------------*/
>
> You might as well move the above block into a small helper header file.
How about include/kvm/virtio-scsi.h ?
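If it moves there, the header could end up looking roughly like this (a sketch
that simply relocates the temporary definitions from scsi.c; it assumes they
stay until a tcm_vhost UAPI header is available, and pulls in <linux/vhost.h>
for the _IOW() definitions):

#ifndef KVM__SCSI_VIRTIO_H
#define KVM__SCSI_VIRTIO_H

#include <linux/vhost.h>

#include "kvm/disk-image.h"

/* TODO: Remove this when tcm_vhost goes upstream */
#define TRANSPORT_IQN_LEN		224
#define VHOST_SCSI_ABI_VERSION		0

struct vhost_scsi_target {
	int abi_version;
	unsigned char vhost_wwpn[TRANSPORT_IQN_LEN];
	unsigned short vhost_tpgt;
};

/* VHOST_SCSI specific defines */
#define VHOST_SCSI_SET_ENDPOINT		_IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target)
#define VHOST_SCSI_CLEAR_ENDPOINT	_IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target)
#define VHOST_SCSI_GET_ABI_VERSION	_IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target)

struct kvm;

int virtio_scsi_init(struct kvm *kvm);
int virtio_scsi_exit(struct kvm *kvm);

#endif /* KVM__SCSI_VIRTIO_H */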
>> +
>> +
>> +#define VIRTIO_SCSI_QUEUE_SIZE 128
>> +#define NUM_VIRT_QUEUES 3
>> +
>> +static LIST_HEAD(sdevs);
>> +static int compat_id = -1;
>> +
>> +struct scsi_dev {
>> + struct virt_queue vqs[NUM_VIRT_QUEUES];
>> + struct virtio_scsi_config scsi_config;
>> + struct vhost_scsi_target target;
>> + u32 features;
>> + int vhost_fd;
>> + struct virtio_device vdev;
>> + struct list_head list;
>> + struct kvm *kvm;
>> +};
>> +
>> +static void set_config(struct kvm *kvm, void *dev, u8 data, u32 offset)
>> +{
>> + struct scsi_dev *sdev = dev;
>> +
>> + ((u8 *)(&sdev->scsi_config))[offset] = data;
>
> Can you introduce a helper function for this, please?
We have similar code in the other devices' set_config/get_config as well.
Let's clean them up after this patch? Also, I have plans to clean up
the vhost code.
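As a rough idea of what such a shared helper pair might look like after that
cleanup (illustrative only; the helper names and their final home are
hypothetical, the per-device wrappers follow the existing scsi.c code):

static inline u8 virtio_config__get_byte(void *config, u32 offset)
{
	/* Byte-wise read from a device's config space */
	return ((u8 *)config)[offset];
}

static inline void virtio_config__set_byte(void *config, u32 offset, u8 data)
{
	/* Byte-wise write into a device's config space */
	((u8 *)config)[offset] = data;
}

/* e.g. in virtio/scsi.c */
static u8 get_config(struct kvm *kvm, void *dev, u32 offset)
{
	struct scsi_dev *sdev = dev;

	return virtio_config__get_byte(&sdev->scsi_config, offset);
}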
>
>> +}
>> +
>> +static u8 get_config(struct kvm *kvm, void *dev, u32 offset)
>> +{
>> + struct scsi_dev *sdev = dev;
>> +
>> + return ((u8 *)(&sdev->scsi_config))[offset];
>
> Ditto.
>
>> +}
>> +
--
Asias He
* Re: [PATCH] kvm tools: Add initial virtio-scsi support
From: Stefan Hajnoczi @ 2012-08-14 6:45 UTC
To: Asias He
Cc: Pekka Enberg, Sasha Levin, Ingo Molnar, Cyrill Gorcunov, kvm,
Nicholas A. Bellinger, Paolo Bonzini
On Tue, Aug 14, 2012 at 09:52:55AM +0800, Asias He wrote:
> On Mon, Aug 13, 2012 at 3:24 PM, Pekka Enberg <penberg@kernel.org> wrote:
> > On Thu, Aug 9, 2012 at 3:51 AM, Asias He <asias.hejun@gmail.com> wrote:
> >> This patch brings virtio-scsi support to kvm tool.
> >>
> >> With the introduction of tcm_vhost (vhost-scsi)
> >>
> >> tcm_vhost: Initial merge for vhost level target fabric driver
> >>
> >> we can implement virtio-scsi simply by having vhost-scsi handle the
> >> SCSI commands.
> >>
> >> How to use:
> >> 1) Set up the tcm_vhost target through /sys/kernel/config
> >>
> >> [Thanks to Stefan Hajnoczi for the script to set up tcm_vhost]
> >>
> >> ** Setup wwpn and tpgt
> >> $ wwpn="naa.0"
> >> $ tpgt=/sys/kernel/config/target/vhost/$wwpn/tpgt_0
> >> $ nexus=$tpgt/nexus
> >> $ mkdir -p $tpgt
> >> $ echo -n $wwpn > $nexus
> >>
> >> ** Setup lun using /dev/ram
> >> $ n=0
> >> $ lun=$tpgt/lun/lun_${n}
> >> $ data=/sys/kernel/config/target/core/iblock_0/data_${n}
> >> $ ram=/dev/ram${n}
> >> $ mkdir -p $lun
> >> $ mkdir -p $data
> >> $ echo -n udev_path=${ram} > $data/control
> >> $ echo -n 1 > $data/enable
> >> $ ln -s $data $lun
> >>
> >> 2) Run kvm tool with the new disk option '-d scsi:$wwpn:$tpgt', e.g
> >> $ lkvm run -k /boot/bzImage -d ~/img/sid.img -d scsi:naa.0:0
> >>
> >> Signed-off-by: Asias He <asias.hejun@gmail.com>
> >> Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
> >> Cc: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
> >> Cc: Paolo Bonzini <pbonzini@redhat.com>
> >
> > I've included some comments below but overall, looks good to me. Sasha?
> >
> >> ---
> >> diff --git a/tools/kvm/include/kvm/disk-image.h b/tools/kvm/include/kvm/disk-image.h
> >> index 7ae17f8..54e4047 100644
> >> --- a/tools/kvm/include/kvm/disk-image.h
> >> +++ b/tools/kvm/include/kvm/disk-image.h
> >> @@ -41,6 +41,8 @@ struct disk_image_operations {
> >>
> >> struct disk_image_params {
> >> const char *filename;
> >> + const char *wwpn;
> >> + const char *tpgt;
> >
> > Maybe it's just me but "wwpn" and "tpgt" really could use a comment
> > explaining what they are...
>
> OK. They are terminology from linux target.
>
> wwpn == world wide port number
> tpgt == target port group tag
Target Portal Group Tag
TPGT is iSCSI-specific terminology but other transports like SAS or FCP
have similar concepts of naming target ports. I think this terminology
is used by the LIO target regardless of the fabric.
The SCSI Architecture Model standard says the target port name is the
"name (see 3.1.70) of a SCSI target port that is world wide unique
within the SCSI transport protocol of the SCSI domain of that SCSI
target port".
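For reference, the two fields could be annotated roughly like this (a sketch
only; the comment wording is a suggestion based on the discussion above, not
part of the patch):

struct disk_image_params {
	const char *filename;
	/* vhost-scsi target: World Wide Port Name, e.g. "naa.0" */
	const char *wwpn;
	/* vhost-scsi target: Target Portal Group Tag, e.g. "0" */
	const char *tpgt;
	bool readonly;
	bool direct;
};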
* Re: [PATCH] kvm tools: Add initial virtio-scsi support
From: Nicholas A. Bellinger @ 2012-08-15 0:06 UTC
To: Stefan Hajnoczi
Cc: Asias He, Pekka Enberg, Sasha Levin, Ingo Molnar, Cyrill Gorcunov,
kvm, Paolo Bonzini
On Tue, 2012-08-14 at 07:45 +0100, Stefan Hajnoczi wrote:
> On Tue, Aug 14, 2012 at 09:52:55AM +0800, Asias He wrote:
> > On Mon, Aug 13, 2012 at 3:24 PM, Pekka Enberg <penberg@kernel.org> wrote:
> > > On Thu, Aug 9, 2012 at 3:51 AM, Asias He <asias.hejun@gmail.com> wrote:
<SNIP>
> > >> ---
> > >> diff --git a/tools/kvm/include/kvm/disk-image.h b/tools/kvm/include/kvm/disk-image.h
> > >> index 7ae17f8..54e4047 100644
> > >> --- a/tools/kvm/include/kvm/disk-image.h
> > >> +++ b/tools/kvm/include/kvm/disk-image.h
> > >> @@ -41,6 +41,8 @@ struct disk_image_operations {
> > >>
> > >> struct disk_image_params {
> > >> const char *filename;
> > >> + const char *wwpn;
> > >> + const char *tpgt;
> > >
> > > Maybe it's just me but "wwpn" and "tpgt" really could use a comment
> > > explaining what they are...
> >
> > OK. They are terminology from linux target.
> >
> > wwpn == world wide port number
> > tpgt == target port group tag
>
> Target Portal Group Tag
>
> TPGT is iSCSI-specific terminology but other transports like SAS or FCP
> have similar concepts of naming target ports. I think this terminology
> is used by the LIO target regardless of the fabric.
>
> The SCSI Architecture Model standard says the target port name is the
> "name (see 3.1.70) of a SCSI target port that is world wide unique
> within the SCSI transport protocol of the SCSI domain of that SCSI
> target port".
>
The idea of a TargetPortalGroupTag originally came from iSCSI to do
logical partitioning of a single TargetName (IQN or EUI) instance with
different LUN/auth/network portal configurations. For iSCSI fabrics,
the SCSI target WWPN takes the form of a symbolic string as:
$TARGETNAME+t+0x$TPGT
In generic target_core_fabric_configfs.c code, all SCSI target endpoints
are attached to se_portal_group->tpg_group residing individually
at /sys/kernel/config/target/$FABRIC/$FABRIC_WWN/$TPGT/. Its parent
configfs group is at se_wwn->wwn_group, whose higher-level parent is the
top-level fabric group registered by the fabric driver with TCM.
So the $FABRIC_WWN/$TPGT/[luns,acls] groups hang off this primary
config_group, which means that currently non-iSCSI target fabric
drivers are required to individually enforce TPGT=1 usage in their own
code when mapping SCSI target WWPNs to underlying storage HW WWPNs, or to a
fabric using software-based WWPNs (as iSCSI does).
Also, at the user level, rtsadmin/targetcli does hide the notion of
TPGTs for all non-iSCSI fabrics; however, using rtslib directly does
still require following the underlying $FABRIC/$FABRIC_WWN/$TPGT/ object
layout.
--nab
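To make that layout concrete for the kvm tool case: given the wwpn/tpgt pair
passed via '-d scsi:$wwpn:$tpgt', the vhost endpoint's configfs directory can
be composed as below (a hypothetical helper, shown only to illustrate the
$FABRIC/$FABRIC_WWN/$TPGT layout described above):

#include <stdio.h>

/* e.g. vhost_tpg_path(buf, sizeof(buf), "naa.0", "0") yields
 * "/sys/kernel/config/target/vhost/naa.0/tpgt_0"
 */
static int vhost_tpg_path(char *buf, size_t len, const char *wwpn, const char *tpgt)
{
	return snprintf(buf, len, "/sys/kernel/config/target/vhost/%s/tpgt_%s",
			wwpn, tpgt);
}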