From: Ming Lin <mlin@kernel.org>
To: linux-nvme@lists.infradead.org
Cc: Ming Lin <ming.l@ssi.samsung.com>,
	qemu-devel@nongnu.org,
	"Nicholas A. Bellinger" <nab@linux-iscsi.org>,
	virtualization@lists.linux-foundation.org,
	Hannes Reinecke <hare@suse.de>, Christoph Hellwig <hch@lst.de>
Subject: [Qemu-devel] [RFC PATCH 8/9] nvme-vhost: add vhost memory helpers
Date: Thu, 19 Nov 2015 16:21:07 -0800
Message-ID: <1447978868-17138-9-git-send-email-mlin@kernel.org>
In-Reply-To: <1447978868-17138-1-git-send-email-mlin@kernel.org>

From: Ming Lin <ming.l@ssi.samsung.com>

Add helpers to translate a guest physical address to the corresponding
host userspace address through the vhost memory table, pin the backing
page and copy data to and from guest memory (nvmet_vhost_read() and
nvmet_vhost_write()).

This borrows code from Hannes Reinecke's rts-megasas.

Cc: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
---
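For reviewers, a usage sketch (illustrative only, not part of the patch):
the helpers look up the vhost memory region that covers a guest physical
address, translate it to the QEMU userspace address (userspace_addr +
gpa - guest_phys_addr), pin the page with get_user_pages() and copy via
kmap().  A hypothetical caller fetching one 64-byte NVMe submission queue
entry could look like the sketch below; the function name and calling
context are invented for illustration, and each copy is assumed to stay
within a single guest page, since nvmet_vhost_rw() maps only one page.

	/* Hypothetical example only, not part of this patch. */
	static int example_fetch_sqe(struct vhost_dev *dev, u64 sq_dma_addr,
				     u16 head, struct nvme_command *cmd)
	{
		/* SQEs are 64 bytes; entry 'head' starts at this guest PA. */
		u64 addr = sq_dma_addr + (u64)head * sizeof(*cmd);

		return nvmet_vhost_read(dev, addr, cmd, sizeof(*cmd));
	}
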
 drivers/nvme/target/vhost.c | 108 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 108 insertions(+)

diff --git a/drivers/nvme/target/vhost.c b/drivers/nvme/target/vhost.c
index 04ed0bc..6847c86 100644
--- a/drivers/nvme/target/vhost.c
+++ b/drivers/nvme/target/vhost.c
@@ -5,6 +5,7 @@
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/file.h>
+#include <linux/highmem.h>
 #include "../../vhost/vhost.h"
 #include "nvmet.h"
 
@@ -95,6 +96,113 @@ struct nvmet_vhost_ctrl {
 	u32 page_size;
 };
 
+const struct vhost_memory_region *
+find_region(struct vhost_dev *hba, __u64 addr, __u32 len)
+{
+	struct vhost_memory *mem;
+	struct vhost_memory_region *reg;
+	int i;
+
+	if (!hba->memory)
+		return NULL;
+
+	mem = hba->memory;
+	/* linear search is not brilliant, but we really have on the order of 6
+	 * regions in practice */
+	for (i = 0; i < mem->nregions; ++i) {
+		reg = mem->regions + i;
+		if (reg->guest_phys_addr <= addr &&
+		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
+			return reg;
+	}
+	return NULL;
+}
+
+static bool check_region_boundary(const struct vhost_memory_region *reg,
+				  uint64_t addr, size_t len)
+{
+	unsigned long max_size;
+
+	max_size = reg->memory_size - addr + reg->guest_phys_addr;
+	return (max_size < len);
+}
+
+static void __user *map_to_region(const struct vhost_memory_region *reg,
+				   uint64_t addr)
+{
+	return (void __user *)(unsigned long)
+		(reg->userspace_addr + addr - reg->guest_phys_addr);
+}
+
+static void __user *map_guest_to_host(struct vhost_dev *dev,
+				       uint64_t addr, int size)
+{
+	const struct vhost_memory_region *reg = NULL;
+
+	reg = find_region(dev, addr, size);
+	if (unlikely(!reg))
+		return ERR_PTR(-EPERM);
+
+	if (unlikely(check_region_boundary(reg, addr, size)))
+		return ERR_PTR(-EFAULT);
+
+	return map_to_region(reg, addr);
+}
+
+static int nvmet_vhost_rw(struct vhost_dev *dev, u64 guest_pa,
+		void *buf, uint32_t size, int write)
+{
+	void __user *host_user_va;
+	void *host_kernel_va;
+	struct page *page;
+	uintptr_t offset;
+	int ret;
+
+	host_user_va = map_guest_to_host(dev, guest_pa, size);
+	if (unlikely(IS_ERR(host_user_va))) {
+		pr_warn("cannot map guest addr 0x%llx, error %ld\n",
+			(unsigned long long)guest_pa, PTR_ERR(host_user_va));
+		return PTR_ERR(host_user_va);
+	}
+
+	ret = get_user_pages(current, dev->mm,
+				(unsigned long)host_user_va, 1,
+				write, 0, &page, NULL);
+	if (unlikely(ret != 1)) {
+		pr_warn("get_user_pages failed: %d\n", ret);
+		return -EINVAL;
+	}
+
+	host_kernel_va = kmap(page);
+	if (unlikely(!host_kernel_va)) {
+		pr_warn("kmap failed\n");
+		put_page(page);
+		return -EINVAL;
+	}
+
+	offset = (uintptr_t)host_user_va & ~PAGE_MASK;
+	if (write)
+		memcpy(host_kernel_va + offset, buf, size);
+	else
+		memcpy(buf, host_kernel_va + offset, size);
+	kunmap(page);
+	put_page(page);
+
+	return 0;
+}
+
+int nvmet_vhost_read(struct vhost_dev *dev, u64 guest_pa,
+		void *buf, uint32_t size)
+{
+	return nvmet_vhost_rw(dev, guest_pa, buf, size, 0);
+}
+
+int nvmet_vhost_write(struct vhost_dev *dev, u64 guest_pa,
+		void *buf, uint32_t size)
+{
+	return nvmet_vhost_rw(dev, guest_pa, buf, size, 1);
+}
+
 #define sq_to_vsq(sq) container_of(sq, struct nvmet_vhost_sq, sq)
 #define cq_to_vcq(cq) container_of(cq, struct nvmet_vhost_cq, cq)
 
-- 
1.9.1

Thread overview: 30+ messages
2015-11-20  0:20 [Qemu-devel] [RFC PATCH 0/9] vhost-nvme: new qemu nvme backend using nvme target Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 1/9] nvme-vhost: add initial commit Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 2/9] nvme-vhost: add basic ioctl handlers Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 3/9] nvme-vhost: add basic nvme bar read/write Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 4/9] nvmet: add a controller "start" hook Ming Lin
2015-11-20  5:13   ` Christoph Hellwig
2015-11-20  5:31     ` Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 5/9] nvme-vhost: add controller "start" callback Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 6/9] nvmet: add a "parse_extra_admin_cmd" hook Ming Lin
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 7/9] nvme-vhost: add "parse_extra_admin_cmd" callback Ming Lin
2015-11-20  0:21 ` Ming Lin [this message]
2015-11-20  0:21 ` [Qemu-devel] [RFC PATCH 9/9] nvme-vhost: add nvme queue handlers Ming Lin
2015-11-20  5:16 ` [Qemu-devel] [RFC PATCH 0/9] vhost-nvme: new qemu nvme backend using nvme target Christoph Hellwig
2015-11-20  5:33   ` Ming Lin
2015-11-21 13:11 ` Paolo Bonzini
2015-11-23  8:17   ` Ming Lin
2015-11-23 14:14     ` Paolo Bonzini
2015-11-24  7:27       ` Ming Lin
2015-11-24  8:23         ` Ming Lin
2015-11-24 10:51         ` Paolo Bonzini
2015-11-24 19:25           ` Ming Lin
2015-11-25 11:27             ` Paolo Bonzini
2015-11-25 18:51               ` Ming Lin
2015-11-25 19:32                 ` Paolo Bonzini
2015-11-30 23:20       ` Ming Lin
2015-12-01 16:02         ` Paolo Bonzini
2015-12-01 16:26           ` Ming Lin
2015-12-01 16:59             ` Paolo Bonzini
2015-12-02  5:13               ` Ming Lin
2015-12-02 10:07                 ` Paolo Bonzini
