public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Dongwon Kim <dongwon.kim@intel.com>
To: linux-kernel@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org, xen-devel@lists.xenproject.org,
	mateuszx.potrola@intel.com, dongwon.kim@intel.com
Subject: [RFC PATCH 41/60] hyper_dmabuf: re-organize driver source
Date: Tue, 19 Dec 2017 11:29:57 -0800	[thread overview]
Message-ID: <1513711816-2618-41-git-send-email-dongwon.kim@intel.com> (raw)
In-Reply-To: <1513711816-2618-1-git-send-email-dongwon.kim@intel.com>

Re-organized source code for a more intuitive structure

For this,

1. driver's file operations other than ioctls have been moved to
hyper_dmabuf_drv.c.

2. Separated out dma-buf operations from hyper_dmabuf_imp.c
and put those in a new file, 'hyper_dmabuf_ops.c'. The remaining part
(SGT core management) is also put in a new file,
'hyper_dmabuf_sgl_proc.c'. hyper_dmabuf_imp.c and hyper_dmabuf_imp.h
are removed as a result.

3. Header files and Makefile are also updated accordingly.

Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
 drivers/xen/hyper_dmabuf/Makefile                  |   3 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |  95 ++-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        | 682 ---------------------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h        |  48 --
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 136 +---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        |   1 -
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c        | 471 ++++++++++++++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h        |  32 +
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c   | 258 ++++++++
 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h   |  41 ++
 11 files changed, 920 insertions(+), 849 deletions(-)
 delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
 delete mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
 create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h

diff --git a/drivers/xen/hyper_dmabuf/Makefile b/drivers/xen/hyper_dmabuf/Makefile
index 8865f50..5040b9f 100644
--- a/drivers/xen/hyper_dmabuf/Makefile
+++ b/drivers/xen/hyper_dmabuf/Makefile
@@ -7,7 +7,8 @@ ifneq ($(KERNELRELEASE),)
 	$(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \
                                  hyper_dmabuf_ioctl.o \
                                  hyper_dmabuf_list.o \
-				 hyper_dmabuf_imp.o \
+				 hyper_dmabuf_sgl_proc.o \
+				 hyper_dmabuf_ops.o \
 				 hyper_dmabuf_msg.o \
 				 hyper_dmabuf_id.o \
 				 hyper_dmabuf_remote_sync.o \
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
index c802c3e..8c488d7 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c
@@ -28,10 +28,13 @@
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/miscdevice.h>
 #include <linux/workqueue.h>
 #include <linux/device.h>
+#include <linux/dma-buf.h>
 #include "hyper_dmabuf_drv.h"
 #include "hyper_dmabuf_conf.h"
+#include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_id.h"
@@ -44,12 +47,94 @@ extern struct hyper_dmabuf_backend_ops xen_backend_ops;
 MODULE_LICENSE("GPL and additional rights");
 MODULE_AUTHOR("Intel Corporation");
 
-int register_device(void);
-int unregister_device(void);
-
 struct hyper_dmabuf_private hyper_dmabuf_private;
 
-/*===============================================================================================*/
+long hyper_dmabuf_ioctl(struct file *filp,
+			unsigned int cmd, unsigned long param);
+
+void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
+				    void *attr);
+
+int hyper_dmabuf_open(struct inode *inode, struct file *filp)
+{
+	int ret = 0;
+
+	/* Do not allow exclusive open */
+	if (filp->f_flags & O_EXCL)
+		return -EBUSY;
+
+	/*
+	 * Initialize backend if neededm,
+	 * use mutex to prevent race conditions when
+	 * two userspace apps will open device at the same time
+	 */
+	mutex_lock(&hyper_dmabuf_private.lock);
+
+	if (!hyper_dmabuf_private.backend_initialized) {
+		hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
+
+		ret = hyper_dmabuf_private.backend_ops->init_comm_env();
+	        if (ret < 0) {
+			dev_err(hyper_dmabuf_private.device,
+				"failed to initiailize hypervisor-specific comm env\n");
+		} else {
+			hyper_dmabuf_private.backend_initialized = true;
+		}
+	}
+
+	mutex_unlock(&hyper_dmabuf_private.lock);
+
+	return ret;
+}
+
+int hyper_dmabuf_release(struct inode *inode, struct file *filp)
+{
+	hyper_dmabuf_foreach_exported(hyper_dmabuf_emergency_release, filp);
+
+	return 0;
+}
+
+static struct file_operations hyper_dmabuf_driver_fops =
+{
+	.owner = THIS_MODULE,
+	.open = hyper_dmabuf_open,
+	.release = hyper_dmabuf_release,
+	.unlocked_ioctl = hyper_dmabuf_ioctl,
+};
+
+static struct miscdevice hyper_dmabuf_miscdev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "xen/hyper_dmabuf",
+	.fops = &hyper_dmabuf_driver_fops,
+};
+
+int register_device(void)
+{
+	int ret = 0;
+
+	ret = misc_register(&hyper_dmabuf_miscdev);
+
+	if (ret) {
+		printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n");
+		return ret;
+	}
+
+	hyper_dmabuf_private.device = hyper_dmabuf_miscdev.this_device;
+
+	/* TODO: Check if there is a different way to initialize dma mask nicely */
+	dma_coerce_mask_and_coherent(hyper_dmabuf_private.device, DMA_BIT_MASK(64));
+
+	return ret;
+}
+
+void unregister_device(void)
+{
+	dev_info(hyper_dmabuf_private.device,
+		"hyper_dmabuf: unregister_device() is called\n");
+
+	misc_deregister(&hyper_dmabuf_miscdev);
+}
+
 static int __init hyper_dmabuf_drv_init(void)
 {
 	int ret = 0;
@@ -103,7 +188,6 @@ static int __init hyper_dmabuf_drv_init(void)
 	return ret;
 }
 
-/*-----------------------------------------------------------------------------------------------*/
 static void hyper_dmabuf_drv_exit(void)
 {
 #ifdef CONFIG_HYPER_DMABUF_SYSFS
@@ -128,7 +212,6 @@ static void hyper_dmabuf_drv_exit(void)
 
 	unregister_device();
 }
-/*===============================================================================================*/
 
 module_init(hyper_dmabuf_drv_init);
 module_exit(hyper_dmabuf_drv_exit);
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
deleted file mode 100644
index 2bf0835..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Dongwon Kim <dongwon.kim@intel.com>
- *    Mateusz Polrola <mateuszx.potrola@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/dma-buf.h>
-#include <xen/grant_table.h>
-#include <asm/xen/page.h>
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_struct.h"
-#include "hyper_dmabuf_imp.h"
-#include "hyper_dmabuf_id.h"
-#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_list.h"
-
-extern struct hyper_dmabuf_private hyper_dmabuf_private;
-
-#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
-
-int dmabuf_refcount(struct dma_buf *dma_buf)
-{
-	if ((dma_buf != NULL) && (dma_buf->file != NULL))
-		return file_count(dma_buf->file);
-
-	return -1;
-}
-
-/* return total number of pages referenced by a sgt
- * for pre-calculation of # of pages behind a given sgt
- */
-static int hyper_dmabuf_get_num_pgs(struct sg_table *sgt)
-{
-	struct scatterlist *sgl;
-	int length, i;
-	/* at least one page */
-	int num_pages = 1;
-
-	sgl = sgt->sgl;
-
-	length = sgl->length - PAGE_SIZE + sgl->offset;
-	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE); /* round-up */
-
-	for (i = 1; i < sgt->nents; i++) {
-		sgl = sg_next(sgl);
-		num_pages += ((sgl->length + PAGE_SIZE - 1) / PAGE_SIZE); /* round-up */
-	}
-
-	return num_pages;
-}
-
-/* extract pages directly from struct sg_table */
-struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
-{
-	struct hyper_dmabuf_pages_info *pinfo;
-	int i, j, k;
-	int length;
-	struct scatterlist *sgl;
-
-	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
-	if (!pinfo)
-		return NULL;
-
-	pinfo->pages = kmalloc(sizeof(struct page *)*hyper_dmabuf_get_num_pgs(sgt), GFP_KERNEL);
-	if (!pinfo->pages)
-		return NULL;
-
-	sgl = sgt->sgl;
-
-	pinfo->nents = 1;
-	pinfo->frst_ofst = sgl->offset;
-	pinfo->pages[0] = sg_page(sgl);
-	length = sgl->length - PAGE_SIZE + sgl->offset;
-	i = 1;
-
-	while (length > 0) {
-		pinfo->pages[i] = nth_page(sg_page(sgl), i);
-		length -= PAGE_SIZE;
-		pinfo->nents++;
-		i++;
-	}
-
-	for (j = 1; j < sgt->nents; j++) {
-		sgl = sg_next(sgl);
-		pinfo->pages[i++] = sg_page(sgl);
-		length = sgl->length - PAGE_SIZE;
-		pinfo->nents++;
-		k = 1;
-
-		while (length > 0) {
-			pinfo->pages[i++] = nth_page(sg_page(sgl), k++);
-			length -= PAGE_SIZE;
-			pinfo->nents++;
-		}
-	}
-
-	/*
-	 * lenght at that point will be 0 or negative,
-	 * so to calculate last page size just add it to PAGE_SIZE
-	 */
-	pinfo->last_len = PAGE_SIZE + length;
-
-	return pinfo;
-}
-
-/* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
-					 int frst_ofst, int last_len, int nents)
-{
-	struct sg_table *sgt;
-	struct scatterlist *sgl;
-	int i, ret;
-
-	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
-	if (!sgt) {
-		return NULL;
-	}
-
-	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
-	if (ret) {
-		if (sgt) {
-			sg_free_table(sgt);
-			kfree(sgt);
-		}
-
-		return NULL;
-	}
-
-	sgl = sgt->sgl;
-
-	sg_set_page(sgl, pages[0], PAGE_SIZE-frst_ofst, frst_ofst);
-
-	for (i=1; i<nents-1; i++) {
-		sgl = sg_next(sgl);
-		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
-	}
-
-	if (nents > 1) /* more than one page */ {
-		sgl = sg_next(sgl);
-		sg_set_page(sgl, pages[i], last_len, 0);
-	}
-
-	return sgt;
-}
-
-int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force)
-{
-	struct sgt_list *sgtl;
-	struct attachment_list *attachl;
-	struct kmap_vaddr_list *va_kmapl;
-	struct vmap_vaddr_list *va_vmapl;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-
-	if (!sgt_info) {
-		dev_err(hyper_dmabuf_private.device, "invalid hyper_dmabuf_id\n");
-		return -EINVAL;
-	}
-
-	/* if force != 1, sgt_info can be released only if
-	 * there's no activity on exported dma-buf on importer
-	 * side.
-	 */
-	if (!force &&
-	    sgt_info->importer_exported) {
-		dev_warn(hyper_dmabuf_private.device, "dma-buf is used by importer\n");
-		return -EPERM;
-	}
-
-	/* force == 1 is not recommended */
-	while (!list_empty(&sgt_info->va_kmapped->list)) {
-		va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
-					    struct kmap_vaddr_list, list);
-
-		dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
-		list_del(&va_kmapl->list);
-		kfree(va_kmapl);
-	}
-
-	while (!list_empty(&sgt_info->va_vmapped->list)) {
-		va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
-					    struct vmap_vaddr_list, list);
-
-		dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
-		list_del(&va_vmapl->list);
-		kfree(va_vmapl);
-	}
-
-	while (!list_empty(&sgt_info->active_sgts->list)) {
-		attachl = list_first_entry(&sgt_info->active_attached->list,
-					   struct attachment_list, list);
-
-		sgtl = list_first_entry(&sgt_info->active_sgts->list,
-					struct sgt_list, list);
-
-		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
-					 DMA_BIDIRECTIONAL);
-		list_del(&sgtl->list);
-		kfree(sgtl);
-	}
-
-	while (!list_empty(&sgt_info->active_sgts->list)) {
-		attachl = list_first_entry(&sgt_info->active_attached->list,
-					   struct attachment_list, list);
-
-		dma_buf_detach(sgt_info->dma_buf, attachl->attach);
-		list_del(&attachl->list);
-		kfree(attachl);
-	}
-
-	/* Start cleanup of buffer in reverse order to exporting */
-	ops->unshare_pages(&sgt_info->refs_info, sgt_info->nents);
-
-	/* unmap dma-buf */
-	dma_buf_unmap_attachment(sgt_info->active_attached->attach,
-				 sgt_info->active_sgts->sgt,
-				 DMA_BIDIRECTIONAL);
-
-	/* detatch dma-buf */
-	dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
-
-	/* close connection to dma-buf completely */
-	dma_buf_put(sgt_info->dma_buf);
-	sgt_info->dma_buf = NULL;
-
-	kfree(sgt_info->active_sgts);
-	kfree(sgt_info->active_attached);
-	kfree(sgt_info->va_kmapped);
-	kfree(sgt_info->va_vmapped);
-
-	return 0;
-}
-
-#define WAIT_AFTER_SYNC_REQ 0
-
-inline int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
-{
-	struct hyper_dmabuf_req *req;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-	int operands[5];
-	int i;
-	int ret;
-
-	operands[0] = hid.id;
-
-	for (i=0; i<3; i++)
-		operands[i+1] = hid.rng_key[i];
-
-	operands[4] = dmabuf_ops;
-
-	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
-
-	if (!req) {
-		dev_err(hyper_dmabuf_private.device,
-			"No memory left to be allocated\n");
-		return -ENOMEM;
-	}
-
-	hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
-
-	/* send request and wait for a response */
-	ret = ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, WAIT_AFTER_SYNC_REQ);
-
-	kfree(req);
-
-	return ret;
-}
-
-static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,
-			struct dma_buf_attachment *attach)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!attach->dmabuf->priv)
-		return -EINVAL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_ATTACH);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attachment *attach)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!attach->dmabuf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_DETACH);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-}
-
-static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachment,
-						enum dma_data_direction dir)
-{
-	struct sg_table *st;
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	struct hyper_dmabuf_pages_info *page_info;
-	int ret;
-
-	if (!attachment->dmabuf->priv)
-		return NULL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
-
-	/* extract pages from sgt */
-	page_info = hyper_dmabuf_ext_pgs(sgt_info->sgt);
-
-	if (!page_info) {
-		return NULL;
-	}
-
-	/* create a new sg_table with extracted pages */
-	st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
-				page_info->last_len, page_info->nents);
-	if (!st)
-		goto err_free_sg;
-
-        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
-                goto err_free_sg;
-        }
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_MAP);
-
-	kfree(page_info->pages);
-	kfree(page_info);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return st;
-
-err_free_sg:
-	if (st) {
-		sg_free_table(st);
-		kfree(st);
-	}
-
-	return NULL;
-}
-
-static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
-				   struct sg_table *sg,
-				   enum dma_data_direction dir)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!attachment->dmabuf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
-
-	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
-
-	sg_free_table(sg);
-	kfree(sg);
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_UNMAP);
-
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-}
-
-static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
-	int ret;
-	int final_release;
-
-	if (!dma_buf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dma_buf->priv;
-
-	if (!dmabuf_refcount(sgt_info->dma_buf)) {
-		sgt_info->dma_buf = NULL;
-	}
-
-	sgt_info->num_importers--;
-
-	if (sgt_info->num_importers == 0) {
-		ops->unmap_shared_pages(&sgt_info->refs_info, sgt_info->nents);
-
-		if (sgt_info->sgt) {
-			sg_free_table(sgt_info->sgt);
-			kfree(sgt_info->sgt);
-			sgt_info->sgt = NULL;
-		}
-	}
-
-	final_release = sgt_info && !sgt_info->valid &&
-		        !sgt_info->num_importers;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_RELEASE);
-	if (ret < 0) {
-		dev_warn(hyper_dmabuf_private.device,
-			 "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	/*
-	 * Check if buffer is still valid and if not remove it from imported list.
-	 * That has to be done after sending sync request
-	 */
-	if (final_release) {
-		hyper_dmabuf_remove_imported(sgt_info->hid);
-		kfree(sgt_info);
-	}
-}
-
-static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return -EINVAL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return ret;
-}
-
-static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return -EINVAL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_END_CPU_ACCESS);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return 0;
-}
-
-static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return NULL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_KMAP_ATOMIC);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return NULL; /* for now NULL.. need to return the address of mapped region */
-}
-
-static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-}
-
-static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return NULL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_KMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return NULL; /* for now NULL.. need to return the address of mapped region */
-}
-
-static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_KUNMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-}
-
-static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return -EINVAL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_MMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return ret;
-}
-
-static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return NULL;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_VMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-
-	return NULL;
-}
-
-static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
-	struct hyper_dmabuf_imported_sgt_info *sgt_info;
-	int ret;
-
-	if (!dmabuf->priv)
-		return;
-
-	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
-
-	ret = hyper_dmabuf_sync_request(sgt_info->hid,
-					HYPER_DMABUF_OPS_VUNMAP);
-	if (ret < 0) {
-		dev_err(hyper_dmabuf_private.device,
-			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
-	}
-}
-
-static const struct dma_buf_ops hyper_dmabuf_ops = {
-		.attach = hyper_dmabuf_ops_attach,
-		.detach = hyper_dmabuf_ops_detach,
-		.map_dma_buf = hyper_dmabuf_ops_map,
-		.unmap_dma_buf = hyper_dmabuf_ops_unmap,
-		.release = hyper_dmabuf_ops_release,
-		.begin_cpu_access = (void*)hyper_dmabuf_ops_begin_cpu_access,
-		.end_cpu_access = (void*)hyper_dmabuf_ops_end_cpu_access,
-		.map_atomic = hyper_dmabuf_ops_kmap_atomic,
-		.unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
-		.map = hyper_dmabuf_ops_kmap,
-		.unmap = hyper_dmabuf_ops_kunmap,
-		.mmap = hyper_dmabuf_ops_mmap,
-		.vmap = hyper_dmabuf_ops_vmap,
-		.vunmap = hyper_dmabuf_ops_vunmap,
-};
-
-/* exporting dmabuf as fd */
-int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags)
-{
-	int fd = -1;
-
-	/* call hyper_dmabuf_export_dmabuf and create
-	 * and bind a handle for it then release
-	 */
-	hyper_dmabuf_export_dma_buf(dinfo);
-
-	if (dinfo->dma_buf) {
-		fd = dma_buf_fd(dinfo->dma_buf, flags);
-	}
-
-	return fd;
-}
-
-void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo)
-{
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
-	exp_info.ops = &hyper_dmabuf_ops;
-
-	/* multiple of PAGE_SIZE, not considering offset */
-	exp_info.size = dinfo->sgt->nents * PAGE_SIZE;
-	exp_info.flags = /* not sure about flag */0;
-	exp_info.priv = dinfo;
-
-	dinfo->dma_buf = dma_buf_export(&exp_info);
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
deleted file mode 100644
index eda075b3..0000000
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright © 2017 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#ifndef __HYPER_DMABUF_IMP_H__
-#define __HYPER_DMABUF_IMP_H__
-
-#include <linux/fs.h>
-#include "hyper_dmabuf_struct.h"
-
-/* extract pages directly from struct sg_table */
-struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
-
-/* create sg_table with given pages and other parameters */
-struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
-                                int frst_ofst, int last_len, int nents);
-
-int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force);
-
-void hyper_dmabuf_free_sgt(struct sg_table *sgt);
-
-int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags);
-
-void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
-
-int dmabuf_refcount(struct dma_buf *dma_buf);
-
-#endif /* __HYPER_DMABUF_IMP_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
index 9d05d66..283fe5a 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -41,7 +41,8 @@
 #include "hyper_dmabuf_ioctl.h"
 #include "hyper_dmabuf_list.h"
 #include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_imp.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_ops.h"
 #include "hyper_dmabuf_query.h"
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -618,7 +619,29 @@ static int hyper_dmabuf_query_ioctl(struct file *filp, void *data)
 	return 0;
 }
 
-static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
+void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
+				    void *attr)
+{
+	struct ioctl_hyper_dmabuf_unexport unexport_attr;
+	struct file *filp = (struct file*) attr;
+
+	if (!filp || !sgt_info)
+		return;
+
+	if (sgt_info->filp == filp) {
+		dev_dbg(hyper_dmabuf_private.device,
+			"Executing emergency release of buffer {id:%d key:%d %d %d}\n",
+			 sgt_info->hid.id, sgt_info->hid.rng_key[0],
+			 sgt_info->hid.rng_key[1], sgt_info->hid.rng_key[2]);
+
+		unexport_attr.hid = sgt_info->hid;
+		unexport_attr.delay_ms = 0;
+
+		hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
+	}
+}
+
+const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, hyper_dmabuf_tx_ch_setup_ioctl, 0),
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, hyper_dmabuf_rx_ch_setup_ioctl, 0),
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, hyper_dmabuf_export_remote_ioctl, 0),
@@ -627,7 +650,7 @@ static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
 	HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, hyper_dmabuf_query_ioctl, 0),
 };
 
-static long hyper_dmabuf_ioctl(struct file *filp,
+long hyper_dmabuf_ioctl(struct file *filp,
 			unsigned int cmd, unsigned long param)
 {
 	const struct hyper_dmabuf_ioctl_desc *ioctl = NULL;
@@ -672,110 +695,3 @@ static long hyper_dmabuf_ioctl(struct file *filp,
 
 	return ret;
 }
-
-int hyper_dmabuf_open(struct inode *inode, struct file *filp)
-{
-	int ret = 0;
-
-	/* Do not allow exclusive open */
-	if (filp->f_flags & O_EXCL)
-		return -EBUSY;
-
-	/*
-	 * Initialize backend if neededm,
-	 * use mutex to prevent race conditions when
-	 * two userspace apps will open device at the same time
-	 */
-	mutex_lock(&hyper_dmabuf_private.lock);
-
-	if (!hyper_dmabuf_private.backend_initialized) {
-		hyper_dmabuf_private.domid = hyper_dmabuf_private.backend_ops->get_vm_id();
-
-		ret = hyper_dmabuf_private.backend_ops->init_comm_env();
-	        if (ret < 0) {
-			dev_err(hyper_dmabuf_private.device,
-				"failed to initiailize hypervisor-specific comm env\n");
-		} else {
-			hyper_dmabuf_private.backend_initialized = true;
-		}
-	}
-
-	mutex_unlock(&hyper_dmabuf_private.lock);
-
-	return ret;
-}
-
-static void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_info,
-					   void *attr)
-{
-	struct ioctl_hyper_dmabuf_unexport unexport_attr;
-	struct file *filp = (struct file*) attr;
-
-	if (!filp || !sgt_info)
-		return;
-
-	if (sgt_info->filp == filp) {
-		dev_dbg(hyper_dmabuf_private.device,
-			"Executing emergency release of buffer {id:%d key:%d %d %d}\n",
-			 sgt_info->hid.id, sgt_info->hid.rng_key[0],
-			 sgt_info->hid.rng_key[1], sgt_info->hid.rng_key[2]);
-
-		unexport_attr.hid = sgt_info->hid;
-		unexport_attr.delay_ms = 0;
-
-		hyper_dmabuf_unexport_ioctl(filp, &unexport_attr);
-	}
-}
-
-int hyper_dmabuf_release(struct inode *inode, struct file *filp)
-{
-	hyper_dmabuf_foreach_exported(hyper_dmabuf_emergency_release, filp);
-
-	return 0;
-}
-
-/*===============================================================================================*/
-static struct file_operations hyper_dmabuf_driver_fops =
-{
-   .owner = THIS_MODULE,
-   .open = hyper_dmabuf_open,
-   .release = hyper_dmabuf_release,
-   .unlocked_ioctl = hyper_dmabuf_ioctl,
-};
-
-static struct miscdevice hyper_dmabuf_miscdev = {
-	.minor = MISC_DYNAMIC_MINOR,
-	.name = "xen/hyper_dmabuf",
-	.fops = &hyper_dmabuf_driver_fops,
-};
-
-static const char device_name[] = "hyper_dmabuf";
-
-/*===============================================================================================*/
-int register_device(void)
-{
-	int ret = 0;
-
-	ret = misc_register(&hyper_dmabuf_miscdev);
-
-	if (ret) {
-		printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n");
-		return ret;
-	}
-
-	hyper_dmabuf_private.device = hyper_dmabuf_miscdev.this_device;
-
-	/* TODO: Check if there is a different way to initialize dma mask nicely */
-	dma_coerce_mask_and_coherent(hyper_dmabuf_private.device, DMA_BIT_MASK(64));
-
-	return ret;
-}
-
-/*-----------------------------------------------------------------------------------------------*/
-void unregister_device(void)
-{
-	dev_info(hyper_dmabuf_private.device,
-		 "hyper_dmabuf: unregister_device() is called\n");
-
-	misc_deregister(&hyper_dmabuf_miscdev);
-}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
index 12ebad3..c516df8 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c
@@ -35,7 +35,6 @@
 #include <linux/workqueue.h>
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_imp.h"
 #include "hyper_dmabuf_remote_sync.h"
 #include "hyper_dmabuf_list.h"
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
new file mode 100644
index 0000000..81cb09f
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim <dongwon.kim@intel.com>
+ *    Mateusz Polrola <mateuszx.potrola@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_ops.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
+
+#define WAIT_AFTER_SYNC_REQ 0
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+inline int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	int operands[5];
+	int i;
+	int ret;
+
+	operands[0] = hid.id;
+
+	for (i=0; i<3; i++)
+		operands[i+1] = hid.rng_key[i];
+
+	operands[4] = dmabuf_ops;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req) {
+		dev_err(hyper_dmabuf_private.device,
+			"No memory left to be allocated\n");
+		return -ENOMEM;
+	}
+
+	hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
+
+	/* send request and wait for a response */
+	ret = ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, WAIT_AFTER_SYNC_REQ);
+
+	kfree(req);
+
+	return ret;
+}
+
+static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,
+			struct dma_buf_attachment *attach)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!attach->dmabuf->priv)
+		return -EINVAL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_ATTACH);
+
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attachment *attach)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!attach->dmabuf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_DETACH);
+
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+}
+
+static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachment,
+						enum dma_data_direction dir)
+{
+	struct sg_table *st;
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct hyper_dmabuf_pages_info *page_info;
+	int ret;
+
+	if (!attachment->dmabuf->priv)
+		return NULL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
+
+	/* extract pages from sgt */
+	page_info = hyper_dmabuf_ext_pgs(sgt_info->sgt);
+
+	if (!page_info) {
+		return NULL;
+	}
+
+	/* create a new sg_table with extracted pages */
+	st = hyper_dmabuf_create_sgt(page_info->pages, page_info->frst_ofst,
+				page_info->last_len, page_info->nents);
+	if (!st)
+		goto err_free_sg;
+
+        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+                goto err_free_sg;
+        }
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_MAP);
+
+	kfree(page_info->pages);
+	kfree(page_info);
+
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return st;
+
+err_free_sg:
+	if (st) {
+		sg_free_table(st);
+		kfree(st);
+	}
+
+	return NULL;
+}
+
+static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!attachment->dmabuf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attachment->dmabuf->priv;
+
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+
+	sg_free_table(sg);
+	kfree(sg);
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_UNMAP);
+
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+}
+
+static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+	int ret;
+	int final_release;
+
+	if (!dma_buf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dma_buf->priv;
+
+	if (!dmabuf_refcount(sgt_info->dma_buf)) {
+		sgt_info->dma_buf = NULL;
+	}
+
+	sgt_info->num_importers--;
+
+	if (sgt_info->num_importers == 0) {
+		ops->unmap_shared_pages(&sgt_info->refs_info, sgt_info->nents);
+
+		if (sgt_info->sgt) {
+			sg_free_table(sgt_info->sgt);
+			kfree(sgt_info->sgt);
+			sgt_info->sgt = NULL;
+		}
+	}
+
+	final_release = sgt_info && !sgt_info->valid &&
+		        !sgt_info->num_importers;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_RELEASE);
+	if (ret < 0) {
+		dev_warn(hyper_dmabuf_private.device,
+			 "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	/*
+	 * Check if buffer is still valid and if not remove it from imported list.
+	 * That has to be done after sending sync request
+	 */
+	if (final_release) {
+		hyper_dmabuf_remove_imported(sgt_info->hid);
+		kfree(sgt_info);
+	}
+}
+
+static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return ret;
+}
+
+static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_END_CPU_ACCESS);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return 0;
+}
+
+static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_KMAP_ATOMIC);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return NULL; /* for now NULL.. need to return the address of mapped region */
+}
+
+static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+}
+
+static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_KMAP);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return NULL; /* for now NULL.. need to return the address of mapped region */
+}
+
+static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, void *vaddr)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_KUNMAP);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+}
+
+static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return -EINVAL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_MMAP);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return ret;
+}
+
+static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return NULL;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_VMAP);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+
+	return NULL;
+}
+
+static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	struct hyper_dmabuf_imported_sgt_info *sgt_info;
+	int ret;
+
+	if (!dmabuf->priv)
+		return;
+
+	sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+
+	ret = hyper_dmabuf_sync_request(sgt_info->hid,
+					HYPER_DMABUF_OPS_VUNMAP);
+	if (ret < 0) {
+		dev_err(hyper_dmabuf_private.device,
+			"hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+	}
+}
+
+static const struct dma_buf_ops hyper_dmabuf_ops = {
+		.attach = hyper_dmabuf_ops_attach,
+		.detach = hyper_dmabuf_ops_detach,
+		.map_dma_buf = hyper_dmabuf_ops_map,
+		.unmap_dma_buf = hyper_dmabuf_ops_unmap,
+		.release = hyper_dmabuf_ops_release,
+		.begin_cpu_access = (void*)hyper_dmabuf_ops_begin_cpu_access,
+		.end_cpu_access = (void*)hyper_dmabuf_ops_end_cpu_access,
+		.map_atomic = hyper_dmabuf_ops_kmap_atomic,
+		.unmap_atomic = hyper_dmabuf_ops_kunmap_atomic,
+		.map = hyper_dmabuf_ops_kmap,
+		.unmap = hyper_dmabuf_ops_kunmap,
+		.mmap = hyper_dmabuf_ops_mmap,
+		.vmap = hyper_dmabuf_ops_vmap,
+		.vunmap = hyper_dmabuf_ops_vunmap,
+};
+
+/* exporting dmabuf as fd */
+int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags)
+{
+	int fd = -1;
+
+	/* call hyper_dmabuf_export_dmabuf and create
+	 * and bind a handle for it then release
+	 */
+	hyper_dmabuf_export_dma_buf(dinfo);
+
+	if (dinfo->dma_buf) {
+		fd = dma_buf_fd(dinfo->dma_buf, flags);
+	}
+
+	return fd;
+}
+
+void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	exp_info.ops = &hyper_dmabuf_ops;
+
+	/* multiple of PAGE_SIZE, not considering offset */
+	exp_info.size = dinfo->sgt->nents * PAGE_SIZE;
+	exp_info.flags = /* not sure about flag */0;
+	exp_info.priv = dinfo;
+
+	dinfo->dma_buf = dma_buf_export(&exp_info);
+}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
new file mode 100644
index 0000000..8c06fc6
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_ops.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_OPS_H__
+#define __HYPER_DMABUF_OPS_H__
+
+int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags);
+
+void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
+
+#endif /* __HYPER_DMABUF_OPS_H__ */
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
index be1d395..9004406 100644
--- a/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -37,7 +37,7 @@
 #include "hyper_dmabuf_msg.h"
 #include "hyper_dmabuf_id.h"
 #include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_imp.h"
+#include "hyper_dmabuf_sgl_proc.h"
 
 extern struct hyper_dmabuf_private hyper_dmabuf_private;
 
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
new file mode 100644
index 0000000..c2d013a
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim <dongwon.kim@intel.com>
+ *    Mateusz Polrola <mateuszx.potrola@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+int dmabuf_refcount(struct dma_buf *dma_buf)
+{
+	if ((dma_buf != NULL) && (dma_buf->file != NULL))
+		return file_count(dma_buf->file);
+
+	return -1;
+}
+
+/* return total number of pages referenced by a sgt
+ * for pre-calculation of # of pages behind a given sgt
+ */
+static int hyper_dmabuf_get_num_pgs(struct sg_table *sgt)
+{
+	struct scatterlist *sgl;
+	int length, i;
+	/* at least one page */
+	int num_pages = 1;
+
+	sgl = sgt->sgl;
+
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE); /* round-up */
+
+	for (i = 1; i < sgt->nents; i++) {
+		sgl = sg_next(sgl);
+		num_pages += ((sgl->length + PAGE_SIZE - 1) / PAGE_SIZE); /* round-up */
+	}
+
+	return num_pages;
+}
+
+/* extract pages directly from struct sg_table */
+struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
+{
+	struct hyper_dmabuf_pages_info *pinfo;
+	int i, j, k;
+	int length;
+	struct scatterlist *sgl;
+
+	pinfo = kmalloc(sizeof(*pinfo), GFP_KERNEL);
+	if (!pinfo)
+		return NULL;
+
+	pinfo->pages = kmalloc(sizeof(struct page *)*hyper_dmabuf_get_num_pgs(sgt), GFP_KERNEL);
+	if (!pinfo->pages)
+		return NULL;
+
+	sgl = sgt->sgl;
+
+	pinfo->nents = 1;
+	pinfo->frst_ofst = sgl->offset;
+	pinfo->pages[0] = sg_page(sgl);
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+	i = 1;
+
+	while (length > 0) {
+		pinfo->pages[i] = nth_page(sg_page(sgl), i);
+		length -= PAGE_SIZE;
+		pinfo->nents++;
+		i++;
+	}
+
+	for (j = 1; j < sgt->nents; j++) {
+		sgl = sg_next(sgl);
+		pinfo->pages[i++] = sg_page(sgl);
+		length = sgl->length - PAGE_SIZE;
+		pinfo->nents++;
+		k = 1;
+
+		while (length > 0) {
+			pinfo->pages[i++] = nth_page(sg_page(sgl), k++);
+			length -= PAGE_SIZE;
+			pinfo->nents++;
+		}
+	}
+
+	/*
+	 * length at that point will be 0 or negative,
+	 * so to calculate last page size just add it to PAGE_SIZE
+	 */
+	pinfo->last_len = PAGE_SIZE + length;
+
+	return pinfo;
+}
+
+/* create sg_table with given pages and other parameters */
+struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
+					 int frst_ofst, int last_len, int nents)
+{
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	int i, ret;
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt) {
+		return NULL;
+	}
+
+	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+	if (ret) {
+		if (sgt) {
+			sg_free_table(sgt);
+			kfree(sgt);
+		}
+
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	sg_set_page(sgl, pages[0], PAGE_SIZE-frst_ofst, frst_ofst);
+
+	for (i=1; i<nents-1; i++) {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
+	}
+
+	if (nents > 1) /* more than one page */ {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pages[i], last_len, 0);
+	}
+
+	return sgt;
+}
+
+int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force)
+{
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
+
+	if (!sgt_info) {
+		dev_err(hyper_dmabuf_private.device, "invalid hyper_dmabuf_id\n");
+		return -EINVAL;
+	}
+
+	/* if force != 1, sgt_info can be released only if
+	 * there's no activity on exported dma-buf on importer
+	 * side.
+	 */
+	if (!force &&
+	    sgt_info->importer_exported) {
+		dev_warn(hyper_dmabuf_private.device, "dma-buf is used by importer\n");
+		return -EPERM;
+	}
+
+	/* force == 1 is not recommended */
+	while (!list_empty(&sgt_info->va_kmapped->list)) {
+		va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+
+		dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+	}
+
+	while (!list_empty(&sgt_info->va_vmapped->list)) {
+		va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+
+		dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+		list_del(&va_vmapl->list);
+		kfree(va_vmapl);
+	}
+
+	while (!list_empty(&sgt_info->active_sgts->list)) {
+		attachl = list_first_entry(&sgt_info->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = list_first_entry(&sgt_info->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+	}
+
+	while (!list_empty(&sgt_info->active_sgts->list)) {
+		attachl = list_first_entry(&sgt_info->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(sgt_info->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+	}
+
+	/* Start cleanup of buffer in reverse order to exporting */
+	ops->unshare_pages(&sgt_info->refs_info, sgt_info->nents);
+
+	/* unmap dma-buf */
+	dma_buf_unmap_attachment(sgt_info->active_attached->attach,
+				 sgt_info->active_sgts->sgt,
+				 DMA_BIDIRECTIONAL);
+
+	/* detach dma-buf */
+	dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
+
+	/* close connection to dma-buf completely */
+	dma_buf_put(sgt_info->dma_buf);
+	sgt_info->dma_buf = NULL;
+
+	kfree(sgt_info->active_sgts);
+	kfree(sgt_info->active_attached);
+	kfree(sgt_info->va_kmapped);
+	kfree(sgt_info->va_vmapped);
+
+	return 0;
+}
diff --git a/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
new file mode 100644
index 0000000..237ccf5
--- /dev/null
+++ b/drivers/xen/hyper_dmabuf/hyper_dmabuf_sgl_proc.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_SGL_PROC_H__
+#define __HYPER_DMABUF_SGL_PROC_H__
+
+int dmabuf_refcount(struct dma_buf *dma_buf);
+
+/* extract pages directly from struct sg_table */
+struct hyper_dmabuf_pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt);
+
+/* create sg_table with given pages and other parameters */
+struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
+                                int frst_ofst, int last_len, int nents);
+
+int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force);
+
+void hyper_dmabuf_free_sgt(struct sg_table *sgt);
+
+#endif /* __HYPER_DMABUF_SGL_PROC_H__ */
-- 
2.7.4

  parent reply	other threads:[~2017-12-19 19:42 UTC|newest]

Thread overview: 70+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-12-19 19:29 [RFC PATCH 01/60] hyper_dmabuf: initial working version of hyper_dmabuf drv Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 02/60] hyper_dmabuf: added a doc for hyper_dmabuf sharing Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 03/60] hyper_dmabuf: re-use dma_buf previously exported if exist Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 04/60] hyper_dmabuf: new index, k for pointing a right n-th page Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 05/60] hyper_dmabuf: skip creating a comm ch if exist for the VM Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 06/60] hyper_dmabuf: map shared pages only once when importing Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 07/60] hyper_dmabuf: message parsing done via workqueue Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 08/60] hyper_dmabuf: automatic comm channel initialization using xenstore Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 09/60] hyper_dmabuf: indirect DMA_BUF synchronization via shadowing Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 10/60] hyper_dmabuf: make sure to free memory to prevent leak Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 11/60] hyper_dmabuf: check stack before unmapping/detaching shadow DMA_BUF Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 12/60] hyper_dmabuf: two different unexporting mechanisms Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 13/60] hyper_dmabuf: postponing cleanup of hyper_DMABUF Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 14/60] hyper_dmabuf: clean-up process based on file->f_count Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 15/60] hyper_dmabuf: reusing previously released hyper_dmabuf_id Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 16/60] hyper_dmabuf: define hypervisor specific backend API Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 17/60] hyper_dmabuf: use dynamic debug macros for logging Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 18/60] hyper_dmabuf: reset comm channel when one end has disconnected Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 19/60] hyper_dmabuf: fix the case with sharing a buffer with 2 pages Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 20/60] hyper_dmabuf: optimized loop with less condition check Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 21/60] hyper_dmabuf: exposing drv information using sysfs Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 22/60] hyper_dmabuf: configure license Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 23/60] hyper_dmabuf: use CONFIG_HYPER_DMABUF_XEN instead of CONFIG_XEN Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 24/60] hyper_dmabuf: waits for resp only if WAIT_AFTER_SYNC_REQ == 1 Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 25/60] hyper_dmabuf: introduced delayed unexport Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 26/60] hyper_dmabuf: add mutexes to prevent several race conditions Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 27/60] hyper_dmabuf: use proper error codes Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 28/60] hyper_dmabuf: address several synchronization issues Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 29/60] hyper_dmabuf: make sure to release allocated buffers when exiting Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 30/60] hyper_dmabuf: free already mapped pages when error happens Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 31/60] hyper_dmabuf: built-in compilation option Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 32/60] hyper_dmabuf: make all shared pages read-only Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 33/60] hyper_dmabuf: error checking on the result of dma_buf_map_attachment Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 34/60] hyper_dmabuf: extend DMA bitmask to 64-bits Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 35/60] hyper_dmabuf: 128bit hyper_dmabuf_id with random keys Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 36/60] hyper_dmabuf: error handling when share_pages fails Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 37/60] hyper_dmabuf: implementation of query ioctl Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 38/60] hyper_dmabuf: preventing self exporting of dma_buf Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 39/60] hyper_dmabuf: correcting DMA-BUF clean-up order Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 40/60] hyper_dmabuf: do not use 'private' as field name Dongwon Kim
2017-12-19 19:29 ` Dongwon Kim [this message]
2017-12-19 19:29 ` [RFC PATCH 42/60] hyper_dmabuf: always generate a new random keys Dongwon Kim
2017-12-19 19:29 ` [RFC PATCH 43/60] hyper_dmabuf: fixes on memory leaks in various places Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 44/60] hyper_dmabuf: proper handling of sgt_info->priv Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 45/60] hyper_dmabuf: adding poll/read for event generation Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 46/60] hyper_dmabuf: delay auto initialization of comm_env Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 47/60] hyper_dmabuf: fix issues with event-polling Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 48/60] hyper_dmabuf: add query items for buffer private info Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 49/60] hyper_dmabuf: general clean-up and fixes Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 50/60] hyper_dmabuf: fix styling err and warns caught by checkpatch.pl Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 51/60] hyper_dmabuf: missing mutex_unlock and move spinlock Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 52/60] hyper_dmabuf: remove prefix 'hyper_dmabuf' from static func and backend APIs Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 53/60] hyper_dmabuf: define fastpath_export for exporting existing buffer Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 54/60] hyper_dmabuf: 'backend_ops' reduced to 'bknd_ops' and 'ops' to 'bknd_ops' Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 55/60] hyper_dmabuf: fixed wrong send_req call Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 56/60] hyper_dmabuf: add initialization and cleanup to bknd_ops Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 57/60] hyper_dmabuf: change type of ref to shared pages to unsigned long Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 58/60] hyper_dmabuf: move device node out of /dev/xen/ Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 59/60] hyper_dmabuf: freeing hy_drv_priv when drv init fails (v2) Dongwon Kim
2017-12-19 19:30 ` [RFC PATCH 60/60] hyper_dmabuf: move hyper_dmabuf to under drivers/dma-buf/ Dongwon Kim
2017-12-19 23:27 ` [RFC PATCH 01/60] hyper_dmabuf: initial working version of hyper_dmabuf drv Dongwon Kim
2017-12-20  8:17   ` [Xen-devel] " Juergen Gross
2018-01-10 23:21     ` Dongwon Kim
2017-12-20  8:38   ` Oleksandr Andrushchenko
2018-01-10 23:14     ` Dongwon Kim
2017-12-20  9:59   ` Daniel Vetter
2017-12-26 18:19     ` Matt Roper
2017-12-29 13:03       ` Tomeu Vizoso
2018-01-10 23:13     ` Dongwon Kim
2018-02-15  1:34 ` Dongwon Kim

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1513711816-2618-41-git-send-email-dongwon.kim@intel.com \
    --to=dongwon.kim@intel.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mateuszx.potrola@intel.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox