From: Mauro Carvalho Chehab <mchehab@redhat.com>
To: Tomasz Stanislawski <t.stanislaws@samsung.com>
Cc: linaro-mm-sig@lists.linaro.org, linux-media@vger.kernel.org,
sumit.semwal@ti.com, jesse.barker@linaro.org, rob@ti.com,
daniel@ffwll.ch, m.szyprowski@samsung.com,
kyungmin.park@samsung.com, hverkuil@xs4all.nl,
laurent.pinchart@ideasonboard.com, pawel@osciak.com,
Andrzej Pietrasiewicz <andrzej.p@samsung.com>,
Sumit Semwal <sumit.semwal@linaro.org>
Subject: Re: [PATCH 08/10] v4l: vb2-dma-contig: code refactoring, support for DMABUF exporting
Date: Mon, 23 Jan 2012 12:26:14 -0200
Message-ID: <4F1D6E06.7000409@redhat.com>
In-Reply-To: <1327326675-8431-9-git-send-email-t.stanislaws@samsung.com>
On 23-01-2012 11:51, Tomasz Stanislawski wrote:
> Signed-off-by: Pawel Osciak <pawel@osciak.com>
> [author of the original file]
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> [implementation of mmap, finish/prepare]
> Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
> [implementation of userptr handling]
> Signed-off-by: Sumit Semwal <sumit.semwal@ti.com>
> Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
> [PoC for importing dmabuf buffer]
> Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
> [buffer exporting using dmabuf, code refactoring]
> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Please provide a diff against the existing driver, in order to allow reviewing
what has changed.
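(Even something as simple as the output of

	diff -u old/videobuf2-dma-contig.c new/videobuf2-dma-contig.c

posted along with the series would do; "old" and "new" are just placeholders for
the file before the existing allocator was removed in patch 07 and the file added
by this patch.)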
> ---
> drivers/media/video/videobuf2-dma-contig.c | 762 ++++++++++++++++++++++++++++
> 1 files changed, 762 insertions(+), 0 deletions(-)
> create mode 100644 drivers/media/video/videobuf2-dma-contig.c
>
> diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
> new file mode 100644
> index 0000000..d95b23a
> --- /dev/null
> +++ b/drivers/media/video/videobuf2-dma-contig.c
> @@ -0,0 +1,762 @@
> +/*
> + * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
> + *
> + * Copyright (C) 2010 Samsung Electronics
> + *
> + * Author: Pawel Osciak <pawel@osciak.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation.
> + */
> +
> +#include <linux/dma-buf.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/module.h>
> +#include <linux/scatterlist.h>
> +#include <linux/sched.h>
> +#include <linux/slab.h>
> +
> +#include <media/videobuf2-core.h>
> +#include <media/videobuf2-memops.h>
> +
> +struct vb2_dc_buf {
> + struct device *dev;
> + void *vaddr;
> + unsigned long size;
> + dma_addr_t dma_addr;
> + struct sg_table *dma_sgt;
> + enum dma_data_direction dma_dir;
> +
> + /* MMAP related */
> + struct vb2_vmarea_handler handler;
> + atomic_t refcount;
> + struct dma_buf *dma_buf;
> + struct sg_table *sgt_base;
> +
> + /* USERPTR related */
> + struct vm_area_struct *vma;
> +
> + /* DMABUF related */
> + struct dma_buf_attachment *db_attach;
> +};
> +
> +/*********************************************/
> +/* scatterlist table functions */
> +/*********************************************/
> +
> +static struct sg_table *vb2_dc_pages_to_sgt(struct page **pages,
> + unsigned long n_pages, size_t offset, size_t offset2)
> +{
> + struct sg_table *sgt;
> + int i, j; /* loop counters */
> + int cur_page, chunks;
> + int ret;
> + struct scatterlist *s;
> +
> + sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
> + if (!sgt)
> + return ERR_PTR(-ENOMEM);
> +
> + /* compute number of chunks */
> + chunks = 1;
> + for (i = 1; i < n_pages; ++i)
> + if (pages[i] != pages[i - 1] + 1)
> + ++chunks;
> +
> + ret = sg_alloc_table(sgt, chunks, GFP_KERNEL);
> + if (ret) {
> + kfree(sgt);
> + return ERR_PTR(-ENOMEM);
> + }
> +
> + /* merging chunks and putting them into the scatterlist */
> + cur_page = 0;
> + for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
> + size_t size = PAGE_SIZE;
> +
> + for (j = cur_page + 1; j < n_pages; ++j) {
> + if (pages[j] != pages[j - 1] + 1)
> + break;
> + size += PAGE_SIZE;
> + }
> +
> + /* cut offset if chunk starts at the first page */
> + if (cur_page == 0)
> + size -= offset;
> + /* cut offset2 if chunk ends at the last page */
> + if (j == n_pages)
> + size -= offset2;
> +
> + sg_set_page(s, pages[cur_page], size, offset);
> + offset = 0;
> + cur_page = j;
> + }
> +
> + return sgt;
> +}
> +
> +static void vb2_dc_release_sgtable(struct sg_table *sgt)
> +{
> + sg_free_table(sgt);
> + kfree(sgt);
> +}
> +
> +static void vb2_dc_put_sgtable(struct sg_table *sgt, int dirty)
> +{
> + struct scatterlist *s;
> + int i, j;
> +
> + for_each_sg(sgt->sgl, s, sgt->nents, i) {
> + struct page *page = sg_page(s);
> + int n_pages = PAGE_ALIGN(s->offset + s->length) >> PAGE_SHIFT;
> +
> + for (j = 0; j < n_pages; ++j, ++page) {
> + if (dirty)
> + set_page_dirty_lock(page);
> + put_page(page);
> + }
> + }
> +
> + vb2_dc_release_sgtable(sgt);
> +}
> +
> +static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
> +{
> + struct scatterlist *s;
> + dma_addr_t expected = sg_dma_address(sgt->sgl);
> + int i;
> + unsigned long size = 0;
> +
> + for_each_sg(sgt->sgl, s, sgt->nents, i) {
> + if (sg_dma_address(s) != expected)
> + break;
> + expected = sg_dma_address(s) + sg_dma_len(s);
> + size += sg_dma_len(s);
> + }
> + return size;
> +}
> +
> +/*********************************************/
> +/* callbacks for all buffers */
> +/*********************************************/
> +
> +static void *vb2_dc_cookie(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> +
> + return &buf->dma_addr;
> +}
> +
> +static void *vb2_dc_vaddr(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> +
> + return buf->vaddr;
> +}
> +
> +static unsigned int vb2_dc_num_users(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> +
> + return atomic_read(&buf->refcount);
> +}
> +
> +static void vb2_dc_prepare(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> + struct sg_table *sgt = buf->dma_sgt;
> +
> + if (!sgt)
> + return;
> +
> + dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +}
> +
> +static void vb2_dc_finish(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> + struct sg_table *sgt = buf->dma_sgt;
> +
> + if (!sgt)
> + return;
> +
> + dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +}
> +
> +/*********************************************/
> +/* callbacks for MMAP buffers */
> +/*********************************************/
> +
> +static void vb2_dc_put(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> +
> + if (!atomic_dec_and_test(&buf->refcount))
> + return;
> +
> + if (buf->dma_buf)
> + dma_buf_put(buf->dma_buf);
> + vb2_dc_release_sgtable(buf->sgt_base);
> + dma_free_coherent(buf->dev, buf->size, buf->vaddr,
> + buf->dma_addr);
> + kfree(buf);
> +}
> +
> +static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
> +{
> + struct device *dev = alloc_ctx;
> + struct vb2_dc_buf *buf;
> + int ret;
> + int n_pages;
> + struct page **pages = NULL;
> +
> + buf = kzalloc(sizeof *buf, GFP_KERNEL);
> + if (!buf)
> + return ERR_PTR(-ENOMEM);
> +
> + buf->dev = dev;
> + buf->size = size;
> + buf->vaddr = dma_alloc_coherent(buf->dev, buf->size, &buf->dma_addr,
> + GFP_KERNEL);
> +
> + ret = -ENOMEM;
> + if (!buf->vaddr) {
> + dev_err(dev, "dma_alloc_coherent of size %ld failed\n",
> + size);
> + goto fail_buf;
> + }
> +
> + WARN_ON((unsigned long)buf->vaddr & ~PAGE_MASK);
> + WARN_ON(buf->dma_addr & ~PAGE_MASK);
> +
> + n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
> +
> + pages = kmalloc(n_pages * sizeof pages[0], GFP_KERNEL);
> + if (!pages) {
> + printk(KERN_ERR "failed to alloc page table\n");
> + goto fail_dma;
> + }
> +
> + ret = dma_get_pages(dev, buf->vaddr, buf->dma_addr, pages, n_pages);
> + if (ret < 0) {
> + printk(KERN_ERR "failed to get buffer pages from DMA API\n");
> + goto fail_pages;
> + }
> + if (ret != n_pages) {
> + ret = -EFAULT;
> + printk(KERN_ERR "failed to get all pages from DMA API\n");
> + goto fail_pages;
> + }
> +
> + buf->sgt_base = vb2_dc_pages_to_sgt(pages, n_pages, 0, 0);
> + if (IS_ERR(buf->sgt_base)) {
> + ret = PTR_ERR(buf->sgt_base);
> + printk(KERN_ERR "failed to prepare sg table\n");
> + goto fail_pages;
> + }
> +
> + /* pages are no longer needed */
> + kfree(pages);
> +
> + buf->handler.refcount = &buf->refcount;
> + buf->handler.put = vb2_dc_put;
> + buf->handler.arg = buf;
> +
> + atomic_inc(&buf->refcount);
> +
> + return buf;
> +
> +fail_pages:
> + kfree(pages);
> +
> +fail_dma:
> + dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
> +
> +fail_buf:
> + kfree(buf);
> +
> + return ERR_PTR(ret);
> +}
> +
> +static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> + int ret;
> +
> + /*
> + * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
> + * map whole buffer
> + */
> + vma->vm_pgoff = 0;
> +
> + ret = dma_mmap_writecombine(buf->dev, vma, buf->vaddr,
> + buf->dma_addr, buf->size);
> +
> + if (ret) {
> + printk(KERN_ERR "Remapping memory failed, error: %d\n", ret);
> + return ret;
> + }
> +
> + vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
> + vma->vm_private_data = &buf->handler;
> + vma->vm_ops = &vb2_common_vm_ops;
> +
> + vma->vm_ops->open(vma);
> +
> + printk(KERN_DEBUG "%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
> + __func__, (unsigned long)buf->dma_addr, vma->vm_start,
> + buf->size);
> +
> + return 0;
> +}
> +
> +/*********************************************/
> +/* DMABUF ops for exporters */
> +/*********************************************/
> +
> +struct vb2_dc_attachment {
> + struct sg_table sgt;
> + enum dma_data_direction dir;
> +};
> +
> +static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
> + struct dma_buf_attachment *dbuf_attach)
> +{
> + /* nothing to be done */
> + return 0;
> +}
> +
> +static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
> + struct dma_buf_attachment *db_attach)
> +{
> + struct vb2_dc_attachment *attach = db_attach->priv;
> + struct sg_table *sgt;
> +
> + if (!attach)
> + return;
> +
> + sgt = &attach->sgt;
> +
> + dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->nents, attach->dir);
> + sg_free_table(sgt);
> + kfree(attach);
> + db_attach->priv = NULL;
> +}
> +
> +static struct sg_table *vb2_dc_dmabuf_ops_map(
> + struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
> +{
> + struct dma_buf *dbuf = db_attach->dmabuf;
> + struct vb2_dc_buf *buf = dbuf->priv;
> + struct vb2_dc_attachment *attach = db_attach->priv;
> + struct sg_table *sgt;
> + struct scatterlist *rd, *wr;
> + int i, ret;
> +
> + /* return previously mapped sg table */
> + if (attach)
> + return &attach->sgt;
> +
> + attach = kzalloc(sizeof *attach, GFP_KERNEL);
> + if (!attach)
> + return ERR_PTR(-ENOMEM);
> +
> + sgt = &attach->sgt;
> + attach->dir = dir;
> +
> + /* copying the buf->sgt_base to the attachment */
> + ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
> + if (ret) {
> + kfree(attach);
> + return ERR_PTR(-ENOMEM);
> + }
> +
> + rd = buf->sgt_base->sgl;
> + wr = sgt->sgl;
> + for (i = 0; i < sgt->orig_nents; ++i) {
> + sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
> + rd = sg_next(rd);
> + wr = sg_next(wr);
> + }
> +
> + /* mapping new sglist to the client */
> + ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
> + if (ret <= 0) {
> + printk(KERN_ERR "failed to map scatterlist\n");
> + sg_free_table(sgt);
> + kfree(attach);
> + return ERR_PTR(-EIO);
> + }
> +
> + db_attach->priv = attach;
> +
> + return sgt;
> +}
> +
> +static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
> + struct sg_table *sgt)
> +{
> + /* nothing to be done here */
> +}
> +
> +static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
> +{
> + /* drop reference obtained in vb2_dc_get_dmabuf */
> + vb2_dc_put(dbuf->priv);
> +}
> +
> +static struct dma_buf_ops vb2_dc_dmabuf_ops = {
> + .attach = vb2_dc_dmabuf_ops_attach,
> + .detach = vb2_dc_dmabuf_ops_detach,
> + .map_dma_buf = vb2_dc_dmabuf_ops_map,
> + .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
> + .release = vb2_dc_dmabuf_ops_release,
> +};
> +
> +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> + struct dma_buf *dbuf;
> +
> + if (buf->dma_buf)
> + return buf->dma_buf;
> +
> + /* dmabuf keeps reference to vb2 buffer */
> + atomic_inc(&buf->refcount);
> + dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
> + if (IS_ERR(dbuf)) {
> + atomic_dec(&buf->refcount);
> + return NULL;
> + }
> +
> + buf->dma_buf = dbuf;
> +
> + return dbuf;
> +}
> +
> +/*********************************************/
> +/* callbacks for USERPTR buffers */
> +/*********************************************/
> +
> +static inline int vma_is_io(struct vm_area_struct *vma)
> +{
> + return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
> +}
> +
> +static int vb2_dc_get_pages(unsigned long start, struct page **pages,
> + int n_pages, struct vm_area_struct **copy_vma, int write)
> +{
> + struct vm_area_struct *vma;
> + int n = 0; /* number of get pages */
> + int ret = -EFAULT;
> +
> + /* entering critical section for mm access */
> + down_read(&current->mm->mmap_sem);
> +
> + vma = find_vma(current->mm, start);
> + if (!vma) {
> + printk(KERN_ERR "no vma for address %lu\n", start);
> + goto cleanup;
> + }
> +
> + if (vma_is_io(vma)) {
> + unsigned long pfn;
> +
> + if (vma->vm_end - start < n_pages * PAGE_SIZE) {
> + printk(KERN_ERR "vma is too small\n");
> + goto cleanup;
> + }
> +
> + for (n = 0; n < n_pages; ++n, start += PAGE_SIZE) {
> + ret = follow_pfn(vma, start, &pfn);
> + if (ret) {
> + printk(KERN_ERR "no page for address %lu\n",
> + start);
> + goto cleanup;
> + }
> + pages[n] = pfn_to_page(pfn);
> + get_page(pages[n]);
> + }
> + } else {
> + n = get_user_pages(current, current->mm, start & PAGE_MASK,
> + n_pages, write, 1, pages, NULL);
> + if (n != n_pages) {
> + printk(KERN_ERR "got only %d of %d user pages\n",
> + n, n_pages);
> + goto cleanup;
> + }
> + }
> +
> + *copy_vma = vb2_get_vma(vma);
> + if (!*copy_vma) {
> + printk(KERN_ERR "failed to copy vma\n");
> + ret = -ENOMEM;
> + goto cleanup;
> + }
> +
> + /* leaving critical section for mm access */
> + up_read(&current->mm->mmap_sem);
> +
> + return 0;
> +
> +cleanup:
> + up_read(&current->mm->mmap_sem);
> +
> + /* putting user pages if used, can be done without the lock */
> + while (n)
> + put_page(pages[--n]);
> +
> + return ret;
> +}
> +
> +static void vb2_dc_put_userptr(void *buf_priv)
> +{
> + struct vb2_dc_buf *buf = buf_priv;
> + struct sg_table *sgt = buf->dma_sgt;
> +
> + dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
> + vb2_dc_put_sgtable(sgt, !vma_is_io(buf->vma));
> + vb2_put_vma(buf->vma);
> + kfree(buf);
> +}
> +
> +static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
> + unsigned long size, int write)
> +{
> + struct vb2_dc_buf *buf;
> + unsigned long start, end, offset, offset2;
> + struct page **pages;
> + int n_pages;
> + int ret = 0;
> + struct sg_table *sgt;
> + unsigned long contig_size;
> +
> + buf = kzalloc(sizeof *buf, GFP_KERNEL);
> + if (!buf)
> + return ERR_PTR(-ENOMEM);
> +
> + buf->dev = alloc_ctx;
> + buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> +
> + start = (unsigned long)vaddr & PAGE_MASK;
> + offset = (unsigned long)vaddr & ~PAGE_MASK;
> + end = PAGE_ALIGN((unsigned long)vaddr + size);
> + offset2 = end - (unsigned long)vaddr - size;
> + n_pages = (end - start) >> PAGE_SHIFT;
> +
> + pages = kmalloc(n_pages * sizeof pages[0], GFP_KERNEL);
> + if (!pages) {
> + ret = -ENOMEM;
> + printk(KERN_ERR "failed to allocate pages table\n");
> + goto fail_buf;
> + }
> +
> + /* extract page list from userspace mapping */
> + ret = vb2_dc_get_pages(start, pages, n_pages, &buf->vma, write);
> + if (ret) {
> + printk(KERN_ERR "failed to get user pages\n");
> + goto fail_pages;
> + }
> +
> + sgt = vb2_dc_pages_to_sgt(pages, n_pages, offset, offset2);
> + if (IS_ERR(sgt)) {
> + printk(KERN_ERR "failed to create scatterlist table\n");
> + ret = -ENOMEM;
> + goto fail_get_pages;
> + }
> +
> + /* pages are no longer needed */
> + kfree(pages);
> + pages = NULL;
> +
> + sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
> + buf->dma_dir);
> + if (sgt->nents <= 0) {
> + printk(KERN_ERR "failed to map scatterlist\n");
> + ret = -EIO;
> + goto fail_sgt;
> + }
> +
> + contig_size = vb2_dc_get_contiguous_size(sgt);
> + if (contig_size < size) {
> + printk(KERN_ERR "contiguous mapping is too small %lu/%lu\n",
> + contig_size, size);
> + ret = -EFAULT;
> + goto fail_map_sg;
> + }
> +
> + buf->dma_addr = sg_dma_address(sgt->sgl);
> + buf->size = size;
> + buf->dma_sgt = sgt;
> +
> + atomic_inc(&buf->refcount);
> +
> + return buf;
> +
> +fail_map_sg:
> + dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
> +
> +fail_sgt:
> + vb2_dc_put_sgtable(sgt, 0);
> +
> +fail_get_pages:
> + while (pages && n_pages)
> + put_page(pages[--n_pages]);
> + vb2_put_vma(buf->vma);
> +
> +fail_pages:
> + kfree(pages); /* kfree is NULL-proof */
> +
> +fail_buf:
> + kfree(buf);
> +
> + return ERR_PTR(ret);
> +}
> +
> +/*********************************************/
> +/* callbacks for DMABUF buffers */
> +/*********************************************/
> +
> +static int vb2_dc_map_dmabuf(void *mem_priv)
> +{
> + struct vb2_dc_buf *buf = mem_priv;
> + struct sg_table *sgt;
> + unsigned long contig_size;
> + int ret = 0;
> +
> + if (WARN_ON(!buf->db_attach)) {
> + printk(KERN_ERR "trying to pin a non attached buffer\n");
> + return -EINVAL;
> + }
> +
> + if (WARN_ON(buf->dma_sgt)) {
> + printk(KERN_ERR "dmabuf buffer is already pinned\n");
> + return 0;
> + }
> +
> + /* get the associated scatterlist for this buffer */
> + sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
> + if (IS_ERR_OR_NULL(sgt)) {
> + printk(KERN_ERR "Error getting dmabuf scatterlist\n");
> + return -EINVAL;
> + }
> +
> + /* checking if dmabuf is big enough to store contiguous chunk */
> + contig_size = vb2_dc_get_contiguous_size(sgt);
> + if (contig_size < buf->size) {
> + printk(KERN_ERR "contiguous chunk of dmabuf is too small\n");
> + ret = -EFAULT;
> + goto fail_map;
> + }
> +
> + buf->dma_addr = sg_dma_address(sgt->sgl);
> + buf->dma_sgt = sgt;
> +
> + return 0;
> +
> +fail_map:
> + dma_buf_unmap_attachment(buf->db_attach, sgt);
> +
> + return ret;
> +}
> +
> +static void vb2_dc_unmap_dmabuf(void *mem_priv)
> +{
> + struct vb2_dc_buf *buf = mem_priv;
> + struct sg_table *sgt = buf->dma_sgt;
> +
> + if (WARN_ON(!buf->db_attach)) {
> + printk(KERN_ERR "trying to unpin a not attached buffer\n");
> + return;
> + }
> +
> + if (WARN_ON(!sgt)) {
> + printk(KERN_ERR "dmabuf buffer is already unpinned\n");
> + return;
> + }
> +
> + dma_buf_unmap_attachment(buf->db_attach, sgt);
> +
> + buf->dma_addr = 0;
> + buf->dma_sgt = NULL;
> +}
> +
> +static void vb2_dc_detach_dmabuf(void *mem_priv)
> +{
> + struct vb2_dc_buf *buf = mem_priv;
> +
> + if (buf->dma_addr)
> + vb2_dc_unmap_dmabuf(buf);
> +
> + /* detach this attachment */
> + dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
> + kfree(buf);
> +}
> +
> +static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
> + unsigned long size, int write)
> +{
> + struct vb2_dc_buf *buf;
> + struct dma_buf_attachment *dba;
> +
> + if (dbuf->size < size)
> + return ERR_PTR(-EFAULT);
> +
> + buf = kzalloc(sizeof *buf, GFP_KERNEL);
> + if (!buf)
> + return ERR_PTR(-ENOMEM);
> +
> + buf->dev = alloc_ctx;
> + /* create attachment for the dmabuf with the user device */
> + dba = dma_buf_attach(dbuf, buf->dev);
> + if (IS_ERR(dba)) {
> + printk(KERN_ERR "failed to attach dmabuf\n");
> + kfree(buf);
> + return dba;
> + }
> +
> + buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
> + buf->size = size;
> + buf->db_attach = dba;
> +
> + return buf;
> +}
> +
> +/*********************************************/
> +/* DMA CONTIG exported functions */
> +/*********************************************/
> +
> +const struct vb2_mem_ops vb2_dma_contig_memops = {
> + .alloc = vb2_dc_alloc,
> + .put = vb2_dc_put,
> + .get_dmabuf = vb2_dc_get_dmabuf,
> + .cookie = vb2_dc_cookie,
> + .vaddr = vb2_dc_vaddr,
> + .mmap = vb2_dc_mmap,
> + .get_userptr = vb2_dc_get_userptr,
> + .put_userptr = vb2_dc_put_userptr,
> + .prepare = vb2_dc_prepare,
> + .finish = vb2_dc_finish,
> + .map_dmabuf = vb2_dc_map_dmabuf,
> + .unmap_dmabuf = vb2_dc_unmap_dmabuf,
> + .attach_dmabuf = vb2_dc_attach_dmabuf,
> + .detach_dmabuf = vb2_dc_detach_dmabuf,
> + .num_users = vb2_dc_num_users,
> +};
> +EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
> +
> +void *vb2_dma_contig_init_ctx(struct device *dev)
> +{
> + return dev;
> +}
> +EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
> +
> +void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
> +{
> +}
> +EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);
> +
> +MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
> +MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
> +MODULE_LICENSE("GPL");
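
For reference, here is a rough sketch (not taken from this series) of how a capture
driver would typically plug this allocator in, to make clear what the exported
symbols above are meant for. The "my_dev", "my_buf" and "my_qops" names are made up
for illustration; only vb2_dma_contig_init_ctx(), vb2_dma_contig_memops and the
VB2_* io_modes come from videobuf2, and VB2_DMABUF is the new io_mode introduced
earlier in this series:

	#include <linux/platform_device.h>
	#include <linux/videodev2.h>
	#include <media/videobuf2-core.h>
	#include <media/videobuf2-dma-contig.h>

	/* illustrative per-buffer and per-device state of a hypothetical driver */
	struct my_buf {
		struct vb2_buffer vb;
	};

	struct my_dev {
		struct platform_device *pdev;
		struct vb2_queue queue;
		void *alloc_ctx;	/* returned by vb2_dma_contig_init_ctx() */
	};

	/* queue_setup/buf_queue/etc. omitted for brevity */
	static const struct vb2_ops my_qops;

	static int my_init_vb2_queue(struct my_dev *dev)
	{
		struct vb2_queue *q = &dev->queue;

		/* with this patch the context is simply the struct device pointer */
		dev->alloc_ctx = vb2_dma_contig_init_ctx(&dev->pdev->dev);

		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
		q->drv_priv = dev;
		q->buf_struct_size = sizeof(struct my_buf);
		q->ops = &my_qops;
		q->mem_ops = &vb2_dma_contig_memops;

		return vb2_queue_init(q);
	}

The allocator context is then handed back to vb2 from the driver's queue_setup()
callback through the alloc_ctxs[] array, and released with
vb2_dma_contig_cleanup_ctx() on driver removal.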