* [PATCH 0/5] First cut of prime patches for drm-next
@ 2013-08-08 7:10 Daniel Vetter
2013-08-08 7:10 ` [PATCH 1/5] drm: use common drm_gem_dmabuf_release in i915/exynos drivers Daniel Vetter
` (4 more replies)
0 siblings, 5 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie; +Cc: Daniel Vetter, Intel Graphics Development, DRI Development
Hi Dave,
Inki supplied a patch to convert exynos, so I think these prep patches are ready
to go in. I want a common drm_gem_dmabuf_release function across all drivers
since the oops fix around the teardown of the obj->export_dma_buf pointer needs
to have changed code in there, and duplicating tricky locking stuff across all
drivers isn't great. Please consider merging into drm-next.
Cheers, Daniel
Daniel Vetter (4):
drm: use common drm_gem_dmabuf_release in i915/exynos drivers
drm/i915: unpin backing storage in dmabuf_unmap
drm/i915: explicit store base gem object in dma_buf->priv
drm/prime: remove cargo-cult locking from map_sg helper
Inki Dae (1):
drm/exynos: explicit store base gem object in dma_buf->priv
drivers/gpu/drm/drm_prime.c | 6 ++---
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 35 ++++++++---------------------
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 36 +++++++++++++++---------------
include/drm/drmP.h | 1 +
4 files changed, 30 insertions(+), 48 deletions(-)
--
1.8.3.2
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 1/5] drm: use common drm_gem_dmabuf_release in i915/exynos drivers
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
@ 2013-08-08 7:10 ` Daniel Vetter
2013-08-08 7:10 ` [PATCH 2/5] drm/i915: unpin backing storage in dmabuf_unmap Daniel Vetter
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie; +Cc: Daniel Vetter, Intel Graphics Development, DRI Development
Note that this is slightly tricky since both drivers store their
native objects in dma_buf->priv. But both also embed the base
drm_gem_object at the first position, so the implicit cast is ok.
To use the release helper we need to export it, too.
Cc: Inki Dae <inki.dae@samsung.com>
Cc: Intel Graphics Development <intel-gfx@lists.freedesktop.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/drm_prime.c | 3 ++-
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 23 +----------------------
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13 +------------
include/drm/drmP.h | 1 +
4 files changed, 5 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e..a35f206 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -192,7 +192,7 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* nothing to be done here */
}
-static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -202,6 +202,7 @@ static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
drm_gem_object_unreference_unlocked(obj);
}
}
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e..3cd56e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -127,27 +127,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* Nothing to do. */
}
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
- /*
- * exynos_dmabuf_release() call means that file object's
- * f_count is 0 and it calls drm_gem_object_handle_unreference()
- * to drop the references that these values had been increased
- * at drm_prime_handle_to_fd()
- */
- if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
- exynos_gem_obj->base.export_dma_buf = NULL;
-
- /*
- * drop this gem object refcount to release allocated buffer
- * and resources.
- */
- drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
- }
-}
-
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -193,7 +172,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
.mmap = exynos_gem_dmabuf_mmap,
- .release = exynos_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index f2e185c..63ee1a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -90,17 +90,6 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
kfree(sg);
}
-static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct drm_i915_gem_object *obj = dma_buf->priv;
-
- if (obj->base.export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&obj->base);
- }
-}
-
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct drm_i915_gem_object *obj = dma_buf->priv;
@@ -211,7 +200,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
- .release = i915_gem_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
.kmap = i915_gem_dmabuf_kmap,
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fba5473..69bf832 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1540,6 +1540,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
--
1.8.3.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 2/5] drm/i915: unpin backing storage in dmabuf_unmap
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
2013-08-08 7:10 ` [PATCH 1/5] drm: use common drm_gem_dmabuf_release in i915/exynos drivers Daniel Vetter
@ 2013-08-08 7:10 ` Daniel Vetter
2013-08-08 7:10 ` [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv Daniel Vetter
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie
Cc: Maarten Lankhorst, Daniel Vetter, Intel Graphics Development,
DRI Development
This fixes a WARN in i915_gem_free_object when the
obj->pages_pin_count isn't 0.
v2: Add locking to unmap, noticed by Chris Wilson. Note that even
though we call unmap with our own dev->struct_mutex held that won't
result in an immediate deadlock since we never go through the dma_buf
interfaces for our own, reimported buffers. But it's still easy to
blow up and anger lockdep, but that's already the case with our ->map
implementation. Fixing this for real will involve per dma-buf ww mutex
locking by the callers. And lots of fun. So go with the duct-tape
approach for now.
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reported-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Tested-by: Armin K. <krejzi@email.com> (v1)
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 63ee1a9..f7e1682 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
+ struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
+
+ i915_gem_object_unpin_pages(obj);
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
--
1.8.3.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
2013-08-08 7:10 ` [PATCH 1/5] drm: use common drm_gem_dmabuf_release in i915/exynos drivers Daniel Vetter
2013-08-08 7:10 ` [PATCH 2/5] drm/i915: unpin backing storage in dmabuf_unmap Daniel Vetter
@ 2013-08-08 7:10 ` Daniel Vetter
2013-08-14 21:37 ` Daniel Vetter
2013-08-08 7:10 ` [PATCH 4/5] drm/exynos: " Daniel Vetter
2013-08-08 7:10 ` [PATCH 5/5] drm/prime: remove cargo-cult locking from map_sg helper Daniel Vetter
4 siblings, 1 reply; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie; +Cc: Daniel Vetter, Intel Graphics Development, DRI Development
Makes it more obviously correct what tricks we play by reusing the drm
prime release helper.
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/i915/i915_gem_dmabuf.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index f7e1682..e918b05 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
#include "i915_drv.h"
#include <linux/dma-buf.h>
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_intel_bo(buf->priv);
+}
+
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
struct sg_table *st;
struct scatterlist *src, *dst;
int ret, i;
@@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
mutex_lock(&obj->base.dev->struct_mutex);
@@ -100,7 +105,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter;
struct page **pages;
@@ -148,7 +153,7 @@ error:
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
@@ -191,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -222,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -261,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
/* is this one of own objects? */
if (dma_buf->ops == &i915_dmabuf_ops) {
- obj = dma_buf->priv;
+ obj = dma_buf_to_obj(dma_buf);
/* is it from our device? */
if (obj->base.dev == dev) {
/*
--
1.8.3.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 4/5] drm/exynos: explicit store base gem object in dma_buf->priv
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
` (2 preceding siblings ...)
2013-08-08 7:10 ` [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv Daniel Vetter
@ 2013-08-08 7:10 ` Daniel Vetter
2013-08-08 7:10 ` [PATCH 5/5] drm/prime: remove cargo-cult locking from map_sg helper Daniel Vetter
4 siblings, 0 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie
Cc: Inki Dae, Daniel Vetter, Intel Graphics Development,
Kyungmin Park, DRI Development
From: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index 3cd56e1..fd76449 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -22,6 +22,11 @@ struct exynos_drm_dmabuf_attachment {
bool is_mapped;
};
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_exynos_gem_obj(buf->priv);
+}
+
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
struct device *dev,
struct dma_buf_attachment *attach)
@@ -63,7 +68,7 @@ static struct sg_table *
enum dma_data_direction dir)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr;
@@ -180,7 +185,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ return dma_buf_export(obj, &exynos_dmabuf_ops,
exynos_gem_obj->base.size, flags);
}
@@ -198,8 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
- exynos_gem_obj = dma_buf->priv;
- obj = &exynos_gem_obj->base;
+ obj = dma_buf->priv;
/* is it from our device? */
if (obj->dev == drm_dev) {
--
1.8.3.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 5/5] drm/prime: remove cargo-cult locking from map_sg helper
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
` (3 preceding siblings ...)
2013-08-08 7:10 ` [PATCH 4/5] drm/exynos: " Daniel Vetter
@ 2013-08-08 7:10 ` Daniel Vetter
4 siblings, 0 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-08 7:10 UTC (permalink / raw)
To: Dave Airlie
Cc: Maarten Lankhorst, Daniel Vetter, Intel Graphics Development,
Laurent Pinchart, DRI Development
I've checked both implementations (radeon/nouveau) and they both grab
the page array from ttm simply by dereferencing it and then wrapping
it up with drm_prime_pages_to_sg in the callback and map it with
dma_map_sg (in the helper).
Only the grabbing of the underlying page array is anything we need to
be concerned about, and either those pages are pinned independently,
or we're screwed no matter what.
And indeed, nouveau/radeon pin the backing storage in their
attach/detach functions.
Since I've created this patch cma prime support for dma_buf was added.
drm_gem_cma_prime_get_sg_table only calls kzalloc and then creates & maps
the sg table with dma_get_sgtable. It doesn't touch any gem object
state otherwise. So the cma helpers also look safe.
The only thing we might claim it does is prevent concurrent mapping of
dma_buf attachments. But a) that's not allowed and b) the current code
is racy already since it checks whether the sg mapping exists _before_
grabbing the lock.
So the dev->struct_mutex locking here does absolutely nothing useful,
but only distracts. Remove it.
This should also help Maarten's work to eventually pin the backing
storage more dynamically by preventing locking inversions around
dev->struct_mutex.
v2: Add analysis for recently added cma helper prime code.
Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/drm_prime.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a35f206..f115962 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -167,8 +167,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- mutex_lock(&obj->dev->struct_mutex);
-
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
@@ -182,7 +180,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
}
- mutex_unlock(&obj->dev->struct_mutex);
return sgt;
}
--
1.8.3.2
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv
2013-08-08 7:10 ` [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv Daniel Vetter
@ 2013-08-14 21:37 ` Daniel Vetter
0 siblings, 0 replies; 7+ messages in thread
From: Daniel Vetter @ 2013-08-14 21:37 UTC (permalink / raw)
To: Dave Airlie; +Cc: Daniel Vetter, Intel Graphics Development, DRI Development
On Thu, Aug 08, 2013 at 09:10:38AM +0200, Daniel Vetter wrote:
> Makes it more obviously correct what tricks we play by reusing the drm
> prime release helper.
>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Ok, to get things going I've merged the two i915 patches to dinq.
-Daniel
> ---
> drivers/gpu/drm/i915/i915_gem_dmabuf.c | 21 ++++++++++++---------
> 1 file changed, 12 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index f7e1682..e918b05 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -27,10 +27,15 @@
> #include "i915_drv.h"
> #include <linux/dma-buf.h>
>
> +static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
> +{
> + return to_intel_bo(buf->priv);
> +}
> +
> static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
> enum dma_data_direction dir)
> {
> - struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
> + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
> struct sg_table *st;
> struct scatterlist *src, *dst;
> int ret, i;
> @@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
> struct sg_table *sg,
> enum dma_data_direction dir)
> {
> - struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
> + struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
>
> mutex_lock(&obj->base.dev->struct_mutex);
>
> @@ -100,7 +105,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
>
> static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
> {
> - struct drm_i915_gem_object *obj = dma_buf->priv;
> + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
> struct drm_device *dev = obj->base.dev;
> struct sg_page_iter sg_iter;
> struct page **pages;
> @@ -148,7 +153,7 @@ error:
>
> static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
> {
> - struct drm_i915_gem_object *obj = dma_buf->priv;
> + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
> struct drm_device *dev = obj->base.dev;
> int ret;
>
> @@ -191,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
>
> static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
> {
> - struct drm_i915_gem_object *obj = dma_buf->priv;
> + struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
> struct drm_device *dev = obj->base.dev;
> int ret;
> bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
> @@ -222,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
> struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
> struct drm_gem_object *gem_obj, int flags)
> {
> - struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> -
> - return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
> + return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
> }
>
> static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
> @@ -261,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
>
> /* is this one of own objects? */
> if (dma_buf->ops == &i915_dmabuf_ops) {
> - obj = dma_buf->priv;
> + obj = dma_buf_to_obj(dma_buf);
> /* is it from our device? */
> if (obj->base.dev == dev) {
> /*
> --
> 1.8.3.2
>
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2013-08-14 21:37 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2013-08-08 7:10 [PATCH 0/5] First cut of prime patches for drm-next Daniel Vetter
2013-08-08 7:10 ` [PATCH 1/5] drm: use common drm_gem_dmabuf_release in i915/exynos drivers Daniel Vetter
2013-08-08 7:10 ` [PATCH 2/5] drm/i915: unpin backing storage in dmabuf_unmap Daniel Vetter
2013-08-08 7:10 ` [PATCH 3/5] drm/i915: explicit store base gem object in dma_buf->priv Daniel Vetter
2013-08-14 21:37 ` Daniel Vetter
2013-08-08 7:10 ` [PATCH 4/5] drm/exynos: " Daniel Vetter
2013-08-08 7:10 ` [PATCH 5/5] drm/prime: remove cargo-cult locking from map_sg helper Daniel Vetter
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox