* [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Rob Clark @ 2013-07-07 18:58 UTC (permalink / raw)
To: dri-devel
Basically just extracting some code duplicated in gma500, omapdrm, udl,
and upcoming msm driver.
Signed-off-by: Rob Clark <robdclark@gmail.com>
---
drivers/gpu/drm/drm_gem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++++
include/drm/drmP.h | 4 +++
2 files changed, 95 insertions(+)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 443eeff..853dea6 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -406,6 +406,97 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3cb1672..7ec3fa4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1730,6 +1730,10 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
--
1.8.1.4
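
For context, a rough sketch of how a driver would hook these helpers into
its page attach/detach paths, modeled on the duplicated pattern in
gma500/omapdrm/udl. The names here (example_gem_object and the two
functions) are hypothetical, and error handling is trimmed:

#include <linux/err.h>
#include <drm/drmP.h>

struct example_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* filled in by drm_gem_get_pages() */
};

static int example_gem_attach_pages(struct example_gem_object *eobj)
{
	struct page **pages;

	/* Pin the shmem backing pages; a device that needs 32-bit
	 * DMA addresses would pass GFP_KERNEL | __GFP_DMA32 instead.
	 */
	pages = drm_gem_get_pages(&eobj->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	eobj->pages = pages;
	return 0;
}

static void example_gem_detach_pages(struct example_gem_object *eobj)
{
	/* Mark the pages dirty and accessed so writes survive a
	 * swap-out, then drop the references drm_gem_get_pages() took.
	 */
	drm_gem_put_pages(&eobj->base, eobj->pages, true, true);
	eobj->pages = NULL;
}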
* Re: [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Patrik Jakobsson @ 2013-07-08 8:45 UTC (permalink / raw)
To: Rob Clark; +Cc: dri-devel
On Sun, Jul 7, 2013 at 8:58 PM, Rob Clark <robdclark@gmail.com> wrote:
> Basically just extracting some code duplicated in gma500, omapdrm, udl,
> and upcoming msm driver.
>
> Signed-off-by: Rob Clark <robdclark@gmail.com>
[snip]
> + npages = obj->size >> PAGE_SHIFT;
Theoretical issue, but what if obj->size is not page aligned? Perhaps put a
roundup(obj->size, PAGE_SIZE) here?
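I.e. something along these lines (untested):

	npages = roundup(obj->size, PAGE_SIZE) >> PAGE_SHIFT;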
[snip]
> + npages = obj->size >> PAGE_SHIFT;
Same thing here.
[snip]
Looks good otherwise, so for all 5 patches:
Reviewed-by: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
* Re: [PATCH 2/5] drm/gem: add shmem get/put page helpers
2013-07-08 8:45 ` Patrik Jakobsson
@ 2013-07-08 18:56 ` Rob Clark
2013-07-08 20:18 ` Daniel Vetter
0 siblings, 1 reply; 7+ messages in thread
From: Rob Clark @ 2013-07-08 18:56 UTC (permalink / raw)
To: Patrik Jakobsson; +Cc: dri-devel
On Mon, Jul 8, 2013 at 4:45 AM, Patrik Jakobsson
<patrik.r.jakobsson@gmail.com> wrote:
> On Sun, Jul 7, 2013 at 8:58 PM, Rob Clark <robdclark@gmail.com> wrote:
>> Basically just extracting some code duplicated in gma500, omapdrm, udl,
>> and upcoming msm driver.
[snip]
>> + npages = obj->size >> PAGE_SHIFT;
>
> Theoretical issue, but what if obj->size is not page aligned? Perhaps put a
> roundup(obj->size, PAGE_SIZE) here?
so, drm_gem_object_init() does have:
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
so I was kinda assuming that we can count on the size already being
aligned. But I guess in case someone somehow bypasses
drm_gem_object_init() it wouldn't hurt to round up the size..
BR,
-R
* Re: [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Daniel Vetter @ 2013-07-08 20:18 UTC (permalink / raw)
To: Rob Clark; +Cc: dri-devel
On Mon, Jul 08, 2013 at 02:56:31PM -0400, Rob Clark wrote:
> On Mon, Jul 8, 2013 at 4:45 AM, Patrik Jakobsson
> <patrik.r.jakobsson@gmail.com> wrote:
> > On Sun, Jul 7, 2013 at 8:58 PM, Rob Clark <robdclark@gmail.com> wrote:
> >> Basically just extracting some code duplicated in gma500, omapdrm, udl,
> >> and upcoming msm driver.
[snip]
> >> + npages = obj->size >> PAGE_SHIFT;
> >
> > Theoretical issue, but what if obj->size is not page aligned? Perhaps put a
> > roundup(obj->size, PAGE_SIZE) here?
>
> so, drm_gem_object_init() does have:
>
> BUG_ON((size & (PAGE_SIZE - 1)) != 0);
>
> so I was kinda assuming that we can count on the size already being
> aligned. But I guess in case someone somehow bypasses
> drm_gem_object_init() it wouldn't hurt to round up the size..
Would look funny to me to allow it in one place and not in another one.
Maybe just throw a new WARN_ON in here (WARN since it's not fatal)?
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
* Re: [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Rob Clark @ 2013-07-08 23:07 UTC (permalink / raw)
To: Daniel Vetter; +Cc: dri-devel
On Mon, Jul 8, 2013 at 4:18 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Mon, Jul 08, 2013 at 02:56:31PM -0400, Rob Clark wrote:
>> On Mon, Jul 8, 2013 at 4:45 AM, Patrik Jakobsson
>> <patrik.r.jakobsson@gmail.com> wrote:
>> > On Sun, Jul 7, 2013 at 8:58 PM, Rob Clark <robdclark@gmail.com> wrote:
>> >> Basically just extracting some code duplicated in gma500, omapdrm, udl,
>> >> and upcoming msm driver.
[snip]
>> >> + npages = obj->size >> PAGE_SHIFT;
>> >
>> > Theoretical issue, but what if obj->size is not page aligned? Perhaps put a
>> > roundup(obj->size, PAGE_SIZE) here?
>>
>> so, drm_gem_object_init() does have:
>>
>> BUG_ON((size & (PAGE_SIZE - 1)) != 0);
>>
>> so I was kinda assuming that we can count on the size already being
>> aligned. But I guess in case someone somehow bypasses
>> drm_gem_object_init() it wouldn't hurt to round up the size..
>
> Would look funny to me to allow it in one place and not in another one.
> Maybe just throw a new WARN_ON in here (WARN since it's not fatal)?
> -Daniel
sounds good, I'll toss in a WARN_ON()
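i.e. something like:

	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

in both the get and put paths.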
BR,
-R
* Re: [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Patrik Jakobsson @ 2013-07-09 9:03 UTC (permalink / raw)
To: Rob Clark; +Cc: dri-devel
On Tue, Jul 9, 2013 at 1:07 AM, Rob Clark <robdclark@gmail.com> wrote:
> On Mon, Jul 8, 2013 at 4:18 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
[snip]
>> Would look funny to me to allow it in one place and not in another one.
>> Maybe just throw a new WARN_ON in here (WARN since it's not fatal)?
>> -Daniel
>
> sounds good, I'll toss in a WARN_ON()
Yes, sounds good.
Patrik
* [PATCH 2/5] drm/gem: add shmem get/put page helpers
From: Rob Clark @ 2013-07-13 22:39 UTC (permalink / raw)
To: dri-devel
Basically just extracting some code duplicated in gma500, omapdrm, udl,
and upcoming msm driver.
Signed-off-by: Rob Clark <robdclark@gmail.com>
CC: Daniel Vetter <daniel@ffwll.ch>
CC: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
---
v1: original
v2: add WARN_ON()'s for non-page-aligned sizes as suggested by Patrik
Jakobsson and Daniel Vetter
drivers/gpu/drm/drm_gem.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++
include/drm/drmP.h | 4 ++
2 files changed, 107 insertions(+)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7995466..bf299b3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -408,6 +408,109 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
+
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
+
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fefbbda..853557a 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1730,6 +1730,10 @@ void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
u32 handle);
--
1.8.3.1