* [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M]
@ 2011-04-17 16:37 Chris Wilson
2011-04-21 0:21 ` Eric Anholt
0 siblings, 1 reply; 3+ messages in thread
From: Chris Wilson @ 2011-04-17 16:37 UTC (permalink / raw)
To: intel-gfx; +Cc: Daniel Vetter
The 965G (Broadwater) and 965GM (Crestline) chipsets had many errata in
handling page allocations above 4GiB. So we should be careful never to
allocate such pages and attempt to pass them through to the GPU, and so
limit ourselves to GFP_DMA32 on those chipsets.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
---
drivers/gpu/drm/i915/i915_gem.c | 61 ++++++++++++++++++++-------------------
1 files changed, 31 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1949048..d9c6ef2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -59,6 +59,29 @@ static int i915_gem_inactive_shrink(struct shrinker *shrinker,
int nr_to_scan,
gfp_t gfp_mask);
+static gfp_t i915_gem_get_gfp(struct drm_device *dev)
+{
+ gfp_t gfp = GFP_HIGHUSER | __GFP_RECLAIMABLE | __GFP_COLD;
+
+ /* The Broadwater/Crestline chipset (965G[M]) has many bugs in
+ * handling pages located above 4GiB, such as returning undefined
+ * results when accessed by the Command Streamer. So prevent us
+ * from ever utilizing high pages on those chipsets.
+ */
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ gfp |= GFP_DMA32;
+
+ return gfp;
+}
+
+static struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj,
+ int page_index,
+ gfp_t gfp)
+{
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ return read_cache_page_gfp(mapping, page_index,
+ i915_gem_get_gfp(obj->base.dev) | gfp);
+}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -385,7 +408,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -411,8 +433,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -447,7 +468,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_i915_gem_pread *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct page **user_pages;
ssize_t remain;
loff_t offset;
@@ -505,8 +525,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -799,7 +818,6 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
loff_t offset;
char __user *user_data;
@@ -826,8 +844,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -869,7 +886,6 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct page **user_pages;
ssize_t remain;
loff_t first_data_page, last_data_page, offset;
@@ -925,8 +941,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT, 0);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1250,9 +1265,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
mutex_unlock(&dev->struct_mutex);
- page = read_cache_page_gfp(obj->base.filp->f_path.dentry->d_inode->i_mapping,
- page_offset,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, page_offset, 0);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1589,8 +1602,6 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask)
{
int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
struct page *page;
/* Get the list of pages out of our struct file. They'll be pinned
@@ -1602,14 +1613,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->pages == NULL)
return -ENOMEM;
- inode = obj->base.filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = i915_gem_object_get_page(obj, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -4072,7 +4077,6 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
char *vaddr;
int i;
int page_count;
@@ -4083,8 +4087,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = i915_gem_object_get_page(obj, i, 0);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4109,7 +4112,6 @@ i915_gem_attach_phys_object(struct drm_device *dev,
int id,
int align)
{
- struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret = 0;
int page_count;
@@ -4145,8 +4147,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = i915_gem_object_get_page(obj, i, 0);
if (IS_ERR(page))
return PTR_ERR(page);
--
1.7.4.4
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M]
2011-04-17 16:37 [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M] Chris Wilson
@ 2011-04-21 0:21 ` Eric Anholt
2011-04-21 6:03 ` Chris Wilson
0 siblings, 1 reply; 3+ messages in thread
From: Eric Anholt @ 2011-04-21 0:21 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: Daniel Vetter
[-- Attachment #1.1: Type: text/plain, Size: 577 bytes --]
On Sun, 17 Apr 2011 17:37:32 +0100, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> The 965G (Broadwater) and 965GM (Crestline) chipsets had many errata in
> handling pages allocation above 4GiB. So we should be careful never to
> allocate and attempt to pass such through to the GPU and so limit
> ourselves to GFP_DMA32 on those chipsets.
Unfortunate to see this -- were there more particular bugs in >4GB
handling found, or is it just "I'm tired of worrying about it, let's
avoid this class of problem"? I'm not saying "no" to this patch for
either answer, just curious.
[-- Attachment #1.2: Type: application/pgp-signature, Size: 197 bytes --]
[-- Attachment #2: Type: text/plain, Size: 159 bytes --]
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M]
2011-04-21 0:21 ` Eric Anholt
@ 2011-04-21 6:03 ` Chris Wilson
0 siblings, 0 replies; 3+ messages in thread
From: Chris Wilson @ 2011-04-21 6:03 UTC (permalink / raw)
To: Eric Anholt, intel-gfx; +Cc: Daniel Vetter
On Wed, 20 Apr 2011 17:21:03 -0700, Eric Anholt <eric@anholt.net> wrote:
> On Sun, 17 Apr 2011 17:37:32 +0100, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > The 965G (Broadwater) and 965GM (Crestline) chipsets had many errata in
> > handling pages allocation above 4GiB. So we should be careful never to
> > allocate and attempt to pass such through to the GPU and so limit
> > ourselves to GFP_DMA32 on those chipsets.
>
> Unfortunate to see this -- were there more particular bugs in >4GB
> handling found, or is it just "I'm tired of worrying about it, let's
> avoid this class of problem"? I'm not saying "no" to this patch for
> either answer, just curious.
It's an investigatory patch because 965GM is still misbehaving, and those
warnings in the docs scared me.
A solution in search of a problem, i.e. it may help someone and that would
be very useful to know.
-Chris
--
Chris Wilson, Intel Open Source Technology Centre
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2011-04-21 6:03 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-04-17 16:37 [PATCH] drm/i915: Prevent use of pages >4GiB on 965G[M] Chris Wilson
2011-04-21 0:21 ` Eric Anholt
2011-04-21 6:03 ` Chris Wilson
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox