* [PATCH 2/6] drm/i915: Getter/setter for object attributes
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
@ 2013-07-03 21:45 ` Ben Widawsky
2013-07-03 22:55 ` Daniel Vetter
2013-07-03 21:45 ` [PATCH 3/6] drm/i915: Kill obj->gtt_offset Ben Widawsky
` (6 subsequent siblings)
7 siblings, 1 reply; 19+ messages in thread
From: Ben Widawsky @ 2013-07-03 21:45 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky
Soon we want to gut a lot of our existing assumptions about how many address
spaces an object can live in, and in doing so, embed the drm_mm_node in
the object (and later the VMA).
It's possible in the future we'll want to add more getter/setter
methods, but for now this is enough to enable the VMAs.
v2: Reworked commit message (Ben)
Added comments to the main functions (Ben)
sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch]
sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch]
(Daniel)
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/i915/i915_debugfs.c | 26 ++++----
drivers/gpu/drm/i915/i915_drv.h | 35 +++++++++-
drivers/gpu/drm/i915/i915_gem.c | 101 +++++++++++++++--------------
drivers/gpu/drm/i915/i915_gem_context.c | 2 +-
drivers/gpu/drm/i915/i915_gem_evict.c | 6 +-
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 +++---
drivers/gpu/drm/i915/i915_gem_gtt.c | 24 +++----
drivers/gpu/drm/i915/i915_gem_stolen.c | 10 +--
drivers/gpu/drm/i915/i915_gem_tiling.c | 14 ++--
drivers/gpu/drm/i915/i915_irq.c | 15 ++---
drivers/gpu/drm/i915/i915_trace.h | 8 +--
drivers/gpu/drm/i915/intel_display.c | 28 ++++----
drivers/gpu/drm/i915/intel_fb.c | 8 +--
drivers/gpu/drm/i915/intel_overlay.c | 14 ++--
drivers/gpu/drm/i915/intel_pm.c | 8 +--
drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++--
drivers/gpu/drm/i915/intel_sprite.c | 8 ++-
17 files changed, 188 insertions(+), 150 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3e36756..396387e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -122,9 +122,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (i915_gem_obj_ggtt_bound(obj))
+ seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+ i915_gem_obj_ggtt_offset(obj), (unsigned int)i915_gem_obj_ggtt_size(obj));
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -175,7 +175,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -187,10 +187,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space->size; \
+ size += i915_gem_obj_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space->size; \
+ mappable_size += i915_gem_obj_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -209,7 +209,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
@@ -267,11 +267,11 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
- size += obj->gtt_space->size;
+ size += i915_gem_obj_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += obj->gtt_space->size;
+ mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -333,7 +333,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
@@ -379,12 +379,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fd0f589..496ed3a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
/** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ struct drm_mm_node *ggtt_space;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
@@ -1333,7 +1333,7 @@ struct drm_i915_gem_object {
*
* This is the same as gtt_space->start
*/
- uint32_t gtt_offset;
+ uint32_t ggtt_offset;
struct intel_ring_buffer *ring;
@@ -1360,6 +1360,37 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+/* Offset of the first PTE pointing to this object */
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
+{
+ return o->ggtt_space->start;
+}
+
+/* Whether or not this object is currently mapped by the translation tables */
+static inline bool
+i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
+{
+ return o->ggtt_space != NULL;
+}
+
+/* The size used in the translation tables may be larger than the actual size of
+ * the object on GEN2/GEN3 because of the way tiling is handled. See
+ * i915_gem_get_gtt_size() for more details.
+ */
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
+{
+ return o->ggtt_space->size;
+}
+
+static inline void
+i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
+ enum i915_cache_level color)
+{
+ o->ggtt_space->color = color;
+}
+
/**
* Request queue structure.
*
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4200c32..edd5b6d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active;
+ return i915_gem_obj_ggtt_bound(obj) && !obj->active;
}
int
@@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
- pinned += obj->gtt_space->size;
+ pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->gtt.total;
@@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = obj->gtt_offset + args->offset;
+ offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
@@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
- page_offset;
+ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn >>= PAGE_SHIFT;
+ pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(obj->gtt_space);
+ BUG_ON(i915_gem_obj_ggtt_bound(obj));
if (obj->pages_pin_count)
return -EBUSY;
@@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
{
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return true;
return false;
@@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
if (ring->hangcheck.action != wait &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
request->batch_obj ?
- request->batch_obj->gtt_offset : 0,
+ i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2581,7 +2582,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_ggtt_bound(obj))
return 0;
if (obj->pin_count)
@@ -2620,9 +2621,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- obj->gtt_offset = 0;
+ drm_mm_put_block(obj->ggtt_space);
+ obj->ggtt_space = NULL;
+ obj->ggtt_offset = 0;
return 0;
}
@@ -2664,11 +2665,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
}
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2688,15 +2689,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2707,7 +2708,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2732,19 +2733,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
- WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
@@ -3033,8 +3034,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
@@ -3045,8 +3046,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
@@ -3151,15 +3152,15 @@ search_free:
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->gtt_space = node;
- obj->gtt_offset = node->start;
+ obj->ggtt_space = node;
+ obj->ggtt_offset = node->start;
fenceable =
node->size == fence_size &&
(node->start & (fence_alignment - 1)) == 0;
- mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+ mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
+ dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3261,7 +3262,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_ggtt_bound(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3320,13 +3321,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3349,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
- obj->gtt_space->color = cache_level;
+ i915_gem_obj_ggtt_set_color(obj, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3630,14 +3631,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (obj->gtt_space != NULL) {
- if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ if (i915_gem_obj_ggtt_bound(obj)) {
+ if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- obj->gtt_offset, alignment,
+ i915_gem_obj_ggtt_offset(obj), alignment,
map_and_fenceable,
obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
@@ -3646,7 +3647,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
}
- if (obj->gtt_space == NULL) {
+ if (!i915_gem_obj_ggtt_bound(obj)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
ret = i915_gem_object_bind_to_gtt(obj, alignment,
@@ -3672,7 +3673,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(obj->gtt_space == NULL);
+ BUG_ON(!i915_gem_obj_ggtt_bound(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3722,7 +3723,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj->gtt_offset;
+ args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a21..2074544 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, new_context->obj->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9..5bbdea4 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
return false;
list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(obj->gtt_space);
+ return drm_mm_scan_add_block(obj->ggtt_space);
}
int
@@ -107,7 +107,7 @@ none:
struct drm_i915_gem_object,
exec_list);
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ ret = drm_mm_scan_remove_block(obj->ggtt_space);
BUG_ON(ret);
list_del_init(&obj->exec_list);
@@ -127,7 +127,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ if (drm_mm_scan_remove_block(obj->ggtt_space)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227..5aeb447 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = target_i915_obj->gtt_offset;
+ target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
+ reloc->offset += i915_gem_obj_ggtt_offset(obj);
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
@@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != obj->gtt_offset) {
- entry->offset = obj->gtt_offset;
+ if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
+ entry->offset = i915_gem_obj_ggtt_offset(obj);
*need_reloc = true;
}
@@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_ggtt_bound(obj))
return;
entry = obj->exec_entry;
@@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_ggtt_bound(obj))
continue;
need_fence =
@@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ if ((entry->alignment &&
+ i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
@@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
+ if (i915_gem_obj_ggtt_bound(obj))
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
@@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5c6fc0e..1eefba7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
enum i915_cache_level cache_level)
{
ppgtt->insert_entries(ppgtt, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level);
}
@@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
ppgtt->clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
@@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
cache_level);
obj->has_global_gtt_mapping = 1;
@@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
@@ -630,22 +630,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
int ret;
- DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
- obj->gtt_offset, obj->base.size);
+ DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+ i915_gem_obj_ggtt_offset(obj), obj->base.size);
- BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
- if (!obj->gtt_space) {
+ BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
+ obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
+ if (!obj->ggtt_space) {
DRM_ERROR("Failed to preserve all objects\n");
break;
}
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_space,
- obj->gtt_offset,
+ obj->ggtt_space,
+ i915_gem_obj_ggtt_offset(obj),
obj->base.size);
if (ret) {
DRM_DEBUG_KMS("Reservation failed\n");
- kfree(obj->gtt_space);
+ kfree(obj->ggtt_space);
}
obj->has_global_gtt_mapping = 1;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f9db84a..cf0d0e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -374,23 +374,23 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* later.
*/
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
- if (!obj->gtt_space) {
+ obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
+ if (!obj->ggtt_space) {
DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
goto unref_out;
}
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_space,
+ obj->ggtt_space,
gtt_offset, size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto unref_out;
}
} else
- obj->gtt_space = I915_GTT_RESERVED;
+ obj->ggtt_space = I915_GTT_RESERVED;
- obj->gtt_offset = gtt_offset;
+ obj->ggtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545b..92a8d27 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (obj->gtt_space->size != size)
+ if (i915_gem_obj_ggtt_size(obj) != size)
return false;
- if (obj->gtt_offset & (size - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
obj->map_and_fenceable =
- obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
@@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (obj->gtt_offset & (unfenced_alignment - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4c1b1e3..d2cf26f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1516,7 +1516,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL)
return NULL;
- reloc_offset = src->gtt_offset;
+ reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -1568,7 +1568,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
reloc_offset += PAGE_SIZE;
}
dst->page_count = num_pages;
- dst->gtt_offset = src->gtt_offset;
return dst;
@@ -1622,7 +1621,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
- err->gtt_offset = obj->gtt_offset;
+ err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
@@ -1720,8 +1719,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
return NULL;
obj = ring->private;
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
@@ -1802,7 +1801,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
return;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
break;
@@ -2156,10 +2155,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- obj->gtt_offset;
+ i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a68..7d283b5 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = i915_gem_obj_ggtt_offset(obj);
+ __entry->size = i915_gem_obj_ggtt_size(obj);
__entry->mappable = mappable;
),
@@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->offset = i915_gem_obj_ggtt_offset(obj);
+ __entry->size = i915_gem_obj_ggtt_size(obj);
),
TP_printk("obj=%p, offset=%08x size=%x",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 6b0013c..f7cacc0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_crtc->dspaddr_offset = linear_offset;
}
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -6566,7 +6568,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = obj->gtt_offset;
+ addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -7338,7 +7340,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7379,7 +7381,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7419,7 +7421,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7462,7 +7464,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7527,7 +7529,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e..f3c97e0 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- obj->gtt_offset, obj);
+ i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a369881..81c3ca1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
- iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->gtt_offset;
+ overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = overlay->reg_bo->gtt_offset;
+ error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 5b4ade6..d06648d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -3707,7 +3707,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3730,7 +3730,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e51ab55..54495df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == obj->gtt_offset &&
+ I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
if (ret)
goto err_unref;
- pc->gtt_offset = obj->gtt_offset;
+ pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) {
ret = -ENOMEM;
@@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = obj->gtt_offset;
+ u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err_unref;
}
- ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612..55bdf70 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH 2/6] drm/i915: Getter/setter for object attributes
2013-07-03 21:45 ` [PATCH 2/6] drm/i915: Getter/setter for object attributes Ben Widawsky
@ 2013-07-03 22:55 ` Daniel Vetter
2013-07-04 1:07 ` Ben Widawsky
0 siblings, 1 reply; 19+ messages in thread
From: Daniel Vetter @ 2013-07-03 22:55 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX
On Wed, Jul 03, 2013 at 02:45:22PM -0700, Ben Widawsky wrote:
> Soon we want to gut a lot of our existing assumptions how many address
> spaces an object can live in, and in doing so, embed the drm_mm_node in
> the object (and later the VMA).
>
> It's possible in the future we'll want to add more getter/setter
> methods, but for now this is enough to enable the VMAs.
>
> v2: Reworked commit message (Ben)
> Added comments to the main functions (Ben)
> sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch]
> (Daniel)
>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[snip]
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fd0f589..496ed3a 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
> const struct drm_i915_gem_object_ops *ops;
>
> /** Current space allocated to this object in the GTT, if any. */
> - struct drm_mm_node *gtt_space;
> + struct drm_mm_node *ggtt_space;
Is this ...
> /** Stolen memory for this object, instead of being backed by shmem. */
> struct drm_mm_node *stolen;
> struct list_head global_list;
> @@ -1333,7 +1333,7 @@ struct drm_i915_gem_object {
> *
> * This is the same as gtt_space->start
> */
> - uint32_t gtt_offset;
> + uint32_t ggtt_offset;
... and this intentional? Feels a bit like needless churn if we move the
entire thing into the vma rsn anyway. I can bikeshed while applying if
you're ok, need a solid testcase to improve my patch apply scripts anyway
;-)
Cheers, Daniel
>
> struct intel_ring_buffer *ring;
>
> @@ -1360,6 +1360,37 @@ struct drm_i915_gem_object {
>
> #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>
> +/* Offset of the first PTE pointing to this object */
> +static inline unsigned long
> +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space->start;
> +}
> +
> +/* Whether or not this object is currently mapped by the translation tables */
> +static inline bool
> +i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space != NULL;
> +}
> +
> +/* The size used in the translation tables may be larger than the actual size of
> + * the object on GEN2/GEN3 because of the way tiling is handled. See
> + * i915_gem_get_gtt_size() for more details.
> + */
> +static inline unsigned long
> +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space->size;
> +}
> +
> +static inline void
> +i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
> + enum i915_cache_level color)
> +{
> + o->ggtt_space->color = color;
> +}
> +
> /**
> * Request queue structure.
> *
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4200c32..edd5b6d 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
> static inline bool
> i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
> {
> - return obj->gtt_space && !obj->active;
> + return i915_gem_obj_ggtt_bound(obj) && !obj->active;
> }
>
> int
> @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
> mutex_lock(&dev->struct_mutex);
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> if (obj->pin_count)
> - pinned += obj->gtt_space->size;
> + pinned += i915_gem_obj_ggtt_size(obj);
> mutex_unlock(&dev->struct_mutex);
>
> args->aper_size = dev_priv->gtt.total;
> @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
> * anyway again before the next pread happens. */
> if (obj->cache_level == I915_CACHE_NONE)
> needs_clflush = 1;
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_set_to_gtt_domain(obj, false);
> if (ret)
> return ret;
> @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
> user_data = to_user_ptr(args->data_ptr);
> remain = args->size;
>
> - offset = obj->gtt_offset + args->offset;
> + offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
>
> while (remain > 0) {
> /* Operation in this page
> @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> * right away and we therefore have to clflush anyway. */
> if (obj->cache_level == I915_CACHE_NONE)
> needs_clflush_after = 1;
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_set_to_gtt_domain(obj, true);
> if (ret)
> return ret;
> @@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>
> obj->fault_mappable = true;
>
> - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
> - page_offset;
> + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
> + pfn >>= PAGE_SHIFT;
> + pfn += page_offset;
>
> /* Finally, remap it using the new GTT offset */
> ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
> @@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> if (obj->pages == NULL)
> return 0;
>
> - BUG_ON(obj->gtt_space);
> + BUG_ON(i915_gem_obj_ggtt_bound(obj));
>
> if (obj->pages_pin_count)
> return -EBUSY;
> @@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
>
> static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
> {
> - if (acthd >= obj->gtt_offset &&
> - acthd < obj->gtt_offset + obj->base.size)
> + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> return true;
>
> return false;
> @@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
>
> if (ring->hangcheck.action != wait &&
> i915_request_guilty(request, acthd, &inside)) {
> - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
> + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
> ring->name,
> inside ? "inside" : "flushing",
> request->batch_obj ?
> - request->batch_obj->gtt_offset : 0,
> + i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
> request->ctx ? request->ctx->id : 0,
> acthd);
>
> @@ -2581,7 +2582,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
> int ret;
>
> - if (obj->gtt_space == NULL)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return 0;
>
> if (obj->pin_count)
> @@ -2620,9 +2621,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> /* Avoid an unnecessary call to unbind on rebind. */
> obj->map_and_fenceable = true;
>
> - drm_mm_put_block(obj->gtt_space);
> - obj->gtt_space = NULL;
> - obj->gtt_offset = 0;
> + drm_mm_put_block(obj->ggtt_space);
> + obj->ggtt_space = NULL;
> + obj->ggtt_offset = 0;
>
> return 0;
> }
> @@ -2664,11 +2665,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
> }
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
>
> - val = (uint64_t)((obj->gtt_offset + size - 4096) &
> + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
> 0xfffff000) << 32;
> - val |= obj->gtt_offset & 0xfffff000;
> + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
> val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I965_FENCE_TILING_Y_SHIFT;
> @@ -2688,15 +2689,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> u32 val;
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
> int pitch_val;
> int tile_width;
>
> - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
> + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
> (size & -size) != size ||
> - (obj->gtt_offset & (size - 1)),
> - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> - obj->gtt_offset, obj->map_and_fenceable, size);
> + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
>
> if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
> tile_width = 128;
> @@ -2707,7 +2708,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> pitch_val = obj->stride / tile_width;
> pitch_val = ffs(pitch_val) - 1;
>
> - val = obj->gtt_offset;
> + val = i915_gem_obj_ggtt_offset(obj);
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> val |= I915_FENCE_SIZE_BITS(size);
> @@ -2732,19 +2733,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
> uint32_t val;
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
> uint32_t pitch_val;
>
> - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
> + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
> (size & -size) != size ||
> - (obj->gtt_offset & (size - 1)),
> - "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
> - obj->gtt_offset, size);
> + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> + "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
> + i915_gem_obj_ggtt_offset(obj), size);
>
> pitch_val = obj->stride / 128;
> pitch_val = ffs(pitch_val) - 1;
>
> - val = obj->gtt_offset;
> + val = i915_gem_obj_ggtt_offset(obj);
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> val |= I830_FENCE_SIZE_BITS(size);
> @@ -3033,8 +3034,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
>
> if (obj->cache_level != obj->gtt_space->color) {
> printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
> - obj->gtt_space->start,
> - obj->gtt_space->start + obj->gtt_space->size,
> + i915_gem_obj_ggtt_offset(obj),
> + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> obj->cache_level,
> obj->gtt_space->color);
> err++;
> @@ -3045,8 +3046,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
> obj->gtt_space,
> obj->cache_level)) {
> printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
> - obj->gtt_space->start,
> - obj->gtt_space->start + obj->gtt_space->size,
> + i915_gem_obj_ggtt_offset(obj),
> + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> obj->cache_level);
> err++;
> continue;
> @@ -3151,15 +3152,15 @@ search_free:
> list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> - obj->gtt_space = node;
> - obj->gtt_offset = node->start;
> + obj->ggtt_space = node;
> + obj->ggtt_offset = node->start;
>
> fenceable =
> node->size == fence_size &&
> (node->start & (fence_alignment - 1)) == 0;
>
> - mappable =
> - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
> + mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
> + dev_priv->gtt.mappable_end;
>
> obj->map_and_fenceable = mappable && fenceable;
>
> @@ -3261,7 +3262,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> int ret;
>
> /* Not valid to be called on unbound objects. */
> - if (obj->gtt_space == NULL)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return -EINVAL;
>
> if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
> @@ -3320,13 +3321,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> return -EBUSY;
> }
>
> - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
> + if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
> ret = i915_gem_object_unbind(obj);
> if (ret)
> return ret;
> }
>
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_finish_gpu(obj);
> if (ret)
> return ret;
> @@ -3349,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> obj, cache_level);
>
> - obj->gtt_space->color = cache_level;
> + i915_gem_obj_ggtt_set_color(obj, cache_level);
> }
>
> if (cache_level == I915_CACHE_NONE) {
> @@ -3630,14 +3631,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
> return -EBUSY;
>
> - if (obj->gtt_space != NULL) {
> - if ((alignment && obj->gtt_offset & (alignment - 1)) ||
> + if (i915_gem_obj_ggtt_bound(obj)) {
> + if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
> (map_and_fenceable && !obj->map_and_fenceable)) {
> WARN(obj->pin_count,
> "bo is already pinned with incorrect alignment:"
> - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
> + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
> " obj->map_and_fenceable=%d\n",
> - obj->gtt_offset, alignment,
> + i915_gem_obj_ggtt_offset(obj), alignment,
> map_and_fenceable,
> obj->map_and_fenceable);
> ret = i915_gem_object_unbind(obj);
> @@ -3646,7 +3647,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> }
> }
>
> - if (obj->gtt_space == NULL) {
> + if (!i915_gem_obj_ggtt_bound(obj)) {
> struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>
> ret = i915_gem_object_bind_to_gtt(obj, alignment,
> @@ -3672,7 +3673,7 @@ void
> i915_gem_object_unpin(struct drm_i915_gem_object *obj)
> {
> BUG_ON(obj->pin_count == 0);
> - BUG_ON(obj->gtt_space == NULL);
> + BUG_ON(!i915_gem_obj_ggtt_bound(obj));
>
> if (--obj->pin_count == 0)
> obj->pin_mappable = false;
> @@ -3722,7 +3723,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
> * as the X server doesn't manage domains yet
> */
> i915_gem_object_flush_cpu_write_domain(obj);
> - args->offset = obj->gtt_offset;
> + args->offset = i915_gem_obj_ggtt_offset(obj);
> out:
> drm_gem_object_unreference(&obj->base);
> unlock:
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 51b7a21..2074544 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
>
> intel_ring_emit(ring, MI_NOOP);
> intel_ring_emit(ring, MI_SET_CONTEXT);
> - intel_ring_emit(ring, new_context->obj->gtt_offset |
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
> MI_MM_SPACE_GTT |
> MI_SAVE_EXT_STATE_EN |
> MI_RESTORE_EXT_STATE_EN |
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index c86d5d9..5bbdea4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
> return false;
>
> list_add(&obj->exec_list, unwind);
> - return drm_mm_scan_add_block(obj->gtt_space);
> + return drm_mm_scan_add_block(obj->ggtt_space);
> }
>
> int
> @@ -107,7 +107,7 @@ none:
> struct drm_i915_gem_object,
> exec_list);
>
> - ret = drm_mm_scan_remove_block(obj->gtt_space);
> + ret = drm_mm_scan_remove_block(obj->ggtt_space);
> BUG_ON(ret);
>
> list_del_init(&obj->exec_list);
> @@ -127,7 +127,7 @@ found:
> obj = list_first_entry(&unwind_list,
> struct drm_i915_gem_object,
> exec_list);
> - if (drm_mm_scan_remove_block(obj->gtt_space)) {
> + if (drm_mm_scan_remove_block(obj->ggtt_space)) {
> list_move(&obj->exec_list, &eviction_list);
> drm_gem_object_reference(&obj->base);
> continue;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 87a3227..5aeb447 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> return -ENOENT;
>
> target_i915_obj = to_intel_bo(target_obj);
> - target_offset = target_i915_obj->gtt_offset;
> + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
>
> /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
> * pipe_control writes because the gpu doesn't properly redirect them
> @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> return ret;
>
> /* Map the page containing the relocation we're going to perform. */
> - reloc->offset += obj->gtt_offset;
> + reloc->offset += i915_gem_obj_ggtt_offset(obj);
> reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> reloc->offset & PAGE_MASK);
> reloc_entry = (uint32_t __iomem *)
> @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
> obj->has_aliasing_ppgtt_mapping = 1;
> }
>
> - if (entry->offset != obj->gtt_offset) {
> - entry->offset = obj->gtt_offset;
> + if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
> + entry->offset = i915_gem_obj_ggtt_offset(obj);
> *need_reloc = true;
> }
>
> @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
> {
> struct drm_i915_gem_exec_object2 *entry;
>
> - if (!obj->gtt_space)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return;
>
> entry = obj->exec_entry;
> @@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
> bool need_fence, need_mappable;
>
> - if (!obj->gtt_space)
> + if (!i915_gem_obj_ggtt_bound(obj))
> continue;
>
> need_fence =
> @@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> obj->tiling_mode != I915_TILING_NONE;
> need_mappable = need_fence || need_reloc_mappable(obj);
>
> - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
> + if ((entry->alignment &&
> + i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
> (need_mappable && !obj->map_and_fenceable))
> ret = i915_gem_object_unbind(obj);
> else
> @@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
>
> /* Bind fresh objects */
> list_for_each_entry(obj, objects, exec_list) {
> - if (obj->gtt_space)
> + if (i915_gem_obj_ggtt_bound(obj))
> continue;
>
> ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
> @@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
> goto err;
> }
>
> - exec_start = batch_obj->gtt_offset + args->batch_start_offset;
> + exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
> exec_len = args->batch_len;
> if (cliprects) {
> for (i = 0; i < args->num_cliprects; i++) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 5c6fc0e..1eefba7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
> enum i915_cache_level cache_level)
> {
> ppgtt->insert_entries(ppgtt, obj->pages,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> cache_level);
> }
>
> @@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
> struct drm_i915_gem_object *obj)
> {
> ppgtt->clear_range(ppgtt,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> obj->base.size >> PAGE_SHIFT);
> }
>
> @@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
> struct drm_i915_private *dev_priv = dev->dev_private;
>
> dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> cache_level);
>
> obj->has_global_gtt_mapping = 1;
> @@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
> struct drm_i915_private *dev_priv = dev->dev_private;
>
> dev_priv->gtt.gtt_clear_range(obj->base.dev,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> obj->base.size >> PAGE_SHIFT);
>
> obj->has_global_gtt_mapping = 0;
> @@ -630,22 +630,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> /* Mark any preallocated objects as occupied */
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> int ret;
> - DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> - obj->gtt_offset, obj->base.size);
> + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
> + i915_gem_obj_ggtt_offset(obj), obj->base.size);
>
> - BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> - if (!obj->gtt_space) {
> + BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
> + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> + if (!obj->ggtt_space) {
> DRM_ERROR("Failed to preserve all objects\n");
> break;
> }
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_space,
> - obj->gtt_offset,
> + obj->ggtt_space,
> + i915_gem_obj_ggtt_offset(obj),
> obj->base.size);
> if (ret) {
> DRM_DEBUG_KMS("Reservation failed\n");
> - kfree(obj->gtt_space);
> + kfree(obj->ggtt_space);
> }
> obj->has_global_gtt_mapping = 1;
> }
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index f9db84a..cf0d0e0 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -374,23 +374,23 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> - if (!obj->gtt_space) {
> + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> + if (!obj->ggtt_space) {
> DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> goto unref_out;
> }
>
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_space,
> + obj->ggtt_space,
> gtt_offset, size);
> if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> goto unref_out;
> }
> } else
> - obj->gtt_space = I915_GTT_RESERVED;
> + obj->ggtt_space = I915_GTT_RESERVED;
>
> - obj->gtt_offset = gtt_offset;
> + obj->ggtt_offset = gtt_offset;
> obj->has_global_gtt_mapping = 1;
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 537545b..92a8d27 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
> return true;
>
> if (INTEL_INFO(obj->base.dev)->gen == 3) {
> - if (obj->gtt_offset & ~I915_FENCE_START_MASK)
> + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> return false;
> } else {
> - if (obj->gtt_offset & ~I830_FENCE_START_MASK)
> + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
> return false;
> }
>
> size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
> - if (obj->gtt_space->size != size)
> + if (i915_gem_obj_ggtt_size(obj) != size)
> return false;
>
> - if (obj->gtt_offset & (size - 1))
> + if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
> return false;
>
> return true;
> @@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> */
>
> obj->map_and_fenceable =
> - obj->gtt_space == NULL ||
> - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
> + !i915_gem_obj_ggtt_bound(obj) ||
> + (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
> i915_gem_object_fence_ok(obj, args->tiling_mode));
>
> /* Rebind if we need a change of alignment */
> @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> i915_gem_get_gtt_alignment(dev, obj->base.size,
> args->tiling_mode,
> false);
> - if (obj->gtt_offset & (unfenced_alignment - 1))
> + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
> ret = i915_gem_object_unbind(obj);
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 4c1b1e3..d2cf26f 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -1516,7 +1516,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> if (dst == NULL)
> return NULL;
>
> - reloc_offset = src->gtt_offset;
> + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
> for (i = 0; i < num_pages; i++) {
> unsigned long flags;
> void *d;
> @@ -1568,7 +1568,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> reloc_offset += PAGE_SIZE;
> }
> dst->page_count = num_pages;
> - dst->gtt_offset = src->gtt_offset;
>
> return dst;
>
> @@ -1622,7 +1621,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
> err->name = obj->base.name;
> err->rseqno = obj->last_read_seqno;
> err->wseqno = obj->last_write_seqno;
> - err->gtt_offset = obj->gtt_offset;
> + err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> err->read_domains = obj->base.read_domains;
> err->write_domain = obj->base.write_domain;
> err->fence_reg = obj->fence_reg;
> @@ -1720,8 +1719,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> return NULL;
>
> obj = ring->private;
> - if (acthd >= obj->gtt_offset &&
> - acthd < obj->gtt_offset + obj->base.size)
> + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> return i915_error_object_create(dev_priv, obj);
> }
>
> @@ -1802,7 +1801,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
> return;
>
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
> + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
> ering->ctx = i915_error_object_create_sized(dev_priv,
> obj, 1);
> break;
> @@ -2156,10 +2155,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
> if (INTEL_INFO(dev)->gen >= 4) {
> int dspsurf = DSPSURF(intel_crtc->plane);
> stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
> - obj->gtt_offset;
> + i915_gem_obj_ggtt_offset(obj);
> } else {
> int dspaddr = DSPADDR(intel_crtc->plane);
> - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
> + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
> crtc->y * crtc->fb->pitches[0] +
> crtc->x * crtc->fb->bits_per_pixel/8);
> }
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 3db4a68..7d283b5 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
>
> TP_fast_assign(
> __entry->obj = obj;
> - __entry->offset = obj->gtt_space->start;
> - __entry->size = obj->gtt_space->size;
> + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> + __entry->size = i915_gem_obj_ggtt_size(obj);
> __entry->mappable = mappable;
> ),
>
> @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
>
> TP_fast_assign(
> __entry->obj = obj;
> - __entry->offset = obj->gtt_space->start;
> - __entry->size = obj->gtt_space->size;
> + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> + __entry->size = i915_gem_obj_ggtt_size(obj);
> ),
>
> TP_printk("obj=%p, offset=%08x size=%x",
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 6b0013c..f7cacc0 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
> intel_crtc->dspaddr_offset = linear_offset;
> }
>
> - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> + fb->pitches[0]);
> I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> if (INTEL_INFO(dev)->gen >= 4) {
> I915_MODIFY_DISPBASE(DSPSURF(plane),
> - obj->gtt_offset + intel_crtc->dspaddr_offset);
> + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
> I915_WRITE(DSPLINOFF(plane), linear_offset);
> } else
> - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
> + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
> POSTING_READ(reg);
>
> return 0;
> @@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
> fb->pitches[0]);
> linear_offset -= intel_crtc->dspaddr_offset;
>
> - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> + fb->pitches[0]);
> I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> I915_MODIFY_DISPBASE(DSPSURF(plane),
> - obj->gtt_offset + intel_crtc->dspaddr_offset);
> + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> if (IS_HASWELL(dev)) {
> I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
> } else {
> @@ -6566,7 +6568,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
> goto fail_unpin;
> }
>
> - addr = obj->gtt_offset;
> + addr = i915_gem_obj_ggtt_offset(obj);
> } else {
> int align = IS_I830(dev) ? 16 * 1024 : 256;
> ret = i915_gem_attach_phys_object(dev, obj,
> @@ -7338,7 +7340,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, 0); /* aux display base address, unused */
>
> intel_mark_page_flip_active(intel_crtc);
> @@ -7379,7 +7381,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, MI_NOOP);
>
> intel_mark_page_flip_active(intel_crtc);
> @@ -7419,7 +7421,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> intel_ring_emit(ring,
> - (obj->gtt_offset + intel_crtc->dspaddr_offset) |
> + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
> obj->tiling_mode);
>
> /* XXX Enabling the panel-fitter across page-flip is so far
> @@ -7462,7 +7464,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
>
> /* Contrary to the suggestions in the documentation,
> * "Enable Panel Fitter" does not seem to be required when page
> @@ -7527,7 +7529,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>
> intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
> intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, (MI_NOOP));
>
> intel_mark_page_flip_active(intel_crtc);
> diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
> index dff669e..f3c97e0 100644
> --- a/drivers/gpu/drm/i915/intel_fb.c
> +++ b/drivers/gpu/drm/i915/intel_fb.c
> @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
> info->apertures->ranges[0].base = dev->mode_config.fb_base;
> info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
>
> - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
> + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
> info->fix.smem_len = size;
>
> info->screen_base =
> - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> size);
> if (!info->screen_base) {
> ret = -ENOSPC;
> @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
>
> /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
>
> - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
> + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
> fb->width, fb->height,
> - obj->gtt_offset, obj);
> + i915_gem_obj_ggtt_offset(obj), obj);
>
>
> mutex_unlock(&dev->struct_mutex);
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index a369881..81c3ca1 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
> regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
> else
> regs = io_mapping_map_wc(dev_priv->gtt.mappable,
> - overlay->reg_bo->gtt_offset);
> + i915_gem_obj_ggtt_offset(overlay->reg_bo));
>
> return regs;
> }
> @@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> swidth = params->src_w;
> swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
> sheight = params->src_h;
> - iowrite32(new_bo->gtt_offset + params->offset_Y, ®s->OBUF_0Y);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, ®s->OBUF_0Y);
> ostride = params->stride_Y;
>
> if (params->format & I915_OVERLAY_YUV_PLANAR) {
> @@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> params->src_w/uv_hscale);
> swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
> sheight |= (params->src_h/uv_vscale) << 16;
> - iowrite32(new_bo->gtt_offset + params->offset_U, ®s->OBUF_0U);
> - iowrite32(new_bo->gtt_offset + params->offset_V, ®s->OBUF_0V);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, ®s->OBUF_0U);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, ®s->OBUF_0V);
> ostride |= params->stride_UV << 16;
> }
>
> @@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
> DRM_ERROR("failed to pin overlay register bo\n");
> goto out_free_bo;
> }
> - overlay->flip_addr = reg_bo->gtt_offset;
> + overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
>
> ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
> if (ret) {
> @@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
> overlay->reg_bo->phys_obj->handle->vaddr;
> else
> regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> - overlay->reg_bo->gtt_offset);
> + i915_gem_obj_ggtt_offset(overlay->reg_bo));
>
> return regs;
> }
> @@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
> if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
> error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
> else
> - error->base = overlay->reg_bo->gtt_offset;
> + error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
>
> regs = intel_overlay_map_regs_atomic(overlay);
> if (!regs)
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 5b4ade6..d06648d 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
> (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
> I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
> - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
> + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
> /* enable it... */
> I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
>
> @@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> struct drm_i915_gem_object *obj = intel_fb->obj;
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>
> - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
> + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
>
> I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
> IVB_DPFC_CTL_FENCE_EN |
> @@ -3707,7 +3707,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
>
> intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
> intel_ring_emit(ring, MI_SET_CONTEXT);
> - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
> MI_MM_SPACE_GTT |
> MI_SAVE_EXT_STATE_EN |
> MI_RESTORE_EXT_STATE_EN |
> @@ -3730,7 +3730,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
> return;
> }
>
> - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
> + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
> I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
> }
>
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index e51ab55..54495df 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
> * registers with the above sequence (the readback of the HEAD registers
> * also enforces ordering), otherwise the hw might lose the new ring
> * register values. */
> - I915_WRITE_START(ring, obj->gtt_offset);
> + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
> I915_WRITE_CTL(ring,
> ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
> | RING_VALID);
>
> /* If the head is still not zero, the ring is dead */
> if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
> - I915_READ_START(ring) == obj->gtt_offset &&
> + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
> (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
> DRM_ERROR("%s initialization failed "
> "ctl %08x head %08x tail %08x start %08x\n",
> @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
> if (ret)
> goto err_unref;
>
> - pc->gtt_offset = obj->gtt_offset;
> + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> pc->cpu_page = kmap(sg_page(obj->pages->sgl));
> if (pc->cpu_page == NULL) {
> ret = -ENOMEM;
> @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
> intel_ring_advance(ring);
> } else {
> struct drm_i915_gem_object *obj = ring->private;
> - u32 cs_offset = obj->gtt_offset;
> + u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
>
> if (len > I830_BATCH_LIMIT)
> return -ENOSPC;
> @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
> goto err_unref;
> }
>
> - ring->status_page.gfx_addr = obj->gtt_offset;
> + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
> ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
> if (ring->status_page.page_addr == NULL) {
> ret = -ENOMEM;
> @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
> goto err_unpin;
>
> ring->virtual_start =
> - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> ring->size);
> if (ring->virtual_start == NULL) {
> DRM_ERROR("Failed to map ringbuffer.\n");
> diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
> index 1fa5612..55bdf70 100644
> --- a/drivers/gpu/drm/i915/intel_sprite.c
> +++ b/drivers/gpu/drm/i915/intel_sprite.c
> @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
>
> I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
> I915_WRITE(SPCNTR(pipe, plane), sprctl);
> - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
> + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
> sprsurf_offset);
> POSTING_READ(SPSURF(pipe, plane));
> }
> @@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> if (intel_plane->can_scale)
> I915_WRITE(SPRSCALE(pipe), sprscale);
> I915_WRITE(SPRCTL(pipe), sprctl);
> - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
> + I915_MODIFY_DISPBASE(SPRSURF(pipe),
> + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
> POSTING_READ(SPRSURF(pipe));
>
> /* potentially re-enable LP watermarks */
> @@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
> I915_WRITE(DVSSCALE(pipe), dvsscale);
> I915_WRITE(DVSCNTR(pipe), dvscntr);
> - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
> + I915_MODIFY_DISPBASE(DVSSURF(pipe),
> + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
> POSTING_READ(DVSSURF(pipe));
> }
>
> --
> 1.8.3.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 19+ messages in thread* Re: [PATCH 2/6] drm/i915: Getter/setter for object attributes
2013-07-03 22:55 ` Daniel Vetter
@ 2013-07-04 1:07 ` Ben Widawsky
0 siblings, 0 replies; 19+ messages in thread
From: Ben Widawsky @ 2013-07-04 1:07 UTC (permalink / raw)
To: Daniel Vetter; +Cc: Intel GFX
On Thu, Jul 04, 2013 at 12:55:51AM +0200, Daniel Vetter wrote:
> On Wed, Jul 03, 2013 at 02:45:22PM -0700, Ben Widawsky wrote:
> > Soon we want to gut a lot of our existing assumptions how many address
> > spaces an object can live in, and in doing so, embed the drm_mm_node in
> > the object (and later the VMA).
> >
> > It's possible in the future we'll want to add more getter/setter
> > methods, but for now this is enough to enable the VMAs.
> >
> > v2: Reworked commit message (Ben)
> > Added comments to the main functions (Ben)
> > sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch]
> > sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch]
> > sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch]
> > sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch]
> > (Daniel)
> >
> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
>
> [snip]
>
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index fd0f589..496ed3a 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
> > const struct drm_i915_gem_object_ops *ops;
> >
> > /** Current space allocated to this object in the GTT, if any. */
> > - struct drm_mm_node *gtt_space;
> > + struct drm_mm_node *ggtt_space;
>
> Is this ...
>
> > /** Stolen memory for this object, instead of being backed by shmem. */
> > struct drm_mm_node *stolen;
> > struct list_head global_list;
> > @@ -1333,7 +1333,7 @@ struct drm_i915_gem_object {
> > *
> > * This is the same as gtt_space->start
> > */
> > - uint32_t gtt_offset;
> > + uint32_t ggtt_offset;
>
> ... and this intentional? Feels a bit like needless churn if we move the
> entire thing into the vma rsn anyway. I can bikeshed while applying if
> you're ok, need a solid testcase to improve my patch apply scripts anyway
> ;-)
>
Yeah. With everything else being converted to ggtt, I liked naming the
node ggtt. As for the offset, I used that to find all the places that
needed fixing up, and then decided not to change it back since I killed
it in a future patch anyway.
If you want to fix it up on merge, I am fine with that - though I guess
after the fact, I do prefer ggtt.
> Cheers, Daniel
>
> >
> > struct intel_ring_buffer *ring;
> >
> > @@ -1360,6 +1360,37 @@ struct drm_i915_gem_object {
> >
> > #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
> >
> > +/* Offset of the first PTE pointing to this object */
> > +static inline unsigned long
> > +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
> > +{
> > + return o->ggtt_space->start;
> > +}
> > +
> > +/* Whether or not this object is currently mapped by the translation tables */
> > +static inline bool
> > +i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
> > +{
> > + return o->ggtt_space != NULL;
> > +}
> > +
> > +/* The size used in the translation tables may be larger than the actual size of
> > + * the object on GEN2/GEN3 because of the way tiling is handled. See
> > + * i915_gem_get_gtt_size() for more details.
> > + */
> > +static inline unsigned long
> > +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
> > +{
> > + return o->ggtt_space->size;
> > +}
> > +
> > +static inline void
> > +i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
> > + enum i915_cache_level color)
> > +{
> > + o->ggtt_space->color = color;
> > +}
> > +
> > /**
> > * Request queue structure.
> > *
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 4200c32..edd5b6d 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
> > static inline bool
> > i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
> > {
> > - return obj->gtt_space && !obj->active;
> > + return i915_gem_obj_ggtt_bound(obj) && !obj->active;
> > }
> >
> > int
> > @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
> > mutex_lock(&dev->struct_mutex);
> > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> > if (obj->pin_count)
> > - pinned += obj->gtt_space->size;
> > + pinned += i915_gem_obj_ggtt_size(obj);
> > mutex_unlock(&dev->struct_mutex);
> >
> > args->aper_size = dev_priv->gtt.total;
> > @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
> > * anyway again before the next pread happens. */
> > if (obj->cache_level == I915_CACHE_NONE)
> > needs_clflush = 1;
> > - if (obj->gtt_space) {
> > + if (i915_gem_obj_ggtt_bound(obj)) {
> > ret = i915_gem_object_set_to_gtt_domain(obj, false);
> > if (ret)
> > return ret;
> > @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
> > user_data = to_user_ptr(args->data_ptr);
> > remain = args->size;
> >
> > - offset = obj->gtt_offset + args->offset;
> > + offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
> >
> > while (remain > 0) {
> > /* Operation in this page
> > @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> > * right away and we therefore have to clflush anyway. */
> > if (obj->cache_level == I915_CACHE_NONE)
> > needs_clflush_after = 1;
> > - if (obj->gtt_space) {
> > + if (i915_gem_obj_ggtt_bound(obj)) {
> > ret = i915_gem_object_set_to_gtt_domain(obj, true);
> > if (ret)
> > return ret;
> > @@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> >
> > obj->fault_mappable = true;
> >
> > - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
> > - page_offset;
> > + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
> > + pfn >>= PAGE_SHIFT;
> > + pfn += page_offset;
> >
> > /* Finally, remap it using the new GTT offset */
> > ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
> > @@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> > if (obj->pages == NULL)
> > return 0;
> >
> > - BUG_ON(obj->gtt_space);
> > + BUG_ON(i915_gem_obj_ggtt_bound(obj));
> >
> > if (obj->pages_pin_count)
> > return -EBUSY;
> > @@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
> >
> > static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
> > {
> > - if (acthd >= obj->gtt_offset &&
> > - acthd < obj->gtt_offset + obj->base.size)
> > + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> > + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> > return true;
> >
> > return false;
> > @@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
> >
> > if (ring->hangcheck.action != wait &&
> > i915_request_guilty(request, acthd, &inside)) {
> > - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
> > + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
> > ring->name,
> > inside ? "inside" : "flushing",
> > request->batch_obj ?
> > - request->batch_obj->gtt_offset : 0,
> > + i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
> > request->ctx ? request->ctx->id : 0,
> > acthd);
> >
> > @@ -2581,7 +2582,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> > drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
> > int ret;
> >
> > - if (obj->gtt_space == NULL)
> > + if (!i915_gem_obj_ggtt_bound(obj))
> > return 0;
> >
> > if (obj->pin_count)
> > @@ -2620,9 +2621,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> > /* Avoid an unnecessary call to unbind on rebind. */
> > obj->map_and_fenceable = true;
> >
> > - drm_mm_put_block(obj->gtt_space);
> > - obj->gtt_space = NULL;
> > - obj->gtt_offset = 0;
> > + drm_mm_put_block(obj->ggtt_space);
> > + obj->ggtt_space = NULL;
> > + obj->ggtt_offset = 0;
> >
> > return 0;
> > }
> > @@ -2664,11 +2665,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
> > }
> >
> > if (obj) {
> > - u32 size = obj->gtt_space->size;
> > + u32 size = i915_gem_obj_ggtt_size(obj);
> >
> > - val = (uint64_t)((obj->gtt_offset + size - 4096) &
> > + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
> > 0xfffff000) << 32;
> > - val |= obj->gtt_offset & 0xfffff000;
> > + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
> > val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
> > if (obj->tiling_mode == I915_TILING_Y)
> > val |= 1 << I965_FENCE_TILING_Y_SHIFT;
> > @@ -2688,15 +2689,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> > u32 val;
> >
> > if (obj) {
> > - u32 size = obj->gtt_space->size;
> > + u32 size = i915_gem_obj_ggtt_size(obj);
> > int pitch_val;
> > int tile_width;
> >
> > - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
> > + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
> > (size & -size) != size ||
> > - (obj->gtt_offset & (size - 1)),
> > - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> > - obj->gtt_offset, obj->map_and_fenceable, size);
> > + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> > + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> > + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
> >
> > if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
> > tile_width = 128;
> > @@ -2707,7 +2708,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> > pitch_val = obj->stride / tile_width;
> > pitch_val = ffs(pitch_val) - 1;
> >
> > - val = obj->gtt_offset;
> > + val = i915_gem_obj_ggtt_offset(obj);
> > if (obj->tiling_mode == I915_TILING_Y)
> > val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> > val |= I915_FENCE_SIZE_BITS(size);
> > @@ -2732,19 +2733,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
> > uint32_t val;
> >
> > if (obj) {
> > - u32 size = obj->gtt_space->size;
> > + u32 size = i915_gem_obj_ggtt_size(obj);
> > uint32_t pitch_val;
> >
> > - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
> > + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
> > (size & -size) != size ||
> > - (obj->gtt_offset & (size - 1)),
> > - "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
> > - obj->gtt_offset, size);
> > + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> > + "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
> > + i915_gem_obj_ggtt_offset(obj), size);
> >
> > pitch_val = obj->stride / 128;
> > pitch_val = ffs(pitch_val) - 1;
> >
> > - val = obj->gtt_offset;
> > + val = i915_gem_obj_ggtt_offset(obj);
> > if (obj->tiling_mode == I915_TILING_Y)
> > val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> > val |= I830_FENCE_SIZE_BITS(size);
> > @@ -3033,8 +3034,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
> >
> > if (obj->cache_level != obj->gtt_space->color) {
> > printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
> > - obj->gtt_space->start,
> > - obj->gtt_space->start + obj->gtt_space->size,
> > + i915_gem_obj_ggtt_offset(obj),
> > + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> > obj->cache_level,
> > obj->gtt_space->color);
> > err++;
> > @@ -3045,8 +3046,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
> > obj->gtt_space,
> > obj->cache_level)) {
> > printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
> > - obj->gtt_space->start,
> > - obj->gtt_space->start + obj->gtt_space->size,
> > + i915_gem_obj_ggtt_offset(obj),
> > + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> > obj->cache_level);
> > err++;
> > continue;
> > @@ -3151,15 +3152,15 @@ search_free:
> > list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> > list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> >
> > - obj->gtt_space = node;
> > - obj->gtt_offset = node->start;
> > + obj->ggtt_space = node;
> > + obj->ggtt_offset = node->start;
> >
> > fenceable =
> > node->size == fence_size &&
> > (node->start & (fence_alignment - 1)) == 0;
> >
> > - mappable =
> > - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
> > + mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
> > + dev_priv->gtt.mappable_end;
> >
> > obj->map_and_fenceable = mappable && fenceable;
> >
> > @@ -3261,7 +3262,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> > int ret;
> >
> > /* Not valid to be called on unbound objects. */
> > - if (obj->gtt_space == NULL)
> > + if (!i915_gem_obj_ggtt_bound(obj))
> > return -EINVAL;
> >
> > if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
> > @@ -3320,13 +3321,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> > return -EBUSY;
> > }
> >
> > - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
> > + if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
> > ret = i915_gem_object_unbind(obj);
> > if (ret)
> > return ret;
> > }
> >
> > - if (obj->gtt_space) {
> > + if (i915_gem_obj_ggtt_bound(obj)) {
> > ret = i915_gem_object_finish_gpu(obj);
> > if (ret)
> > return ret;
> > @@ -3349,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> > i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> > obj, cache_level);
> >
> > - obj->gtt_space->color = cache_level;
> > + i915_gem_obj_ggtt_set_color(obj, cache_level);
> > }
> >
> > if (cache_level == I915_CACHE_NONE) {
> > @@ -3630,14 +3631,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> > if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
> > return -EBUSY;
> >
> > - if (obj->gtt_space != NULL) {
> > - if ((alignment && obj->gtt_offset & (alignment - 1)) ||
> > + if (i915_gem_obj_ggtt_bound(obj)) {
> > + if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
> > (map_and_fenceable && !obj->map_and_fenceable)) {
> > WARN(obj->pin_count,
> > "bo is already pinned with incorrect alignment:"
> > - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
> > + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
> > " obj->map_and_fenceable=%d\n",
> > - obj->gtt_offset, alignment,
> > + i915_gem_obj_ggtt_offset(obj), alignment,
> > map_and_fenceable,
> > obj->map_and_fenceable);
> > ret = i915_gem_object_unbind(obj);
> > @@ -3646,7 +3647,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> > }
> > }
> >
> > - if (obj->gtt_space == NULL) {
> > + if (!i915_gem_obj_ggtt_bound(obj)) {
> > struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
> >
> > ret = i915_gem_object_bind_to_gtt(obj, alignment,
> > @@ -3672,7 +3673,7 @@ void
> > i915_gem_object_unpin(struct drm_i915_gem_object *obj)
> > {
> > BUG_ON(obj->pin_count == 0);
> > - BUG_ON(obj->gtt_space == NULL);
> > + BUG_ON(!i915_gem_obj_ggtt_bound(obj));
> >
> > if (--obj->pin_count == 0)
> > obj->pin_mappable = false;
> > @@ -3722,7 +3723,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
> > * as the X server doesn't manage domains yet
> > */
> > i915_gem_object_flush_cpu_write_domain(obj);
> > - args->offset = obj->gtt_offset;
> > + args->offset = i915_gem_obj_ggtt_offset(obj);
> > out:
> > drm_gem_object_unreference(&obj->base);
> > unlock:
> > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> > index 51b7a21..2074544 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> > @@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
> >
> > intel_ring_emit(ring, MI_NOOP);
> > intel_ring_emit(ring, MI_SET_CONTEXT);
> > - intel_ring_emit(ring, new_context->obj->gtt_offset |
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
> > MI_MM_SPACE_GTT |
> > MI_SAVE_EXT_STATE_EN |
> > MI_RESTORE_EXT_STATE_EN |
> > diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> > index c86d5d9..5bbdea4 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> > @@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
> > return false;
> >
> > list_add(&obj->exec_list, unwind);
> > - return drm_mm_scan_add_block(obj->gtt_space);
> > + return drm_mm_scan_add_block(obj->ggtt_space);
> > }
> >
> > int
> > @@ -107,7 +107,7 @@ none:
> > struct drm_i915_gem_object,
> > exec_list);
> >
> > - ret = drm_mm_scan_remove_block(obj->gtt_space);
> > + ret = drm_mm_scan_remove_block(obj->ggtt_space);
> > BUG_ON(ret);
> >
> > list_del_init(&obj->exec_list);
> > @@ -127,7 +127,7 @@ found:
> > obj = list_first_entry(&unwind_list,
> > struct drm_i915_gem_object,
> > exec_list);
> > - if (drm_mm_scan_remove_block(obj->gtt_space)) {
> > + if (drm_mm_scan_remove_block(obj->ggtt_space)) {
> > list_move(&obj->exec_list, &eviction_list);
> > drm_gem_object_reference(&obj->base);
> > continue;
> > diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > index 87a3227..5aeb447 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> > @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> > return -ENOENT;
> >
> > target_i915_obj = to_intel_bo(target_obj);
> > - target_offset = target_i915_obj->gtt_offset;
> > + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
> >
> > /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
> > * pipe_control writes because the gpu doesn't properly redirect them
> > @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> > return ret;
> >
> > /* Map the page containing the relocation we're going to perform. */
> > - reloc->offset += obj->gtt_offset;
> > + reloc->offset += i915_gem_obj_ggtt_offset(obj);
> > reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> > reloc->offset & PAGE_MASK);
> > reloc_entry = (uint32_t __iomem *)
> > @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
> > obj->has_aliasing_ppgtt_mapping = 1;
> > }
> >
> > - if (entry->offset != obj->gtt_offset) {
> > - entry->offset = obj->gtt_offset;
> > + if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
> > + entry->offset = i915_gem_obj_ggtt_offset(obj);
> > *need_reloc = true;
> > }
> >
> > @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
> > {
> > struct drm_i915_gem_exec_object2 *entry;
> >
> > - if (!obj->gtt_space)
> > + if (!i915_gem_obj_ggtt_bound(obj))
> > return;
> >
> > entry = obj->exec_entry;
> > @@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> > struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
> > bool need_fence, need_mappable;
> >
> > - if (!obj->gtt_space)
> > + if (!i915_gem_obj_ggtt_bound(obj))
> > continue;
> >
> > need_fence =
> > @@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> > obj->tiling_mode != I915_TILING_NONE;
> > need_mappable = need_fence || need_reloc_mappable(obj);
> >
> > - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
> > + if ((entry->alignment &&
> > + i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
> > (need_mappable && !obj->map_and_fenceable))
> > ret = i915_gem_object_unbind(obj);
> > else
> > @@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> >
> > /* Bind fresh objects */
> > list_for_each_entry(obj, objects, exec_list) {
> > - if (obj->gtt_space)
> > + if (i915_gem_obj_ggtt_bound(obj))
> > continue;
> >
> > ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
> > @@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
> > goto err;
> > }
> >
> > - exec_start = batch_obj->gtt_offset + args->batch_start_offset;
> > + exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
> > exec_len = args->batch_len;
> > if (cliprects) {
> > for (i = 0; i < args->num_cliprects; i++) {
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > index 5c6fc0e..1eefba7 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > @@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
> > enum i915_cache_level cache_level)
> > {
> > ppgtt->insert_entries(ppgtt, obj->pages,
> > - obj->gtt_space->start >> PAGE_SHIFT,
> > + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > cache_level);
> > }
> >
> > @@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
> > struct drm_i915_gem_object *obj)
> > {
> > ppgtt->clear_range(ppgtt,
> > - obj->gtt_space->start >> PAGE_SHIFT,
> > + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > obj->base.size >> PAGE_SHIFT);
> > }
> >
> > @@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
> > struct drm_i915_private *dev_priv = dev->dev_private;
> >
> > dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
> > - obj->gtt_space->start >> PAGE_SHIFT,
> > + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > cache_level);
> >
> > obj->has_global_gtt_mapping = 1;
> > @@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
> > struct drm_i915_private *dev_priv = dev->dev_private;
> >
> > dev_priv->gtt.gtt_clear_range(obj->base.dev,
> > - obj->gtt_space->start >> PAGE_SHIFT,
> > + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> > obj->base.size >> PAGE_SHIFT);
> >
> > obj->has_global_gtt_mapping = 0;
> > @@ -630,22 +630,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> > /* Mark any preallocated objects as occupied */
> > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> > int ret;
> > - DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> > - obj->gtt_offset, obj->base.size);
> > + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
> > + i915_gem_obj_ggtt_offset(obj), obj->base.size);
> >
> > - BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> > - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> > - if (!obj->gtt_space) {
> > + BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
> > + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> > + if (!obj->ggtt_space) {
> > DRM_ERROR("Failed to preserve all objects\n");
> > break;
> > }
> > ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > - obj->gtt_space,
> > - obj->gtt_offset,
> > + obj->ggtt_space,
> > + i915_gem_obj_ggtt_offset(obj),
> > obj->base.size);
> > if (ret) {
> > DRM_DEBUG_KMS("Reservation failed\n");
> > - kfree(obj->gtt_space);
> > + kfree(obj->ggtt_space);
> > }
> > obj->has_global_gtt_mapping = 1;
> > }
> > diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > index f9db84a..cf0d0e0 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > @@ -374,23 +374,23 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> > * later.
> > */
> > if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> > - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> > - if (!obj->gtt_space) {
> > + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> > + if (!obj->ggtt_space) {
> > DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> > goto unref_out;
> > }
> >
> > ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > - obj->gtt_space,
> > + obj->ggtt_space,
> > gtt_offset, size);
> > if (ret) {
> > DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> > goto unref_out;
> > }
> > } else
> > - obj->gtt_space = I915_GTT_RESERVED;
> > + obj->ggtt_space = I915_GTT_RESERVED;
> >
> > - obj->gtt_offset = gtt_offset;
> > + obj->ggtt_offset = gtt_offset;
> > obj->has_global_gtt_mapping = 1;
> >
> > list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> > diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> > index 537545b..92a8d27 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> > @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
> > return true;
> >
> > if (INTEL_INFO(obj->base.dev)->gen == 3) {
> > - if (obj->gtt_offset & ~I915_FENCE_START_MASK)
> > + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> > return false;
> > } else {
> > - if (obj->gtt_offset & ~I830_FENCE_START_MASK)
> > + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
> > return false;
> > }
> >
> > size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
> > - if (obj->gtt_space->size != size)
> > + if (i915_gem_obj_ggtt_size(obj) != size)
> > return false;
> >
> > - if (obj->gtt_offset & (size - 1))
> > + if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
> > return false;
> >
> > return true;
> > @@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> > */
> >
> > obj->map_and_fenceable =
> > - obj->gtt_space == NULL ||
> > - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
> > + !i915_gem_obj_ggtt_bound(obj) ||
> > + (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
> > i915_gem_object_fence_ok(obj, args->tiling_mode));
> >
> > /* Rebind if we need a change of alignment */
> > @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> > i915_gem_get_gtt_alignment(dev, obj->base.size,
> > args->tiling_mode,
> > false);
> > - if (obj->gtt_offset & (unfenced_alignment - 1))
> > + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
> > ret = i915_gem_object_unbind(obj);
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> > index 4c1b1e3..d2cf26f 100644
> > --- a/drivers/gpu/drm/i915/i915_irq.c
> > +++ b/drivers/gpu/drm/i915/i915_irq.c
> > @@ -1516,7 +1516,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> > if (dst == NULL)
> > return NULL;
> >
> > - reloc_offset = src->gtt_offset;
> > + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
> > for (i = 0; i < num_pages; i++) {
> > unsigned long flags;
> > void *d;
> > @@ -1568,7 +1568,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> > reloc_offset += PAGE_SIZE;
> > }
> > dst->page_count = num_pages;
> > - dst->gtt_offset = src->gtt_offset;
> >
> > return dst;
> >
> > @@ -1622,7 +1621,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
> > err->name = obj->base.name;
> > err->rseqno = obj->last_read_seqno;
> > err->wseqno = obj->last_write_seqno;
> > - err->gtt_offset = obj->gtt_offset;
> > + err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> > err->read_domains = obj->base.read_domains;
> > err->write_domain = obj->base.write_domain;
> > err->fence_reg = obj->fence_reg;
> > @@ -1720,8 +1719,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> > return NULL;
> >
> > obj = ring->private;
> > - if (acthd >= obj->gtt_offset &&
> > - acthd < obj->gtt_offset + obj->base.size)
> > + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> > + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> > return i915_error_object_create(dev_priv, obj);
> > }
> >
> > @@ -1802,7 +1801,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
> > return;
> >
> > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> > - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
> > + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
> > ering->ctx = i915_error_object_create_sized(dev_priv,
> > obj, 1);
> > break;
> > @@ -2156,10 +2155,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
> > if (INTEL_INFO(dev)->gen >= 4) {
> > int dspsurf = DSPSURF(intel_crtc->plane);
> > stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
> > - obj->gtt_offset;
> > + i915_gem_obj_ggtt_offset(obj);
> > } else {
> > int dspaddr = DSPADDR(intel_crtc->plane);
> > - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
> > + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
> > crtc->y * crtc->fb->pitches[0] +
> > crtc->x * crtc->fb->bits_per_pixel/8);
> > }
> > diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> > index 3db4a68..7d283b5 100644
> > --- a/drivers/gpu/drm/i915/i915_trace.h
> > +++ b/drivers/gpu/drm/i915/i915_trace.h
> > @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
> >
> > TP_fast_assign(
> > __entry->obj = obj;
> > - __entry->offset = obj->gtt_space->start;
> > - __entry->size = obj->gtt_space->size;
> > + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> > + __entry->size = i915_gem_obj_ggtt_size(obj);
> > __entry->mappable = mappable;
> > ),
> >
> > @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
> >
> > TP_fast_assign(
> > __entry->obj = obj;
> > - __entry->offset = obj->gtt_space->start;
> > - __entry->size = obj->gtt_space->size;
> > + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> > + __entry->size = i915_gem_obj_ggtt_size(obj);
> > ),
> >
> > TP_printk("obj=%p, offset=%08x size=%x",
> > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> > index 6b0013c..f7cacc0 100644
> > --- a/drivers/gpu/drm/i915/intel_display.c
> > +++ b/drivers/gpu/drm/i915/intel_display.c
> > @@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
> > intel_crtc->dspaddr_offset = linear_offset;
> > }
> >
> > - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> > - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> > + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> > + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> > + fb->pitches[0]);
> > I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> > if (INTEL_INFO(dev)->gen >= 4) {
> > I915_MODIFY_DISPBASE(DSPSURF(plane),
> > - obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> > I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
> > I915_WRITE(DSPLINOFF(plane), linear_offset);
> > } else
> > - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
> > + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
> > POSTING_READ(reg);
> >
> > return 0;
> > @@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
> > fb->pitches[0]);
> > linear_offset -= intel_crtc->dspaddr_offset;
> >
> > - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> > - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> > + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> > + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> > + fb->pitches[0]);
> > I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> > I915_MODIFY_DISPBASE(DSPSURF(plane),
> > - obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> > if (IS_HASWELL(dev)) {
> > I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
> > } else {
> > @@ -6566,7 +6568,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
> > goto fail_unpin;
> > }
> >
> > - addr = obj->gtt_offset;
> > + addr = i915_gem_obj_ggtt_offset(obj);
> > } else {
> > int align = IS_I830(dev) ? 16 * 1024 : 256;
> > ret = i915_gem_attach_phys_object(dev, obj,
> > @@ -7338,7 +7340,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
> > intel_ring_emit(ring, MI_DISPLAY_FLIP |
> > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> > intel_ring_emit(ring, fb->pitches[0]);
> > - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> > intel_ring_emit(ring, 0); /* aux display base address, unused */
> >
> > intel_mark_page_flip_active(intel_crtc);
> > @@ -7379,7 +7381,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
> > intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
> > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> > intel_ring_emit(ring, fb->pitches[0]);
> > - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> > intel_ring_emit(ring, MI_NOOP);
> >
> > intel_mark_page_flip_active(intel_crtc);
> > @@ -7419,7 +7421,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
> > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> > intel_ring_emit(ring, fb->pitches[0]);
> > intel_ring_emit(ring,
> > - (obj->gtt_offset + intel_crtc->dspaddr_offset) |
> > + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
> > obj->tiling_mode);
> >
> > /* XXX Enabling the panel-fitter across page-flip is so far
> > @@ -7462,7 +7464,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
> > intel_ring_emit(ring, MI_DISPLAY_FLIP |
> > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> > intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
> > - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> >
> > /* Contrary to the suggestions in the documentation,
> > * "Enable Panel Fitter" does not seem to be required when page
> > @@ -7527,7 +7529,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
> >
> > intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
> > intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
> > - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> > intel_ring_emit(ring, (MI_NOOP));
> >
> > intel_mark_page_flip_active(intel_crtc);
> > diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
> > index dff669e..f3c97e0 100644
> > --- a/drivers/gpu/drm/i915/intel_fb.c
> > +++ b/drivers/gpu/drm/i915/intel_fb.c
> > @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
> > info->apertures->ranges[0].base = dev->mode_config.fb_base;
> > info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
> >
> > - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
> > + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
> > info->fix.smem_len = size;
> >
> > info->screen_base =
> > - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> > + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> > size);
> > if (!info->screen_base) {
> > ret = -ENOSPC;
> > @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
> >
> > /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
> >
> > - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
> > + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
> > fb->width, fb->height,
> > - obj->gtt_offset, obj);
> > + i915_gem_obj_ggtt_offset(obj), obj);
> >
> >
> > mutex_unlock(&dev->struct_mutex);
> > diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> > index a369881..81c3ca1 100644
> > --- a/drivers/gpu/drm/i915/intel_overlay.c
> > +++ b/drivers/gpu/drm/i915/intel_overlay.c
> > @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
> > regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
> > else
> > regs = io_mapping_map_wc(dev_priv->gtt.mappable,
> > - overlay->reg_bo->gtt_offset);
> > + i915_gem_obj_ggtt_offset(overlay->reg_bo));
> >
> > return regs;
> > }
> > @@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> > swidth = params->src_w;
> > swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
> > sheight = params->src_h;
> > -	iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
> > +	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
> > ostride = params->stride_Y;
> >
> > if (params->format & I915_OVERLAY_YUV_PLANAR) {
> > @@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> > params->src_w/uv_hscale);
> > swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
> > sheight |= (params->src_h/uv_vscale) << 16;
> > -	iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
> > -	iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
> > +	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
> > +	iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
> > ostride |= params->stride_UV << 16;
> > }
> >
> > @@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
> > DRM_ERROR("failed to pin overlay register bo\n");
> > goto out_free_bo;
> > }
> > - overlay->flip_addr = reg_bo->gtt_offset;
> > + overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
> >
> > ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
> > if (ret) {
> > @@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
> > overlay->reg_bo->phys_obj->handle->vaddr;
> > else
> > regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> > - overlay->reg_bo->gtt_offset);
> > + i915_gem_obj_ggtt_offset(overlay->reg_bo));
> >
> > return regs;
> > }
> > @@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
> > if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
> > error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
> > else
> > - error->base = overlay->reg_bo->gtt_offset;
> > + error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
> >
> > regs = intel_overlay_map_regs_atomic(overlay);
> > if (!regs)
> > diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> > index 5b4ade6..d06648d 100644
> > --- a/drivers/gpu/drm/i915/intel_pm.c
> > +++ b/drivers/gpu/drm/i915/intel_pm.c
> > @@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> > (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
> > (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
> > I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
> > - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
> > + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
> > /* enable it... */
> > I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
> >
> > @@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> > struct drm_i915_gem_object *obj = intel_fb->obj;
> > struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
> >
> > - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
> > + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
> >
> > I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
> > IVB_DPFC_CTL_FENCE_EN |
> > @@ -3707,7 +3707,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
> >
> > intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
> > intel_ring_emit(ring, MI_SET_CONTEXT);
> > - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
> > + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
> > MI_MM_SPACE_GTT |
> > MI_SAVE_EXT_STATE_EN |
> > MI_RESTORE_EXT_STATE_EN |
> > @@ -3730,7 +3730,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
> > return;
> > }
> >
> > - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
> > + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
> > I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index e51ab55..54495df 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
> > * registers with the above sequence (the readback of the HEAD registers
> > * also enforces ordering), otherwise the hw might lose the new ring
> > * register values. */
> > - I915_WRITE_START(ring, obj->gtt_offset);
> > + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
> > I915_WRITE_CTL(ring,
> > ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
> > | RING_VALID);
> >
> > /* If the head is still not zero, the ring is dead */
> > if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
> > - I915_READ_START(ring) == obj->gtt_offset &&
> > + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
> > (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
> > DRM_ERROR("%s initialization failed "
> > "ctl %08x head %08x tail %08x start %08x\n",
> > @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
> > if (ret)
> > goto err_unref;
> >
> > - pc->gtt_offset = obj->gtt_offset;
> > + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> > pc->cpu_page = kmap(sg_page(obj->pages->sgl));
> > if (pc->cpu_page == NULL) {
> > ret = -ENOMEM;
> > @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
> > intel_ring_advance(ring);
> > } else {
> > struct drm_i915_gem_object *obj = ring->private;
> > - u32 cs_offset = obj->gtt_offset;
> > + u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
> >
> > if (len > I830_BATCH_LIMIT)
> > return -ENOSPC;
> > @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
> > goto err_unref;
> > }
> >
> > - ring->status_page.gfx_addr = obj->gtt_offset;
> > + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
> > ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
> > if (ring->status_page.page_addr == NULL) {
> > ret = -ENOMEM;
> > @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
> > goto err_unpin;
> >
> > ring->virtual_start =
> > - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> > + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> > ring->size);
> > if (ring->virtual_start == NULL) {
> > DRM_ERROR("Failed to map ringbuffer.\n");
> > diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
> > index 1fa5612..55bdf70 100644
> > --- a/drivers/gpu/drm/i915/intel_sprite.c
> > +++ b/drivers/gpu/drm/i915/intel_sprite.c
> > @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
> >
> > I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
> > I915_WRITE(SPCNTR(pipe, plane), sprctl);
> > - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
> > + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
> > sprsurf_offset);
> > POSTING_READ(SPSURF(pipe, plane));
> > }
> > @@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> > if (intel_plane->can_scale)
> > I915_WRITE(SPRSCALE(pipe), sprscale);
> > I915_WRITE(SPRCTL(pipe), sprctl);
> > - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
> > + I915_MODIFY_DISPBASE(SPRSURF(pipe),
> > + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
> > POSTING_READ(SPRSURF(pipe));
> >
> > /* potentially re-enable LP watermarks */
> > @@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> > I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
> > I915_WRITE(DVSSCALE(pipe), dvsscale);
> > I915_WRITE(DVSCNTR(pipe), dvscntr);
> > - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
> > + I915_MODIFY_DISPBASE(DVSSURF(pipe),
> > + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
> > POSTING_READ(DVSSURF(pipe));
> > }
> >
> > --
> > 1.8.3.2
> >
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
--
Ben Widawsky, Intel Open Source Technology Center
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 3/6] drm/i915: Kill obj->gtt_offset
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
2013-07-03 21:45 ` [PATCH 2/6] drm/i915: Getter/setter for object attributes Ben Widawsky
@ 2013-07-03 21:45 ` Ben Widawsky
2013-07-04 12:20 ` Daniel Vetter
2013-07-03 21:45 ` [PATCH 4/6] drm/i915: Use gtt_space->start for stolen reservation Ben Widawsky
` (5 subsequent siblings)
7 siblings, 1 reply; 19+ messages in thread
From: Ben Widawsky @ 2013-07-03 21:45 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky
With the getters in place from the previous patch this member serves no
purpose other than saving one spare pointer chase, which will be killed
in the next patch anyway.
Moving to VMAs, this member adds unnecessary confusion since an object
may exist at different offsets in different VMs.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 7 -------
drivers/gpu/drm/i915/i915_gem.c | 2 --
drivers/gpu/drm/i915/i915_gem_stolen.c | 1 -
3 files changed, 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 496ed3a..d06886b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1328,13 +1328,6 @@ struct drm_i915_gem_object {
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t ggtt_offset;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index edd5b6d..e0568e3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2623,7 +2623,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
drm_mm_put_block(obj->ggtt_space);
obj->ggtt_space = NULL;
- obj->ggtt_offset = 0;
return 0;
}
@@ -3153,7 +3152,6 @@ search_free:
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
obj->ggtt_space = node;
- obj->ggtt_offset = node->start;
fenceable =
node->size == fence_size &&
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cf0d0e0..2f009e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -390,7 +390,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
} else
obj->ggtt_space = I915_GTT_RESERVED;
- obj->ggtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH 3/6] drm/i915: Kill obj->gtt_offset
2013-07-03 21:45 ` [PATCH 3/6] drm/i915: Kill obj->gtt_offset Ben Widawsky
@ 2013-07-04 12:20 ` Daniel Vetter
0 siblings, 0 replies; 19+ messages in thread
From: Daniel Vetter @ 2013-07-04 12:20 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX
On Wed, Jul 03, 2013 at 02:45:23PM -0700, Ben Widawsky wrote:
> With the getters in place from the previous patch this member serves no
> purpose other than saving one spare pointer chase, which will be killed
> in the next patch anyway.
>
> Moving to VMAs, this member adds unnecessary confusion since an object
> may exist at different offsets in different VMs.
>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Ok, I've merged this and the previous patch to dinq (I'm stalling on the
first one until David's comments are addressed).
-Daniel
> ---
> drivers/gpu/drm/i915/i915_drv.h | 7 -------
> drivers/gpu/drm/i915/i915_gem.c | 2 --
> drivers/gpu/drm/i915/i915_gem_stolen.c | 1 -
> 3 files changed, 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 496ed3a..d06886b 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1328,13 +1328,6 @@ struct drm_i915_gem_object {
> unsigned long exec_handle;
> struct drm_i915_gem_exec_object2 *exec_entry;
>
> - /**
> - * Current offset of the object in GTT space.
> - *
> - * This is the same as gtt_space->start
> - */
> - uint32_t ggtt_offset;
> -
> struct intel_ring_buffer *ring;
>
> /** Breadcrumb of last rendering to the buffer. */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index edd5b6d..e0568e3 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2623,7 +2623,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
>
> drm_mm_put_block(obj->ggtt_space);
> obj->ggtt_space = NULL;
> - obj->ggtt_offset = 0;
>
> return 0;
> }
> @@ -3153,7 +3152,6 @@ search_free:
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> obj->ggtt_space = node;
> - obj->ggtt_offset = node->start;
>
> fenceable =
> node->size == fence_size &&
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index cf0d0e0..2f009e6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -390,7 +390,6 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> } else
> obj->ggtt_space = I915_GTT_RESERVED;
>
> - obj->ggtt_offset = gtt_offset;
> obj->has_global_gtt_mapping = 1;
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> --
> 1.8.3.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 4/6] drm/i915: Use gtt_space->start for stolen reservation
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
2013-07-03 21:45 ` [PATCH 2/6] drm/i915: Getter/setter for object attributes Ben Widawsky
2013-07-03 21:45 ` [PATCH 3/6] drm/i915: Kill obj->gtt_offset Ben Widawsky
@ 2013-07-03 21:45 ` Ben Widawsky
2013-07-04 12:23 ` Daniel Vetter
2013-07-03 21:45 ` [PATCH 5/6] drm/i915: Embed drm_mm_node in i915 gem obj Ben Widawsky
` (4 subsequent siblings)
7 siblings, 1 reply; 19+ messages in thread
From: Ben Widawsky @ 2013-07-03 21:45 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky
Shortly we'll want to switch to an embedded drm_mm_node per object, and
therefore using pointers as we're doing is unfeasible.
I've chosen an impossible gtt start offset as the way to denote a
reservation.
CC: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 2 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +-
drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d06886b..b6864ffb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1201,7 +1201,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_RESERVED (dev_priv->gtt.total + 1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1eefba7..fbe2e72 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -633,7 +633,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
- BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
+ BUG_ON(obj->ggtt_space->start != I915_GTT_RESERVED);
obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
if (!obj->ggtt_space) {
DRM_ERROR("Failed to preserve all objects\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2f009e6..c9d7016 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -388,7 +388,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
goto unref_out;
}
} else
- obj->ggtt_space = I915_GTT_RESERVED;
+ obj->ggtt_space->start = I915_GTT_RESERVED;
obj->has_global_gtt_mapping = 1;
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH 4/6] drm/i915: Use gtt_space->start for stolen reservation
2013-07-03 21:45 ` [PATCH 4/6] drm/i915: Use gtt_space->start for stolen reservation Ben Widawsky
@ 2013-07-04 12:23 ` Daniel Vetter
0 siblings, 0 replies; 19+ messages in thread
From: Daniel Vetter @ 2013-07-04 12:23 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX
On Wed, Jul 03, 2013 at 02:45:24PM -0700, Ben Widawsky wrote:
> Shortly we'll want to switch to an embedded drm_mm_node per object, and
> therefore using pointers as we're doing is unfeasible.
>
> I've chosen an impossible gtt start offset as the way to denote a
> reservation.
>
> CC: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> ---
> drivers/gpu/drm/i915/i915_drv.h | 2 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c | 2 +-
> drivers/gpu/drm/i915/i915_gem_stolen.c | 2 +-
> 3 files changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index d06886b..b6864ffb 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1201,7 +1201,7 @@ enum hdmi_force_audio {
> HDMI_AUDIO_ON, /* force turn on HDMI audio */
> };
>
> -#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
> +#define I915_GTT_RESERVED (dev_priv->gtt.total + 1)
This is _really_ fragile since if we ever change gtt.total in the init
sequence this will blow up. I've switched over to an appropriate real
constant (ULONG_MAX).
-Daniel
>
> struct drm_i915_gem_object_ops {
> /* Interface between the GEM object and its backing storage.
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 1eefba7..fbe2e72 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -633,7 +633,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
> i915_gem_obj_ggtt_offset(obj), obj->base.size);
>
> - BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
> + BUG_ON(obj->ggtt_space->start != I915_GTT_RESERVED);
> obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> if (!obj->ggtt_space) {
> DRM_ERROR("Failed to preserve all objects\n");
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 2f009e6..c9d7016 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -388,7 +388,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> goto unref_out;
> }
> } else
> - obj->ggtt_space = I915_GTT_RESERVED;
> + obj->ggtt_space->start = I915_GTT_RESERVED;
>
> obj->has_global_gtt_mapping = 1;
>
> --
> 1.8.3.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 5/6] drm/i915: Embed drm_mm_node in i915 gem obj
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
` (2 preceding siblings ...)
2013-07-03 21:45 ` [PATCH 4/6] drm/i915: Use gtt_space->start for stolen reservation Ben Widawsky
@ 2013-07-03 21:45 ` Ben Widawsky
2013-07-04 12:27 ` Daniel Vetter
2013-07-03 21:45 ` [PATCH 6/6] drm: Optionally create mm blocks from top-to-bottom Ben Widawsky
` (3 subsequent siblings)
7 siblings, 1 reply; 19+ messages in thread
From: Ben Widawsky @ 2013-07-03 21:45 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky
Embedding the node in the obj is more natural in the transition to VMAs
which will also have embedded nodes. This change also helps transition
away from put_block to remove node.
Though it's quite an uncommon occurrence, it's somewhat convenient to not
fail at bind time because we cannot allocate the node. Though in
practice there are other allocations (like the request structure) which
would probably make this point not terribly useful.
Quoting Daniel:
Note that the only difference between put_block and remove_node is
that the former fills up the preallocation cache. Which we don't need
anyway and hence is just wasted space.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 10 +++++-----
drivers/gpu/drm/i915/i915_gem.c | 31 +++++++++++--------------------
drivers/gpu/drm/i915/i915_gem_evict.c | 6 +++---
drivers/gpu/drm/i915/i915_gem_gtt.c | 13 +++----------
drivers/gpu/drm/i915/i915_gem_stolen.c | 10 ++--------
5 files changed, 24 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6864ffb..67bdcf1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
/** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *ggtt_space;
+ struct drm_mm_node ggtt_space;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
@@ -1357,14 +1357,14 @@ struct drm_i915_gem_object {
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
- return o->ggtt_space->start;
+ return o->ggtt_space.start;
}
/* Whether or not this object is currently mapped by the translation tables */
static inline bool
i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
{
- return o->ggtt_space != NULL;
+ return drm_mm_node_allocated(&o->ggtt_space);
}
/* The size used in the translation tables may be larger than the actual size of
@@ -1374,14 +1374,14 @@ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- return o->ggtt_space->size;
+ return o->ggtt_space.size;
}
static inline void
i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
enum i915_cache_level color)
{
- o->ggtt_space->color = color;
+ o->ggtt_space.color = color;
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e0568e3..6cb9210 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2621,8 +2621,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- drm_mm_put_block(obj->ggtt_space);
- obj->ggtt_space = NULL;
+ drm_mm_remove_node(&obj->ggtt_space);
return 0;
}
@@ -3000,7 +2999,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
if (HAS_LLC(dev))
return true;
- if (gtt_space == NULL)
+ if (!drm_mm_node_allocated(gtt_space))
return true;
if (list_empty(&gtt_space->node_list))
@@ -3068,7 +3067,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
@@ -3113,14 +3111,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL) {
- i915_gem_object_unpin_pages(obj);
- return -ENOMEM;
- }
-
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
+ &obj->ggtt_space,
size, alignment,
obj->cache_level, 0, gtt_max);
if (ret) {
@@ -3132,30 +3125,28 @@ search_free:
goto search_free;
i915_gem_object_unpin_pages(obj);
- kfree(node);
return ret;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->ggtt_space,
+ obj->cache_level))) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
+ drm_mm_remove_node(&obj->ggtt_space);
return -EINVAL;
}
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
+ drm_mm_remove_node(&obj->ggtt_space);
return ret;
}
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- obj->ggtt_space = node;
-
fenceable =
- node->size == fence_size &&
- (node->start & (fence_alignment - 1)) == 0;
+ i915_gem_obj_ggtt_size(obj) == fence_size &&
+ (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
dev_priv->gtt.mappable_end;
@@ -3319,7 +3310,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
+ if (!i915_gem_valid_gtt_space(dev, &obj->ggtt_space, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5bbdea4..a1e7ec8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
return false;
list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(obj->ggtt_space);
+ return drm_mm_scan_add_block(&obj->ggtt_space);
}
int
@@ -107,7 +107,7 @@ none:
struct drm_i915_gem_object,
exec_list);
- ret = drm_mm_scan_remove_block(obj->ggtt_space);
+ ret = drm_mm_scan_remove_block(&obj->ggtt_space);
BUG_ON(ret);
list_del_init(&obj->exec_list);
@@ -127,7 +127,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- if (drm_mm_scan_remove_block(obj->ggtt_space)) {
+ if (drm_mm_scan_remove_block(&obj->ggtt_space)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
continue;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index fbe2e72..4df6159 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -633,20 +633,13 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
i915_gem_obj_ggtt_offset(obj), obj->base.size);
- BUG_ON(obj->ggtt_space->start != I915_GTT_RESERVED);
- obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
- if (!obj->ggtt_space) {
- DRM_ERROR("Failed to preserve all objects\n");
- break;
- }
+ BUG_ON(i915_gem_obj_ggtt_offset(obj) != I915_GTT_RESERVED);
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->ggtt_space,
+ &obj->ggtt_space,
i915_gem_obj_ggtt_offset(obj),
obj->base.size);
- if (ret) {
+ if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
- kfree(obj->ggtt_space);
- }
obj->has_global_gtt_mapping = 1;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index c9d7016..dc68b30 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -374,21 +374,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* later.
*/
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
- if (!obj->ggtt_space) {
- DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
- goto unref_out;
- }
-
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->ggtt_space,
+ &obj->ggtt_space,
gtt_offset, size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto unref_out;
}
} else
- obj->ggtt_space->start = I915_GTT_RESERVED;
+ obj->ggtt_space.start = I915_GTT_RESERVED;
obj->has_global_gtt_mapping = 1;
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH 5/6] drm/i915: Embed drm_mm_node in i915 gem obj
2013-07-03 21:45 ` [PATCH 5/6] drm/i915: Embed drm_mm_node in i915 gem obj Ben Widawsky
@ 2013-07-04 12:27 ` Daniel Vetter
0 siblings, 0 replies; 19+ messages in thread
From: Daniel Vetter @ 2013-07-04 12:27 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX
On Wed, Jul 03, 2013 at 02:45:25PM -0700, Ben Widawsky wrote:
> Embedding the node in the obj is more natural in the transition to VMAs
> which will also have embedded nodes. This change also helps transition
> away from put_block to remove node.
>
> Though it's quite an uncommon occurrence, it's somewhat convenient to not
> fail at bind time because we cannot allocate the node. Though in
> practice there are other allocations (like the request structure) which
> would probably make this point not terribly useful.
>
> Quoting Daniel:
> Note that the only difference between put_block and remove_node is
> that the former fills up the preallocation cache. Which we don't need
> anyway and hence is just wasted space.
>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
This one is blocked for now on getting the create_block drm_mm changes in.
-Daniel
> ---
> drivers/gpu/drm/i915/i915_drv.h | 10 +++++-----
> drivers/gpu/drm/i915/i915_gem.c | 31 +++++++++++--------------------
> drivers/gpu/drm/i915/i915_gem_evict.c | 6 +++---
> drivers/gpu/drm/i915/i915_gem_gtt.c | 13 +++----------
> drivers/gpu/drm/i915/i915_gem_stolen.c | 10 ++--------
> 5 files changed, 24 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index b6864ffb..67bdcf1 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
> const struct drm_i915_gem_object_ops *ops;
>
> /** Current space allocated to this object in the GTT, if any. */
> - struct drm_mm_node *ggtt_space;
> + struct drm_mm_node ggtt_space;
> /** Stolen memory for this object, instead of being backed by shmem. */
> struct drm_mm_node *stolen;
> struct list_head global_list;
> @@ -1357,14 +1357,14 @@ struct drm_i915_gem_object {
> static inline unsigned long
> i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
> {
> - return o->ggtt_space->start;
> + return o->ggtt_space.start;
> }
>
> /* Whether or not this object is currently mapped by the translation tables */
> static inline bool
> i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
> {
> - return o->ggtt_space != NULL;
> + return drm_mm_node_allocated(&o->ggtt_space);
> }
>
> /* The size used in the translation tables may be larger than the actual size of
> @@ -1374,14 +1374,14 @@ i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
> static inline unsigned long
> i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
> {
> - return o->ggtt_space->size;
> + return o->ggtt_space.size;
> }
>
> static inline void
> i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
> enum i915_cache_level color)
> {
> - o->ggtt_space->color = color;
> + o->ggtt_space.color = color;
> }
>
> /**
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index e0568e3..6cb9210 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2621,8 +2621,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> /* Avoid an unnecessary call to unbind on rebind. */
> obj->map_and_fenceable = true;
>
> - drm_mm_put_block(obj->ggtt_space);
> - obj->ggtt_space = NULL;
> + drm_mm_remove_node(&obj->ggtt_space);
>
> return 0;
> }
> @@ -3000,7 +2999,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
> if (HAS_LLC(dev))
> return true;
>
> - if (gtt_space == NULL)
> + if (!drm_mm_node_allocated(gtt_space))
> return true;
>
> if (list_empty(&gtt_space->node_list))
> @@ -3068,7 +3067,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
> {
> struct drm_device *dev = obj->base.dev;
> drm_i915_private_t *dev_priv = dev->dev_private;
> - struct drm_mm_node *node;
> u32 size, fence_size, fence_alignment, unfenced_alignment;
> bool mappable, fenceable;
> size_t gtt_max = map_and_fenceable ?
> @@ -3113,14 +3111,9 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
>
> i915_gem_object_pin_pages(obj);
>
> - node = kzalloc(sizeof(*node), GFP_KERNEL);
> - if (node == NULL) {
> - i915_gem_object_unpin_pages(obj);
> - return -ENOMEM;
> - }
> -
> search_free:
> - ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
> + ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
> + &obj->ggtt_space,
> size, alignment,
> obj->cache_level, 0, gtt_max);
> if (ret) {
> @@ -3132,30 +3125,28 @@ search_free:
> goto search_free;
>
> i915_gem_object_unpin_pages(obj);
> - kfree(node);
> return ret;
> }
> - if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
> + if (WARN_ON(!i915_gem_valid_gtt_space(dev, &obj->ggtt_space,
> + obj->cache_level))) {
> i915_gem_object_unpin_pages(obj);
> - drm_mm_put_block(node);
> + drm_mm_remove_node(&obj->ggtt_space);
> return -EINVAL;
> }
>
> ret = i915_gem_gtt_prepare_object(obj);
> if (ret) {
> i915_gem_object_unpin_pages(obj);
> - drm_mm_put_block(node);
> + drm_mm_remove_node(&obj->ggtt_space);
> return ret;
> }
>
> list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> - obj->ggtt_space = node;
> -
> fenceable =
> - node->size == fence_size &&
> - (node->start & (fence_alignment - 1)) == 0;
> + i915_gem_obj_ggtt_size(obj) == fence_size &&
> + (i915_gem_obj_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
>
> mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
> dev_priv->gtt.mappable_end;
> @@ -3319,7 +3310,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> return -EBUSY;
> }
>
> - if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
> + if (!i915_gem_valid_gtt_space(dev, &obj->ggtt_space, cache_level)) {
> ret = i915_gem_object_unbind(obj);
> if (ret)
> return ret;
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 5bbdea4..a1e7ec8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
> return false;
>
> list_add(&obj->exec_list, unwind);
> - return drm_mm_scan_add_block(obj->ggtt_space);
> + return drm_mm_scan_add_block(&obj->ggtt_space);
> }
>
> int
> @@ -107,7 +107,7 @@ none:
> struct drm_i915_gem_object,
> exec_list);
>
> - ret = drm_mm_scan_remove_block(obj->ggtt_space);
> + ret = drm_mm_scan_remove_block(&obj->ggtt_space);
> BUG_ON(ret);
>
> list_del_init(&obj->exec_list);
> @@ -127,7 +127,7 @@ found:
> obj = list_first_entry(&unwind_list,
> struct drm_i915_gem_object,
> exec_list);
> - if (drm_mm_scan_remove_block(obj->ggtt_space)) {
> + if (drm_mm_scan_remove_block(&obj->ggtt_space)) {
> list_move(&obj->exec_list, &eviction_list);
> drm_gem_object_reference(&obj->base);
> continue;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index fbe2e72..4df6159 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -633,20 +633,13 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
> i915_gem_obj_ggtt_offset(obj), obj->base.size);
>
> - BUG_ON(obj->ggtt_space->start != I915_GTT_RESERVED);
> - obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> - if (!obj->ggtt_space) {
> - DRM_ERROR("Failed to preserve all objects\n");
> - break;
> - }
> + BUG_ON(i915_gem_obj_ggtt_offset(obj) != I915_GTT_RESERVED);
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->ggtt_space,
> + &obj->ggtt_space,
> i915_gem_obj_ggtt_offset(obj),
> obj->base.size);
> - if (ret) {
> + if (ret)
> DRM_DEBUG_KMS("Reservation failed\n");
> - kfree(obj->ggtt_space);
> - }
> obj->has_global_gtt_mapping = 1;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index c9d7016..dc68b30 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -374,21 +374,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> - if (!obj->ggtt_space) {
> - DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> - goto unref_out;
> - }
> -
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->ggtt_space,
> + &obj->ggtt_space,
> gtt_offset, size);
> if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> goto unref_out;
> }
> } else
> - obj->ggtt_space->start = I915_GTT_RESERVED;
> + obj->ggtt_space.start = I915_GTT_RESERVED;
>
> obj->has_global_gtt_mapping = 1;
>
> --
> 1.8.3.2
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 19+ messages in thread
* [PATCH 6/6] drm: Optionally create mm blocks from top-to-bottom
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
` (3 preceding siblings ...)
2013-07-03 21:45 ` [PATCH 5/6] drm/i915: Embed drm_mm_node in i915 gem obj Ben Widawsky
@ 2013-07-03 21:45 ` Ben Widawsky
2013-07-04 9:19 ` [PATCH 1/6] drm: pre allocate node for create_block David Herrmann
` (2 subsequent siblings)
7 siblings, 0 replies; 19+ messages in thread
From: Ben Widawsky @ 2013-07-03 21:45 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky, dri-devel
From: Chris Wilson <chris@chris-wilson.co.uk>
Clients like i915 needs to segregate cache domains within the GTT which
can lead to small amounts of fragmentation. By allocating the uncached
buffers from the bottom and the cacheable buffers from the top, we can
reduce the amount of wasted space and also optimize allocation of the
mappable portion of the GTT to only those buffers that require CPU
access through the GTT.
v2 by Ben:
Update callers in i915_gem_object_bind_to_gtt()
Turn search flags and allocation flags into separate enums
Make checkpatch happy where logical/easy
v3: by Ben:
Rebased on create_block removal
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
CC: <dri-devel@lists.freedesktop.org>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Conflicts:
drivers/gpu/drm/drm_mm.c
include/drm/drm_mm.h
---
drivers/gpu/drm/drm_mm.c | 121 +++++++++++++++------------
drivers/gpu/drm/i915/i915_gem.c | 4 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 3 +-
drivers/gpu/drm/i915/i915_gem_stolen.c | 5 +-
include/drm/drm_mm.h | 148 ++++++++++++++++++++-------------
5 files changed, 166 insertions(+), 115 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 9e8dfbc..4a30d55 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,7 +49,7 @@
#define MM_UNUSED_TARGET 4
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, bool atomic)
{
struct drm_mm_node *child;
@@ -105,7 +105,8 @@ EXPORT_SYMBOL(drm_mm_pre_get);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -118,12 +119,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
if (alignment) {
unsigned tmp = adj_start % alignment;
- if (tmp)
- adj_start += alignment - tmp;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
}
+ BUG_ON(adj_start < hole_start);
+ BUG_ON(adj_end > hole_end);
+
if (adj_start == hole_start) {
hole_node->hole_follows = 0;
list_del(&hole_node->hole_stack);
@@ -148,7 +159,8 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long start, unsigned long size)
+ unsigned long start, unsigned long size,
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm_node *hole;
unsigned long end = start + size;
@@ -190,15 +202,15 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
unsigned long color,
- int atomic)
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm_node *node;
- node = drm_mm_kmalloc(hole_node->mm, atomic);
+ node = drm_mm_kmalloc(hole_node->mm, flags & DRM_MM_CREATE_ATOMIC);
if (unlikely(node == NULL))
return NULL;
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color, flags);
return node;
}
@@ -211,32 +223,28 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_allocator_flags aflags,
+ enum drm_mm_search_flags sflags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, 0);
+ color, sflags);
if (!hole_node)
return -ENOSPC;
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
+ drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
-{
- return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_search_flags flags)
{
struct drm_mm *mm = hole_node->mm;
unsigned long hole_start = drm_mm_hole_node_start(hole_node);
@@ -251,13 +259,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
if (adj_end > end)
adj_end = end;
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start = adj_end - size;
+
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
if (alignment) {
unsigned tmp = adj_start % alignment;
- if (tmp)
- adj_start += alignment - tmp;
+ if (tmp) {
+ if (flags & DRM_MM_CREATE_TOP)
+ adj_start -= tmp;
+ else
+ adj_start += alignment - tmp;
+ }
}
if (adj_start == hole_start) {
@@ -274,6 +289,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole_node->node_list);
+ BUG_ON(node->start < start);
+ BUG_ON(node->start < adj_start);
BUG_ON(node->start + node->size > adj_end);
BUG_ON(node->start + node->size > end);
@@ -284,22 +301,23 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic)
+struct drm_mm_node *
+drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_allocator_flags flags)
{
struct drm_mm_node *node;
- node = drm_mm_kmalloc(hole_node->mm, atomic);
+ node = drm_mm_kmalloc(hole_node->mm, flags & DRM_MM_CREATE_ATOMIC);
if (unlikely(node == NULL))
return NULL;
drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
- start, end);
+ start, end, flags);
return node;
}
@@ -312,31 +330,25 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_allocator_flags aflags,
+ enum drm_mm_search_flags sflags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
- start, end, 0);
+ start, end, sflags);
if (!hole_node)
return -ENOSPC;
drm_mm_insert_helper_range(hole_node, node,
size, alignment, color,
- start, end);
+ start, end, aflags);
return 0;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
-{
- return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
/**
* Remove a memory node from the allocator.
*/
@@ -412,7 +424,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -425,7 +437,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
@@ -435,7 +448,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if ((flags & DRM_MM_SEARCH_BEST) == 0)
return entry;
if (entry->size < best_size) {
@@ -448,13 +461,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free_generic);
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- bool best_match)
+struct drm_mm_node *
+drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -467,7 +481,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
best = NULL;
best_size = ~0UL;
- drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
+ __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
+ flags & DRM_MM_SEARCH_BELOW) {
if (adj_start < start)
adj_start = start;
if (adj_end > end)
@@ -482,7 +497,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if ((flags & DRM_MM_SEARCH_BEST) == 0)
return entry;
if (entry->size < best_size) {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6cb9210..9a0470b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3115,7 +3115,9 @@ search_free:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space,
&obj->ggtt_space,
size, alignment,
- obj->cache_level, 0, gtt_max);
+ obj->cache_level, 0, gtt_max,
+ DRM_MM_CREATE_DEFAULT,
+ DRM_MM_SEARCH_DEFAULT);
if (ret) {
ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4df6159..081f77e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -637,7 +637,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
&obj->ggtt_space,
i915_gem_obj_ggtt_offset(obj),
- obj->base.size);
+ obj->base.size,
+ DRM_MM_CREATE_DEFAULT);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index dc68b30..0d19710 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -350,7 +350,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
return NULL;
ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
- size);
+ size, DRM_MM_CREATE_DEFAULT);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
kfree(stolen);
@@ -376,7 +376,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
&obj->ggtt_space,
- gtt_offset, size);
+ gtt_offset, size,
+ DRM_MM_CREATE_DEFAULT);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto unref_out;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d8b56b7..2915c43 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -41,6 +41,21 @@
#include <linux/seq_file.h>
#endif
+enum drm_mm_allocator_flags {
+ DRM_MM_CREATE_DEFAULT = 0,
+ DRM_MM_CREATE_ATOMIC = 1<<0,
+ DRM_MM_CREATE_TOP = 1<<1,
+};
+
+enum drm_mm_search_flags {
+ DRM_MM_SEARCH_DEFAULT = 0,
+ DRM_MM_SEARCH_BEST = 1<<0,
+ DRM_MM_SEARCH_BELOW = 1<<1,
+};
+
+#define DRM_MM_BOTTOMUP DRM_MM_CREATE_DEFAULT, DRM_MM_SEARCH_DEFAULT
+#define DRM_MM_TOPDOWN DRM_MM_CREATE_TOP, DRM_MM_SEARCH_BELOW
+
struct drm_mm_node {
struct list_head node_list;
struct list_head hole_stack;
@@ -135,30 +150,41 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
1 : 0; \
entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
+#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
+ for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
+ &entry->hole_stack != &(mm)->hole_stack ? \
+ hole_start = drm_mm_hole_node_start(entry), \
+ hole_end = drm_mm_hole_node_end(entry), \
+ 1 : 0; \
+ entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
+
/*
* Basic range manager support (drm_mm.c)
*/
extern int drm_mm_create_block(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long start,
- unsigned long size);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic);
+ unsigned long size,
+ enum drm_mm_allocator_flags flags);
+extern struct drm_mm_node *
+drm_mm_get_block_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_allocator_flags flags);
+extern struct drm_mm_node *
+drm_mm_get_block_range_generic(struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_allocator_flags flags);
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment)
+static inline struct drm_mm_node *
+drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment)
{
return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
}
@@ -166,7 +192,8 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
unsigned long size,
unsigned alignment)
{
- return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
+ return drm_mm_get_block_generic(parent, size, alignment, 0,
+ DRM_MM_CREATE_ATOMIC);
}
static inline struct drm_mm_node *drm_mm_get_block_range(
struct drm_mm_node *parent,
@@ -197,39 +224,41 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
unsigned long end)
{
return drm_mm_get_block_range_generic(parent, size, alignment, 0,
- start, end, 1);
+ start, end,
+ DRM_MM_CREATE_ATOMIC);
}
-extern int drm_mm_insert_node(struct drm_mm *mm,
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment);
-extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end);
extern int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
- unsigned long color);
-extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end);
+ unsigned long color,
+ enum drm_mm_allocator_flags aflags,
+ enum drm_mm_search_flags sflags);
+#define drm_mm_insert_node(mm, node, size, alignment) \
+ drm_mm_insert_node_generic(mm, node, size, alignment, 0, 0)
+extern int
+drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_allocator_flags aflags,
+ enum drm_mm_search_flags sflags);
+#define drm_mm_insert_node_in_range(mm, node, size, alignment, start, end) \
+ drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end, 0)
extern void drm_mm_put_block(struct drm_mm_node *cur);
extern void drm_mm_remove_node(struct drm_mm_node *node);
extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match);
+
+extern struct drm_mm_node *
+drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags);
extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
const struct drm_mm *mm,
unsigned long size,
@@ -237,13 +266,15 @@ extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
unsigned long color,
unsigned long start,
unsigned long end,
- bool best_match);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- bool best_match)
+ enum drm_mm_search_flags flags);
+
+static inline struct drm_mm_node *
+drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ enum drm_mm_search_flags flags)
{
- return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
+ return drm_mm_search_free_generic(mm, size, alignment, 0, flags);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range(
const struct drm_mm *mm,
@@ -251,18 +282,19 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range(
unsigned alignment,
unsigned long start,
unsigned long end,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
- start, end, best_match);
+ start, end, flags);
}
-static inline struct drm_mm_node *drm_mm_search_free_color(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match)
+static inline struct drm_mm_node *
+drm_mm_search_free_color(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
- return drm_mm_search_free_generic(mm,size, alignment, color, best_match);
+ return drm_mm_search_free_generic(mm, size, alignment, color, flags);
}
static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
const struct drm_mm *mm,
@@ -271,10 +303,10 @@ static inline struct drm_mm_node *drm_mm_search_free_in_range_color(
unsigned long color,
unsigned long start,
unsigned long end,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
return drm_mm_search_free_in_range_generic(mm, size, alignment, color,
- start, end, best_match);
+ start, end, flags);
}
extern int drm_mm_init(struct drm_mm *mm,
unsigned long start,
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH 1/6] drm: pre allocate node for create_block
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
` (4 preceding siblings ...)
2013-07-03 21:45 ` [PATCH 6/6] drm: Optionally create mm blocks from top-to-bottom Ben Widawsky
@ 2013-07-04 9:19 ` David Herrmann
2013-07-04 20:03 ` Ben Widawsky
2013-07-04 9:22 ` David Herrmann
2013-07-04 20:14 ` [PATCH] [v3] " Ben Widawsky
7 siblings, 1 reply; 19+ messages in thread
From: David Herrmann @ 2013-07-04 9:19 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX, dri-devel@lists.freedesktop.org
Hi
On Wed, Jul 3, 2013 at 11:45 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> For an upcoming patch where we introduce the i915 VMA, it's ideal to
> have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> will break a bunch of code, but amongst them are 2 callers of
> drm_mm_create_block(), both related to stolen memory.
>
> It also allows us to embed the drm_mm_node into the object currently
> which provides a nice transition over to the new code.
>
> v2: Reordered to do before ripping out obj->gtt_offset.
> Some minor cleanups made available because of reordering.
>
> CC: <dri-devel@lists.freedesktop.org>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> ---
> drivers/gpu/drm/drm_mm.c | 16 +++++----------
> drivers/gpu/drm/i915/i915_gem_gtt.c | 18 +++++++++++++----
> drivers/gpu/drm/i915/i915_gem_stolen.c | 36 +++++++++++++++++++++++-----------
> include/drm/drm_mm.h | 9 +++++----
> 4 files changed, 49 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index 07cf99c..9e8dfbc 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> }
> }
>
> -struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic)
> +int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
> + unsigned long start, unsigned long size)
> {
> - struct drm_mm_node *hole, *node;
> + struct drm_mm_node *hole;
> unsigned long end = start + size;
> unsigned long hole_start;
> unsigned long hole_end;
> @@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> if (hole_start > start || hole_end < end)
> continue;
>
> - node = drm_mm_kmalloc(mm, atomic);
> - if (unlikely(node == NULL))
> - return NULL;
> -
> node->start = start;
> node->size = size;
> node->mm = mm;
> @@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> node->hole_follows = 1;
> }
>
> - return node;
> + return 0;
> }
>
> WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
> - return NULL;
> + return -ENOSPC;
> }
> EXPORT_SYMBOL(drm_mm_create_block);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 66929ea..5c6fc0e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -629,14 +629,24 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
>
> /* Mark any preallocated objects as occupied */
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> + int ret;
> DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> obj->gtt_offset, obj->base.size);
>
> BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_offset,
> - obj->base.size,
> - false);
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_ERROR("Failed to preserve all objects\n");
> + break;
> + }
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + obj->gtt_offset,
> + obj->base.size);
> + if (ret) {
> + DRM_DEBUG_KMS("Reservation failed\n");
> + kfree(obj->gtt_space);
Are you sure you don't need:
obj->gtt_space = NULL;
here?
I am no expert in i915 gem handling, but looking at i915_gem.c I think
you might run into bugs if not.
Also, why did you add the "break;" above, but not here? I am confused.
> + }
> obj->has_global_gtt_mapping = 1;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 8e02344..f9db84a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct drm_i915_gem_object *obj;
> struct drm_mm_node *stolen;
> + int ret;
>
> if (dev_priv->mm.stolen_base == 0)
> return NULL;
> @@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> if (WARN_ON(size == 0))
> return NULL;
>
> - stolen = drm_mm_create_block(&dev_priv->mm.stolen,
> - stolen_offset, size,
> - false);
> - if (stolen == NULL) {
> + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
> + if (!stolen)
> + return NULL;
> +
> + ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
> + size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen space\n");
> + kfree(stolen);
> return NULL;
> }
>
> @@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - gtt_offset, size,
> - false);
> - if (obj->gtt_space == NULL) {
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> + goto unref_out;
> + }
> +
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + gtt_offset, size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> - drm_gem_object_unreference(&obj->base);
> - return NULL;
> + goto unref_out;
Again:
kfree(obj->gtt_space);
obj->gtt_space = NULL;
Otherwise, if gem-cleanup calls drm_mm_put_block() on an already
removed node, you end up with NULL-derefs in drm_mm.c
> }
> } else
> obj->gtt_space = I915_GTT_RESERVED;
> @@ -385,8 +395,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> -
> return obj;
> +
> +unref_out:
> + drm_gem_object_unreference(&obj->base);
> + drm_mm_put_block(stolen);
"stolen" is already cleared by drm_gem_object_unreference(). So that's
a double-free here.
The drm_mm_create_block() change looks good.
Cheers
David
> + return NULL;
> }
>
> void
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index 88591ef..d8b56b7 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> /*
> * Basic range manager support (drm_mm.c)
> */
> -extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic);
> +extern int drm_mm_create_block(struct drm_mm *mm,
> + struct drm_mm_node *node,
> + unsigned long start,
> + unsigned long size);
> extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
> unsigned long size,
> unsigned alignment,
> @@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
> unsigned long start,
> unsigned long end,
> int atomic);
> +
> static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
> unsigned long size,
> unsigned alignment)
> --
> 1.8.3.2
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 19+ messages in thread* Re: [PATCH 1/6] drm: pre allocate node for create_block
2013-07-04 9:19 ` [PATCH 1/6] drm: pre allocate node for create_block David Herrmann
@ 2013-07-04 20:03 ` Ben Widawsky
0 siblings, 0 replies; 19+ messages in thread
From: Ben Widawsky @ 2013-07-04 20:03 UTC (permalink / raw)
To: David Herrmann; +Cc: Intel GFX, dri-devel@lists.freedesktop.org
On Thu, Jul 04, 2013 at 11:19:58AM +0200, David Herrmann wrote:
> Hi
>
> On Wed, Jul 3, 2013 at 11:45 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> > For an upcoming patch where we introduce the i915 VMA, it's ideal to
> > have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> > Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> > will break a bunch of code, but amongst them are 2 callers of
> > drm_mm_create_block(), both related to stolen memory.
> >
> > It also allows us to embed the drm_mm_node into the object currently
> > which provides a nice transition over to the new code.
> >
> > v2: Reordered to do before ripping out obj->gtt_offset.
> > Some minor cleanups made available because of reordering.
> >
> > CC: <dri-devel@lists.freedesktop.org>
> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> > ---
> > drivers/gpu/drm/drm_mm.c | 16 +++++----------
> > drivers/gpu/drm/i915/i915_gem_gtt.c | 18 +++++++++++++----
> > drivers/gpu/drm/i915/i915_gem_stolen.c | 36 +++++++++++++++++++++++-----------
> > include/drm/drm_mm.h | 9 +++++----
> > 4 files changed, 49 insertions(+), 30 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> > index 07cf99c..9e8dfbc 100644
> > --- a/drivers/gpu/drm/drm_mm.c
> > +++ b/drivers/gpu/drm/drm_mm.c
> > @@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> > }
> > }
> >
> > -struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> > - unsigned long start,
> > - unsigned long size,
> > - bool atomic)
> > +int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
> > + unsigned long start, unsigned long size)
> > {
> > - struct drm_mm_node *hole, *node;
> > + struct drm_mm_node *hole;
> > unsigned long end = start + size;
> > unsigned long hole_start;
> > unsigned long hole_end;
> > @@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> > if (hole_start > start || hole_end < end)
> > continue;
> >
> > - node = drm_mm_kmalloc(mm, atomic);
> > - if (unlikely(node == NULL))
> > - return NULL;
> > -
> > node->start = start;
> > node->size = size;
> > node->mm = mm;
> > @@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> > node->hole_follows = 1;
> > }
> >
> > - return node;
> > + return 0;
> > }
> >
> > WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
> > - return NULL;
> > + return -ENOSPC;
> > }
> > EXPORT_SYMBOL(drm_mm_create_block);
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > index 66929ea..5c6fc0e 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> > @@ -629,14 +629,24 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> >
> > /* Mark any preallocated objects as occupied */
> > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> > + int ret;
> > DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> > obj->gtt_offset, obj->base.size);
> >
> > BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> > - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > - obj->gtt_offset,
> > - obj->base.size,
> > - false);
> > + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> > + if (!obj->gtt_space) {
> > + DRM_ERROR("Failed to preserve all objects\n");
> > + break;
> > + }
> > + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > + obj->gtt_space,
> > + obj->gtt_offset,
> > + obj->base.size);
> > + if (ret) {
> > + DRM_DEBUG_KMS("Reservation failed\n");
> > + kfree(obj->gtt_space);
>
> Are you sure you don't need:
> obj->gtt_space = NULL;
> here?
> I am no expert in i915 gem handling, but looking at i915_gem.c I think
> you might run into bugs if not.
I'm too lazy to actually check, but I believe you're probably right.
It's fixed in a later patch where I added the getters and use
node_allocated so I don't check obj->gtt_space != NULL anymore; but it
would potentially be a painful bisect point.
Thanks for catching it (and the following ones).
>
> Also, why did you add the "break;" above, but not here? I am confused.
The thought at the time was if kzalloc fails at this point, subsequent
kzallocs are really likely to fail also. drm_mm_create_block OTOH is
something I won't pretend to inquire about failure recurrence. I agree
it looks funny though, so I'll change the break to continue.
>
> > + }
> > obj->has_global_gtt_mapping = 1;
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > index 8e02344..f9db84a 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > @@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> > struct drm_i915_private *dev_priv = dev->dev_private;
> > struct drm_i915_gem_object *obj;
> > struct drm_mm_node *stolen;
> > + int ret;
> >
> > if (dev_priv->mm.stolen_base == 0)
> > return NULL;
> > @@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> > if (WARN_ON(size == 0))
> > return NULL;
> >
> > - stolen = drm_mm_create_block(&dev_priv->mm.stolen,
> > - stolen_offset, size,
> > - false);
> > - if (stolen == NULL) {
> > + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
> > + if (!stolen)
> > + return NULL;
> > +
> > + ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
> > + size);
> > + if (ret) {
> > DRM_DEBUG_KMS("failed to allocate stolen space\n");
> > + kfree(stolen);
> > return NULL;
> > }
> >
> > @@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> > * later.
> > */
> > if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> > - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > - gtt_offset, size,
> > - false);
> > - if (obj->gtt_space == NULL) {
> > + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> > + if (!obj->gtt_space) {
> > + DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> > + goto unref_out;
> > + }
> > +
> > + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> > + obj->gtt_space,
> > + gtt_offset, size);
> > + if (ret) {
> > DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> > - drm_gem_object_unreference(&obj->base);
> > - return NULL;
> > + goto unref_out;
>
> Again:
> kfree(obj->gtt_space);
> obj->gtt_space = NULL;
> Otherwise, if gem-cleanup calls drm_mm_put_block() on an already
> removed node, you end up with NULL-derefs in drm_mm.c
>
> > }
> > } else
> > obj->gtt_space = I915_GTT_RESERVED;
> > @@ -385,8 +395,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> >
> > list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> > list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> > -
> > return obj;
> > +
> > +unref_out:
> > + drm_gem_object_unreference(&obj->base);
> > + drm_mm_put_block(stolen);
>
> "stolen" is already cleared by drm_gem_object_unreference(). So that's
> a double-free here.
>
> The drm_mm_create_block() change looks good.
> Cheers
> David
>
Thanks for reviewing the i915 parts so thoroughly :D
>
> > + return NULL;
> > }
> >
> > void
> > diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> > index 88591ef..d8b56b7 100644
> > --- a/include/drm/drm_mm.h
> > +++ b/include/drm/drm_mm.h
> > @@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> > /*
> > * Basic range manager support (drm_mm.c)
> > */
> > -extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> > - unsigned long start,
> > - unsigned long size,
> > - bool atomic);
> > +extern int drm_mm_create_block(struct drm_mm *mm,
> > + struct drm_mm_node *node,
> > + unsigned long start,
> > + unsigned long size);
> > extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
> > unsigned long size,
> > unsigned alignment,
> > @@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
> > unsigned long start,
> > unsigned long end,
> > int atomic);
> > +
> > static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
> > unsigned long size,
> > unsigned alignment)
> > --
> > 1.8.3.2
> >
> > _______________________________________________
> > dri-devel mailing list
> > dri-devel@lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/dri-devel
--
Ben Widawsky, Intel Open Source Technology Center
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH 1/6] drm: pre allocate node for create_block
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
` (5 preceding siblings ...)
2013-07-04 9:19 ` [PATCH 1/6] drm: pre allocate node for create_block David Herrmann
@ 2013-07-04 9:22 ` David Herrmann
2013-07-04 20:14 ` [PATCH] [v3] " Ben Widawsky
7 siblings, 0 replies; 19+ messages in thread
From: David Herrmann @ 2013-07-04 9:22 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX, dri-devel@lists.freedesktop.org
Hi
On Wed, Jul 3, 2013 at 11:45 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> For an upcoming patch where we introduce the i915 VMA, it's ideal to
> have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> will break a bunch of code, but amongst them are 2 callers of
> drm_mm_create_block(), both related to stolen memory.
>
> It also allows us to embed the drm_mm_node into the object currently
> which provides a nice transition over to the new code.
>
> v2: Reordered to do before ripping out obj->gtt_offset.
> Some minor cleanups made available because of reordering.
>
> CC: <dri-devel@lists.freedesktop.org>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> ---
> drivers/gpu/drm/drm_mm.c | 16 +++++----------
> drivers/gpu/drm/i915/i915_gem_gtt.c | 18 +++++++++++++----
> drivers/gpu/drm/i915/i915_gem_stolen.c | 36 +++++++++++++++++++++++-----------
> include/drm/drm_mm.h | 9 +++++----
> 4 files changed, 49 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index 07cf99c..9e8dfbc 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> }
> }
>
> -struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic)
> +int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
> + unsigned long start, unsigned long size)
> {
> - struct drm_mm_node *hole, *node;
> + struct drm_mm_node *hole;
> unsigned long end = start + size;
> unsigned long hole_start;
> unsigned long hole_end;
> @@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> if (hole_start > start || hole_end < end)
> continue;
>
> - node = drm_mm_kmalloc(mm, atomic);
> - if (unlikely(node == NULL))
> - return NULL;
> -
> node->start = start;
> node->size = size;
> node->mm = mm;
> @@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> node->hole_follows = 1;
> }
>
> - return node;
> + return 0;
> }
>
> WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
> - return NULL;
> + return -ENOSPC;
> }
> EXPORT_SYMBOL(drm_mm_create_block);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 66929ea..5c6fc0e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -629,14 +629,24 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
>
> /* Mark any preallocated objects as occupied */
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> + int ret;
> DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> obj->gtt_offset, obj->base.size);
>
> BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_offset,
> - obj->base.size,
> - false);
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_ERROR("Failed to preserve all objects\n");
> + break;
> + }
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + obj->gtt_offset,
> + obj->base.size);
> + if (ret) {
> + DRM_DEBUG_KMS("Reservation failed\n");
> + kfree(obj->gtt_space);
> + }
> obj->has_global_gtt_mapping = 1;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 8e02344..f9db84a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct drm_i915_gem_object *obj;
> struct drm_mm_node *stolen;
> + int ret;
>
> if (dev_priv->mm.stolen_base == 0)
> return NULL;
> @@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> if (WARN_ON(size == 0))
> return NULL;
>
> - stolen = drm_mm_create_block(&dev_priv->mm.stolen,
> - stolen_offset, size,
> - false);
> - if (stolen == NULL) {
> + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
> + if (!stolen)
> + return NULL;
> +
> + ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
> + size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen space\n");
> + kfree(stolen);
> return NULL;
> }
>
> @@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - gtt_offset, size,
> - false);
> - if (obj->gtt_space == NULL) {
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> + goto unref_out;
> + }
> +
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + gtt_offset, size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> - drm_gem_object_unreference(&obj->base);
> - return NULL;
> + goto unref_out;
> }
> } else
> obj->gtt_space = I915_GTT_RESERVED;
> @@ -385,8 +395,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
> -
> return obj;
> +
> +unref_out:
> + drm_gem_object_unreference(&obj->base);
> + drm_mm_put_block(stolen);
> + return NULL;
> }
>
> void
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index 88591ef..d8b56b7 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> /*
> * Basic range manager support (drm_mm.c)
> */
> -extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic);
> +extern int drm_mm_create_block(struct drm_mm *mm,
> + struct drm_mm_node *node,
> + unsigned long start,
> + unsigned long size);
> extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
> unsigned long size,
> unsigned alignment,
> @@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
> unsigned long start,
> unsigned long end,
> int atomic);
> +
Nitpick: This newline doesn't belong in this patch.
Cheers
David
> static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
> unsigned long size,
> unsigned alignment)
> --
> 1.8.3.2
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
^ permalink raw reply [flat|nested] 19+ messages in thread* [PATCH] [v3] drm: pre allocate node for create_block
2013-07-03 21:45 [PATCH 1/6] drm: pre allocate node for create_block Ben Widawsky
` (6 preceding siblings ...)
2013-07-04 9:22 ` David Herrmann
@ 2013-07-04 20:14 ` Ben Widawsky
2013-07-04 20:32 ` David Herrmann
7 siblings, 1 reply; 19+ messages in thread
From: Ben Widawsky @ 2013-07-04 20:14 UTC (permalink / raw)
To: Intel GFX; +Cc: Ben Widawsky, dri-devel
For an upcoming patch where we introduce the i915 VMA, it's ideal to
have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
will break a bunch of code, but amongst them are 2 callers of
drm_mm_create_block(), both related to stolen memory.
It also allows us to embed the drm_mm_node into the object currently
which provides a nice transition over to the new code.
v2: Reordered to do before ripping out obj->gtt_offset.
Some minor cleanups made available because of reordering.
v3: s/continue/break on failed stolen node allocation (David)
Set obj->gtt_space on failed node allocation (David)
Only unref stolen (fix double free) on failed create_stolen (David)
Free node, and NULL it in failed create_stolen (David)
Add back accidentally removed newline (David)
CC: <dri-devel@lists.freedesktop.org>
CC: David Herrmann <dh.herrmann@gmail.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/drm_mm.c | 16 +++++----------
drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++++++----
drivers/gpu/drm/i915/i915_gem_stolen.c | 37 +++++++++++++++++++++++++---------
include/drm/drm_mm.h | 9 +++++----
4 files changed, 53 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 07cf99c..9e8dfbc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic)
+int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
+ unsigned long start, unsigned long size)
{
- struct drm_mm_node *hole, *node;
+ struct drm_mm_node *hole;
unsigned long end = start + size;
unsigned long hole_start;
unsigned long hole_end;
@@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
if (hole_start > start || hole_end < end)
continue;
- node = drm_mm_kmalloc(mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
node->start = start;
node->size = size;
node->mm = mm;
@@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
node->hole_follows = 1;
}
- return node;
+ return 0;
}
WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
- return NULL;
+ return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_create_block);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 66929ea..88180a5 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -629,14 +629,26 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ int ret;
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
obj->gtt_offset, obj->base.size);
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_offset,
- obj->base.size,
- false);
+ obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
+ if (!obj->gtt_space) {
+ DRM_ERROR("Failed to preserve object at offset %x\n",
+ obj->gtt_offset);
+ continue;
+ }
+ ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_space,
+ obj->gtt_offset,
+ obj->base.size);
+ if (ret) {
+ DRM_DEBUG_KMS("Reservation failed\n");
+ kfree(obj->gtt_space);
+ obj->gtt_space = NULL;
+ }
obj->has_global_gtt_mapping = 1;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e02344..fb19d00 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ int ret;
if (dev_priv->mm.stolen_base == 0)
return NULL;
@@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (WARN_ON(size == 0))
return NULL;
- stolen = drm_mm_create_block(&dev_priv->mm.stolen,
- stolen_offset, size,
- false);
- if (stolen == NULL) {
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
+ size);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ kfree(stolen);
return NULL;
}
@@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* later.
*/
if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- gtt_offset, size,
- false);
- if (obj->gtt_space == NULL) {
+ obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
+ if (!obj->gtt_space) {
+ DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
+ goto unref_out;
+ }
+
+ ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
+ obj->gtt_space,
+ gtt_offset, size);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- drm_gem_object_unreference(&obj->base);
- return NULL;
+ goto free_out;
}
} else
obj->gtt_space = I915_GTT_RESERVED;
@@ -387,6 +397,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
return obj;
+
+free_out:
+ kfree(obj->gtt_space);
+ obj->gtt_space = NULL;
+unref_out:
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
}
void
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 88591ef..d8b56b7 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
/*
* Basic range manager support (drm_mm.c)
*/
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic);
+extern int drm_mm_create_block(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long start,
+ unsigned long size);
extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
@@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
unsigned long start,
unsigned long end,
int atomic);
+
static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
unsigned long size,
unsigned alignment)
--
1.8.3.2
^ permalink raw reply related [flat|nested] 19+ messages in thread* Re: [PATCH] [v3] drm: pre allocate node for create_block
2013-07-04 20:14 ` [PATCH] [v3] " Ben Widawsky
@ 2013-07-04 20:32 ` David Herrmann
2013-07-05 19:25 ` Daniel Vetter
0 siblings, 1 reply; 19+ messages in thread
From: David Herrmann @ 2013-07-04 20:32 UTC (permalink / raw)
To: Ben Widawsky; +Cc: Intel GFX, dri-devel@lists.freedesktop.org
Hi
On Thu, Jul 4, 2013 at 10:14 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> For an upcoming patch where we introduce the i915 VMA, it's ideal to
> have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> will break a bunch of code, but amongst them are 2 callers of
> drm_mm_create_block(), both related to stolen memory.
>
> It also allows us to embed the drm_mm_node into the object currently
> which provides a nice transition over to the new code.
>
> v2: Reordered to do before ripping out obj->gtt_offset.
> Some minor cleanups made available because of reordering.
>
> v3: s/continue/break on failed stolen node allocation (David)
> Set obj->gtt_space on failed node allocation (David)
> Only unref stolen (fix double free) on failed create_stolen (David)
> Free node, and NULL it in failed create_stolen (David)
> Add back accidentally removed newline (David)
>
> CC: <dri-devel@lists.freedesktop.org>
> CC: David Herrmann <dh.herrmann@gmail.com>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
I already suspected that you'd embed drm_mm_node in a follow-up patch
but I am not subscribed to intel-gfx so I didn't get the other
patches. Looks good now.
Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Cheers
David
> ---
> drivers/gpu/drm/drm_mm.c | 16 +++++----------
> drivers/gpu/drm/i915/i915_gem_gtt.c | 20 ++++++++++++++----
> drivers/gpu/drm/i915/i915_gem_stolen.c | 37 +++++++++++++++++++++++++---------
> include/drm/drm_mm.h | 9 +++++----
> 4 files changed, 53 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index 07cf99c..9e8dfbc 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -147,12 +147,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> }
> }
>
> -struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic)
> +int drm_mm_create_block(struct drm_mm *mm, struct drm_mm_node *node,
> + unsigned long start, unsigned long size)
> {
> - struct drm_mm_node *hole, *node;
> + struct drm_mm_node *hole;
> unsigned long end = start + size;
> unsigned long hole_start;
> unsigned long hole_end;
> @@ -161,10 +159,6 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> if (hole_start > start || hole_end < end)
> continue;
>
> - node = drm_mm_kmalloc(mm, atomic);
> - if (unlikely(node == NULL))
> - return NULL;
> -
> node->start = start;
> node->size = size;
> node->mm = mm;
> @@ -184,11 +178,11 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> node->hole_follows = 1;
> }
>
> - return node;
> + return 0;
> }
>
> WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
> - return NULL;
> + return -ENOSPC;
> }
> EXPORT_SYMBOL(drm_mm_create_block);
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 66929ea..88180a5 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -629,14 +629,26 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
>
> /* Mark any preallocated objects as occupied */
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> + int ret;
> DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> obj->gtt_offset, obj->base.size);
>
> BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_offset,
> - obj->base.size,
> - false);
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_ERROR("Failed to preserve object at offset %x\n",
> + obj->gtt_offset);
> + continue;
> + }
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + obj->gtt_offset,
> + obj->base.size);
> + if (ret) {
> + DRM_DEBUG_KMS("Reservation failed\n");
> + kfree(obj->gtt_space);
> + obj->gtt_space = NULL;
> + }
> obj->has_global_gtt_mapping = 1;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 8e02344..fb19d00 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> struct drm_i915_private *dev_priv = dev->dev_private;
> struct drm_i915_gem_object *obj;
> struct drm_mm_node *stolen;
> + int ret;
>
> if (dev_priv->mm.stolen_base == 0)
> return NULL;
> @@ -344,11 +345,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> if (WARN_ON(size == 0))
> return NULL;
>
> - stolen = drm_mm_create_block(&dev_priv->mm.stolen,
> - stolen_offset, size,
> - false);
> - if (stolen == NULL) {
> + stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
> + if (!stolen)
> + return NULL;
> +
> + ret = drm_mm_create_block(&dev_priv->mm.stolen, stolen, stolen_offset,
> + size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen space\n");
> + kfree(stolen);
> return NULL;
> }
>
> @@ -369,13 +374,18 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - gtt_offset, size,
> - false);
> - if (obj->gtt_space == NULL) {
> + obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> + if (!obj->gtt_space) {
> + DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> + goto unref_out;
> + }
> +
> + ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> + obj->gtt_space,
> + gtt_offset, size);
> + if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> - drm_gem_object_unreference(&obj->base);
> - return NULL;
> + goto free_out;
> }
> } else
> obj->gtt_space = I915_GTT_RESERVED;
> @@ -387,6 +397,13 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> return obj;
> +
> +free_out:
> + kfree(obj->gtt_space);
> + obj->gtt_space = NULL;
> +unref_out:
> + drm_gem_object_unreference(&obj->base);
> + return NULL;
> }
>
> void
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index 88591ef..d8b56b7 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -138,10 +138,10 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> /*
> * Basic range manager support (drm_mm.c)
> */
> -extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
> - unsigned long start,
> - unsigned long size,
> - bool atomic);
> +extern int drm_mm_create_block(struct drm_mm *mm,
> + struct drm_mm_node *node,
> + unsigned long start,
> + unsigned long size);
> extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
> unsigned long size,
> unsigned alignment,
> @@ -155,6 +155,7 @@ extern struct drm_mm_node *drm_mm_get_block_range_generic(
> unsigned long start,
> unsigned long end,
> int atomic);
> +
> static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
> unsigned long size,
> unsigned alignment)
> --
> 1.8.3.2
>
^ permalink raw reply [flat|nested] 19+ messages in thread* Re: [PATCH] [v3] drm: pre allocate node for create_block
2013-07-04 20:32 ` David Herrmann
@ 2013-07-05 19:25 ` Daniel Vetter
2013-07-05 19:44 ` Ben Widawsky
0 siblings, 1 reply; 19+ messages in thread
From: Daniel Vetter @ 2013-07-05 19:25 UTC (permalink / raw)
To: David Herrmann; +Cc: Ben Widawsky, Intel GFX, dri-devel@lists.freedesktop.org
On Thu, Jul 04, 2013 at 10:32:27PM +0200, David Herrmann wrote:
> Hi
>
> On Thu, Jul 4, 2013 at 10:14 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> > For an upcoming patch where we introduce the i915 VMA, it's ideal to
> > have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> > Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> > will break a bunch of code, but amongst them are 2 callers of
> > drm_mm_create_block(), both related to stolen memory.
> >
> > It also allows us to embed the drm_mm_node into the object currently
> > which provides a nice transition over to the new code.
> >
> > v2: Reordered to do before ripping out obj->gtt_offset.
> > Some minor cleanups made available because of reordering.
> >
> > v3: s/continue/break on failed stolen node allocation (David)
> > Set obj->gtt_space on failed node allocation (David)
> > Only unref stolen (fix double free) on failed create_stolen (David)
> > Free node, and NULL it in failed create_stolen (David)
> > Add back accidentally removed newline (David)
> >
> > CC: <dri-devel@lists.freedesktop.org>
> > CC: David Herrmann <dh.herrmann@gmail.com>
> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
>
> I already suspected that you'd embed drm_mm_node in a follow-up patch
> but I am not subscribed to intel-gfx so I didn't get the other
> patches. Looks good now.
>
> Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
Ok, I've discussed this a bit with Dave on irc and he's a bit unhappy with
the create_block name. After a bit of chatting with Ben I think we should
go with
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
The start/size/color arguments would then be passed in through the
drm_mm_node argument. Ben asked whether that's not too much poking around
in drm_mm_node internals, but imo those three pieces of it are part of the
public interface to drm_mm users.
Also, the patch as-is conflicts a bit too badly with my current tree. So
can you please apply the little bikeshed, rebase, and then rebase the
other patches in this series that I haven't merged on top of this?
Thanks, Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
^ permalink raw reply [flat|nested] 19+ messages in thread
* Re: [PATCH] [v3] drm: pre allocate node for create_block
2013-07-05 19:25 ` Daniel Vetter
@ 2013-07-05 19:44 ` Ben Widawsky
0 siblings, 0 replies; 19+ messages in thread
From: Ben Widawsky @ 2013-07-05 19:44 UTC (permalink / raw)
To: Daniel Vetter; +Cc: Intel GFX, dri-devel@lists.freedesktop.org, David Herrmann
On Fri, Jul 05, 2013 at 09:25:35PM +0200, Daniel Vetter wrote:
> On Thu, Jul 04, 2013 at 10:32:27PM +0200, David Herrmann wrote:
> > Hi
> >
> > On Thu, Jul 4, 2013 at 10:14 PM, Ben Widawsky <ben@bwidawsk.net> wrote:
> > > For an upcoming patch where we introduce the i915 VMA, it's ideal to
> > > have the drm_mm_node as part of the VMA struct (ie. it's pre-allocated).
> > > Part of the conversion to VMAs is to kill off obj->gtt_space. Doing this
> > > will break a bunch of code, but amongst them are 2 callers of
> > > drm_mm_create_block(), both related to stolen memory.
> > >
> > > It also allows us to embed the drm_mm_node into the object currently
> > > which provides a nice transition over to the new code.
> > >
> > > v2: Reordered to do before ripping out obj->gtt_offset.
> > > Some minor cleanups made available because of reordering.
> > >
> > > v3: s/continue/break on failed stolen node allocation (David)
> > > Set obj->gtt_space on failed node allocation (David)
> > > Only unref stolen (fix double free) on failed create_stolen (David)
> > > Free node, and NULL it in failed create_stolen (David)
> > > Add back accidentally removed newline (David)
> > >
> > > CC: <dri-devel@lists.freedesktop.org>
> > > CC: David Herrmann <dh.herrmann@gmail.com>
> > > Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
> >
> > I already suspected that you'd embed drm_mm_node in a follow-up patch
> > but I am not subscribed to intel-gfx so I didn't get the other
> > patches. Looks good now.
> >
> > Reviewed-by: David Herrmann <dh.herrmann@gmail.com>
>
> Ok, I've discussed this a bit with Dave on irc and he's a bit unhappy with
> the creat_block name. After a bit of chatting with Ben I think we should
> go with
>
> int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
>
> The start/size/color arguments would then be passed in through the
> drm_mm_node argument. Ben asked whether that's not too much poking around
> in drm_mm_node internals, but imo those three pieces of it are part of the
> public interface to drm_mm users.
Do you mind if I leave the patch as is, since it's reviewed, and put the
rename patch on top? I have a long history of screwing up simple
rebases.
>
> Also, the patch as-is conflicts a bit too badly with my current tree. So
> can you please apply the little bikeshed, rebase, and then rebase the
> other patches in this series that I haven't merged on top of this?
>
> Thanks, Daniel
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> +41 (0) 79 365 57 48 - http://blog.ffwll.ch
--
Ben Widawsky, Intel Open Source Technology Center
^ permalink raw reply [flat|nested] 19+ messages in thread