[Intel-gfx] [PATCH 2/6] drm/i915: Getter/setter for object attributes
Daniel Vetter
daniel at ffwll.ch
Thu Jul 4 00:55:51 CEST 2013
On Wed, Jul 03, 2013 at 02:45:22PM -0700, Ben Widawsky wrote:
> Soon we want to gut a lot of our existing assumptions about how many
> address spaces an object can live in, and in doing so, embed the
> drm_mm_node in the object (and later the VMA).
>
> It's possible in the future we'll want to add more getter/setter
> methods, but for now this is enough to enable the VMAs.
>
> v2: Reworked commit message (Ben)
> Added comments to the main functions (Ben)
> sed -i "s/i915_gem_obj_set_color/i915_gem_obj_ggtt_set_color/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_bound/i915_gem_obj_ggtt_bound/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_size/i915_gem_obj_ggtt_size/" drivers/gpu/drm/i915/*.[ch]
> sed -i "s/i915_gem_obj_offset/i915_gem_obj_ggtt_offset/" drivers/gpu/drm/i915/*.[ch]
> (Daniel)
>
> Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
[snip]
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index fd0f589..496ed3a 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1227,7 +1227,7 @@ struct drm_i915_gem_object {
> const struct drm_i915_gem_object_ops *ops;
>
> /** Current space allocated to this object in the GTT, if any. */
> - struct drm_mm_node *gtt_space;
> + struct drm_mm_node *ggtt_space;
Is this ...
> /** Stolen memory for this object, instead of being backed by shmem. */
> struct drm_mm_node *stolen;
> struct list_head global_list;
> @@ -1333,7 +1333,7 @@ struct drm_i915_gem_object {
> *
> * This is the same as gtt_space->start
> */
> - uint32_t gtt_offset;
> + uint32_t ggtt_offset;
... and this intentional? It feels a bit like needless churn if we're
going to move the entire thing into the vma real soon now anyway. I can
bikeshed this while applying if you're ok with that; I need a solid
testcase to improve my patch apply scripts anyway ;-)
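
For illustration only, roughly the shape I have in mind once the node is
embedded in the vma - every name below (i915_vma, i915_address_space,
i915_gem_obj_to_vma) is hypothetical and not part of this series, just a
sketch of the direction:

struct i915_vma {
	struct drm_mm_node node;		/* embedded, no separate kzalloc */
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;
	struct list_head vma_link;		/* per-object list of vmas */
};

static inline unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm)
{
	/* hypothetical per-vm lookup helper, with the same "must already
	 * be bound" contract as i915_gem_obj_ggtt_offset() */
	struct i915_vma *vma = i915_gem_obj_to_vma(o, vm);

	return vma->node.start;
}

With something like that in place the ggtt_ helpers in this patch would
presumably just become the global-gtt special case, which is why the
field rename smells a bit like churn to me.
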
Cheers, Daniel
>
> struct intel_ring_buffer *ring;
>
> @@ -1360,6 +1360,37 @@ struct drm_i915_gem_object {
>
> #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
>
> +/* Offset of the first PTE pointing to this object */
> +static inline unsigned long
> +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space->start;
> +}
> +
> +/* Whether or not this object is currently mapped by the translation tables */
> +static inline bool
> +i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space != NULL;
> +}
> +
> +/* The size used in the translation tables may be larger than the actual size of
> + * the object on GEN2/GEN3 because of the way tiling is handled. See
> + * i915_gem_get_gtt_size() for more details.
> + */
> +static inline unsigned long
> +i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
> +{
> + return o->ggtt_space->size;
> +}
> +
> +static inline void
> +i915_gem_obj_ggtt_set_color(struct drm_i915_gem_object *o,
> + enum i915_cache_level color)
> +{
> + o->ggtt_space->color = color;
> +}
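
As a usage note (illustrative only, not something this patch adds): the
offset/size getters dereference ggtt_space unconditionally, so callers
still need the bound check first, e.g. something along the lines of a
debugfs dump (m being a struct seq_file *):

	if (i915_gem_obj_ggtt_bound(obj))
		seq_printf(m, "ggtt offset: %08lx, size: %08lx\n",
			   i915_gem_obj_ggtt_offset(obj),
			   i915_gem_obj_ggtt_size(obj));
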
> +
> /**
> * Request queue structure.
> *
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4200c32..edd5b6d 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
> static inline bool
> i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
> {
> - return obj->gtt_space && !obj->active;
> + return i915_gem_obj_ggtt_bound(obj) && !obj->active;
> }
>
> int
> @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
> mutex_lock(&dev->struct_mutex);
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
> if (obj->pin_count)
> - pinned += obj->gtt_space->size;
> + pinned += i915_gem_obj_ggtt_size(obj);
> mutex_unlock(&dev->struct_mutex);
>
> args->aper_size = dev_priv->gtt.total;
> @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
> * anyway again before the next pread happens. */
> if (obj->cache_level == I915_CACHE_NONE)
> needs_clflush = 1;
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_set_to_gtt_domain(obj, false);
> if (ret)
> return ret;
> @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
> user_data = to_user_ptr(args->data_ptr);
> remain = args->size;
>
> - offset = obj->gtt_offset + args->offset;
> + offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
>
> while (remain > 0) {
> /* Operation in this page
> @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
> * right away and we therefore have to clflush anyway. */
> if (obj->cache_level == I915_CACHE_NONE)
> needs_clflush_after = 1;
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_set_to_gtt_domain(obj, true);
> if (ret)
> return ret;
> @@ -1360,8 +1360,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>
> obj->fault_mappable = true;
>
> - pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
> - page_offset;
> + pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
> + pfn >>= PAGE_SHIFT;
> + pfn += page_offset;
>
> /* Finally, remap it using the new GTT offset */
> ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
> @@ -1667,7 +1668,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> if (obj->pages == NULL)
> return 0;
>
> - BUG_ON(obj->gtt_space);
> + BUG_ON(i915_gem_obj_ggtt_bound(obj));
>
> if (obj->pages_pin_count)
> return -EBUSY;
> @@ -2117,8 +2118,8 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
>
> static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
> {
> - if (acthd >= obj->gtt_offset &&
> - acthd < obj->gtt_offset + obj->base.size)
> + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> return true;
>
> return false;
> @@ -2176,11 +2177,11 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
>
> if (ring->hangcheck.action != wait &&
> i915_request_guilty(request, acthd, &inside)) {
> - DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
> + DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
> ring->name,
> inside ? "inside" : "flushing",
> request->batch_obj ?
> - request->batch_obj->gtt_offset : 0,
> + i915_gem_obj_ggtt_offset(request->batch_obj) : 0,
> request->ctx ? request->ctx->id : 0,
> acthd);
>
> @@ -2581,7 +2582,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
> int ret;
>
> - if (obj->gtt_space == NULL)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return 0;
>
> if (obj->pin_count)
> @@ -2620,9 +2621,9 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
> /* Avoid an unnecessary call to unbind on rebind. */
> obj->map_and_fenceable = true;
>
> - drm_mm_put_block(obj->gtt_space);
> - obj->gtt_space = NULL;
> - obj->gtt_offset = 0;
> + drm_mm_put_block(obj->ggtt_space);
> + obj->ggtt_space = NULL;
> + obj->ggtt_offset = 0;
>
> return 0;
> }
> @@ -2664,11 +2665,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
> }
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
>
> - val = (uint64_t)((obj->gtt_offset + size - 4096) &
> + val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
> 0xfffff000) << 32;
> - val |= obj->gtt_offset & 0xfffff000;
> + val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
> val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I965_FENCE_TILING_Y_SHIFT;
> @@ -2688,15 +2689,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> u32 val;
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
> int pitch_val;
> int tile_width;
>
> - WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
> + WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
> (size & -size) != size ||
> - (obj->gtt_offset & (size - 1)),
> - "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> - obj->gtt_offset, obj->map_and_fenceable, size);
> + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> + "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
> + i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
>
> if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
> tile_width = 128;
> @@ -2707,7 +2708,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
> pitch_val = obj->stride / tile_width;
> pitch_val = ffs(pitch_val) - 1;
>
> - val = obj->gtt_offset;
> + val = i915_gem_obj_ggtt_offset(obj);
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> val |= I915_FENCE_SIZE_BITS(size);
> @@ -2732,19 +2733,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
> uint32_t val;
>
> if (obj) {
> - u32 size = obj->gtt_space->size;
> + u32 size = i915_gem_obj_ggtt_size(obj);
> uint32_t pitch_val;
>
> - WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
> + WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
> (size & -size) != size ||
> - (obj->gtt_offset & (size - 1)),
> - "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
> - obj->gtt_offset, size);
> + (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
> + "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
> + i915_gem_obj_ggtt_offset(obj), size);
>
> pitch_val = obj->stride / 128;
> pitch_val = ffs(pitch_val) - 1;
>
> - val = obj->gtt_offset;
> + val = i915_gem_obj_ggtt_offset(obj);
> if (obj->tiling_mode == I915_TILING_Y)
> val |= 1 << I830_FENCE_TILING_Y_SHIFT;
> val |= I830_FENCE_SIZE_BITS(size);
> @@ -3033,8 +3034,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
>
> if (obj->cache_level != obj->gtt_space->color) {
> printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
> - obj->gtt_space->start,
> - obj->gtt_space->start + obj->gtt_space->size,
> + i915_gem_obj_ggtt_offset(obj),
> + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> obj->cache_level,
> obj->gtt_space->color);
> err++;
> @@ -3045,8 +3046,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
> obj->gtt_space,
> obj->cache_level)) {
> printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
> - obj->gtt_space->start,
> - obj->gtt_space->start + obj->gtt_space->size,
> + i915_gem_obj_ggtt_offset(obj),
> + i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
> obj->cache_level);
> err++;
> continue;
> @@ -3151,15 +3152,15 @@ search_free:
> list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
>
> - obj->gtt_space = node;
> - obj->gtt_offset = node->start;
> + obj->ggtt_space = node;
> + obj->ggtt_offset = node->start;
>
> fenceable =
> node->size == fence_size &&
> (node->start & (fence_alignment - 1)) == 0;
>
> - mappable =
> - obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
> + mappable = i915_gem_obj_ggtt_offset(obj) + obj->base.size <=
> + dev_priv->gtt.mappable_end;
>
> obj->map_and_fenceable = mappable && fenceable;
>
> @@ -3261,7 +3262,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
> int ret;
>
> /* Not valid to be called on unbound objects. */
> - if (obj->gtt_space == NULL)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return -EINVAL;
>
> if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
> @@ -3320,13 +3321,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> return -EBUSY;
> }
>
> - if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
> + if (!i915_gem_valid_gtt_space(dev, obj->ggtt_space, cache_level)) {
> ret = i915_gem_object_unbind(obj);
> if (ret)
> return ret;
> }
>
> - if (obj->gtt_space) {
> + if (i915_gem_obj_ggtt_bound(obj)) {
> ret = i915_gem_object_finish_gpu(obj);
> if (ret)
> return ret;
> @@ -3349,7 +3350,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
> obj, cache_level);
>
> - obj->gtt_space->color = cache_level;
> + i915_gem_obj_ggtt_set_color(obj, cache_level);
> }
>
> if (cache_level == I915_CACHE_NONE) {
> @@ -3630,14 +3631,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
> return -EBUSY;
>
> - if (obj->gtt_space != NULL) {
> - if ((alignment && obj->gtt_offset & (alignment - 1)) ||
> + if (i915_gem_obj_ggtt_bound(obj)) {
> + if ((alignment && i915_gem_obj_ggtt_offset(obj) & (alignment - 1)) ||
> (map_and_fenceable && !obj->map_and_fenceable)) {
> WARN(obj->pin_count,
> "bo is already pinned with incorrect alignment:"
> - " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
> + " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
> " obj->map_and_fenceable=%d\n",
> - obj->gtt_offset, alignment,
> + i915_gem_obj_ggtt_offset(obj), alignment,
> map_and_fenceable,
> obj->map_and_fenceable);
> ret = i915_gem_object_unbind(obj);
> @@ -3646,7 +3647,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
> }
> }
>
> - if (obj->gtt_space == NULL) {
> + if (!i915_gem_obj_ggtt_bound(obj)) {
> struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
>
> ret = i915_gem_object_bind_to_gtt(obj, alignment,
> @@ -3672,7 +3673,7 @@ void
> i915_gem_object_unpin(struct drm_i915_gem_object *obj)
> {
> BUG_ON(obj->pin_count == 0);
> - BUG_ON(obj->gtt_space == NULL);
> + BUG_ON(!i915_gem_obj_ggtt_bound(obj));
>
> if (--obj->pin_count == 0)
> obj->pin_mappable = false;
> @@ -3722,7 +3723,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
> * as the X server doesn't manage domains yet
> */
> i915_gem_object_flush_cpu_write_domain(obj);
> - args->offset = obj->gtt_offset;
> + args->offset = i915_gem_obj_ggtt_offset(obj);
> out:
> drm_gem_object_unreference(&obj->base);
> unlock:
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 51b7a21..2074544 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -377,7 +377,7 @@ mi_set_context(struct intel_ring_buffer *ring,
>
> intel_ring_emit(ring, MI_NOOP);
> intel_ring_emit(ring, MI_SET_CONTEXT);
> - intel_ring_emit(ring, new_context->obj->gtt_offset |
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
> MI_MM_SPACE_GTT |
> MI_SAVE_EXT_STATE_EN |
> MI_RESTORE_EXT_STATE_EN |
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index c86d5d9..5bbdea4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -38,7 +38,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
> return false;
>
> list_add(&obj->exec_list, unwind);
> - return drm_mm_scan_add_block(obj->gtt_space);
> + return drm_mm_scan_add_block(obj->ggtt_space);
> }
>
> int
> @@ -107,7 +107,7 @@ none:
> struct drm_i915_gem_object,
> exec_list);
>
> - ret = drm_mm_scan_remove_block(obj->gtt_space);
> + ret = drm_mm_scan_remove_block(obj->ggtt_space);
> BUG_ON(ret);
>
> list_del_init(&obj->exec_list);
> @@ -127,7 +127,7 @@ found:
> obj = list_first_entry(&unwind_list,
> struct drm_i915_gem_object,
> exec_list);
> - if (drm_mm_scan_remove_block(obj->gtt_space)) {
> + if (drm_mm_scan_remove_block(obj->ggtt_space)) {
> list_move(&obj->exec_list, &eviction_list);
> drm_gem_object_reference(&obj->base);
> continue;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 87a3227..5aeb447 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> return -ENOENT;
>
> target_i915_obj = to_intel_bo(target_obj);
> - target_offset = target_i915_obj->gtt_offset;
> + target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
>
> /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
> * pipe_control writes because the gpu doesn't properly redirect them
> @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
> return ret;
>
> /* Map the page containing the relocation we're going to perform. */
> - reloc->offset += obj->gtt_offset;
> + reloc->offset += i915_gem_obj_ggtt_offset(obj);
> reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> reloc->offset & PAGE_MASK);
> reloc_entry = (uint32_t __iomem *)
> @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
> obj->has_aliasing_ppgtt_mapping = 1;
> }
>
> - if (entry->offset != obj->gtt_offset) {
> - entry->offset = obj->gtt_offset;
> + if (entry->offset != i915_gem_obj_ggtt_offset(obj)) {
> + entry->offset = i915_gem_obj_ggtt_offset(obj);
> *need_reloc = true;
> }
>
> @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
> {
> struct drm_i915_gem_exec_object2 *entry;
>
> - if (!obj->gtt_space)
> + if (!i915_gem_obj_ggtt_bound(obj))
> return;
>
> entry = obj->exec_entry;
> @@ -530,7 +530,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
> bool need_fence, need_mappable;
>
> - if (!obj->gtt_space)
> + if (!i915_gem_obj_ggtt_bound(obj))
> continue;
>
> need_fence =
> @@ -539,7 +539,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
> obj->tiling_mode != I915_TILING_NONE;
> need_mappable = need_fence || need_reloc_mappable(obj);
>
> - if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
> + if ((entry->alignment &&
> + i915_gem_obj_ggtt_offset(obj) & (entry->alignment - 1)) ||
> (need_mappable && !obj->map_and_fenceable))
> ret = i915_gem_object_unbind(obj);
> else
> @@ -550,7 +551,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
>
> /* Bind fresh objects */
> list_for_each_entry(obj, objects, exec_list) {
> - if (obj->gtt_space)
> + if (i915_gem_obj_ggtt_bound(obj))
> continue;
>
> ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
> @@ -1058,7 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
> goto err;
> }
>
> - exec_start = batch_obj->gtt_offset + args->batch_start_offset;
> + exec_start = i915_gem_obj_ggtt_offset(batch_obj) + args->batch_start_offset;
> exec_len = args->batch_len;
> if (cliprects) {
> for (i = 0; i < args->num_cliprects; i++) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 5c6fc0e..1eefba7 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -378,7 +378,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
> enum i915_cache_level cache_level)
> {
> ppgtt->insert_entries(ppgtt, obj->pages,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> cache_level);
> }
>
> @@ -386,7 +386,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
> struct drm_i915_gem_object *obj)
> {
> ppgtt->clear_range(ppgtt,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> obj->base.size >> PAGE_SHIFT);
> }
>
> @@ -551,7 +551,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
> struct drm_i915_private *dev_priv = dev->dev_private;
>
> dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> cache_level);
>
> obj->has_global_gtt_mapping = 1;
> @@ -563,7 +563,7 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
> struct drm_i915_private *dev_priv = dev->dev_private;
>
> dev_priv->gtt.gtt_clear_range(obj->base.dev,
> - obj->gtt_space->start >> PAGE_SHIFT,
> + i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
> obj->base.size >> PAGE_SHIFT);
>
> obj->has_global_gtt_mapping = 0;
> @@ -630,22 +630,22 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
> /* Mark any preallocated objects as occupied */
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> int ret;
> - DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
> - obj->gtt_offset, obj->base.size);
> + DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
> + i915_gem_obj_ggtt_offset(obj), obj->base.size);
>
> - BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
> - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> - if (!obj->gtt_space) {
> + BUG_ON(obj->ggtt_space != I915_GTT_RESERVED);
> + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> + if (!obj->ggtt_space) {
> DRM_ERROR("Failed to preserve all objects\n");
> break;
> }
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_space,
> - obj->gtt_offset,
> + obj->ggtt_space,
> + i915_gem_obj_ggtt_offset(obj),
> obj->base.size);
> if (ret) {
> DRM_DEBUG_KMS("Reservation failed\n");
> - kfree(obj->gtt_space);
> + kfree(obj->ggtt_space);
> }
> obj->has_global_gtt_mapping = 1;
> }
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index f9db84a..cf0d0e0 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -374,23 +374,23 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
> * later.
> */
> if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
> - obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
> - if (!obj->gtt_space) {
> + obj->ggtt_space = kzalloc(sizeof(*obj->ggtt_space), GFP_KERNEL);
> + if (!obj->ggtt_space) {
> DRM_DEBUG_KMS("-ENOMEM stolen GTT space\n");
> goto unref_out;
> }
>
> ret = drm_mm_create_block(&dev_priv->mm.gtt_space,
> - obj->gtt_space,
> + obj->ggtt_space,
> gtt_offset, size);
> if (ret) {
> DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
> goto unref_out;
> }
> } else
> - obj->gtt_space = I915_GTT_RESERVED;
> + obj->ggtt_space = I915_GTT_RESERVED;
>
> - obj->gtt_offset = gtt_offset;
> + obj->ggtt_offset = gtt_offset;
> obj->has_global_gtt_mapping = 1;
>
> list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 537545b..92a8d27 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
> return true;
>
> if (INTEL_INFO(obj->base.dev)->gen == 3) {
> - if (obj->gtt_offset & ~I915_FENCE_START_MASK)
> + if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
> return false;
> } else {
> - if (obj->gtt_offset & ~I830_FENCE_START_MASK)
> + if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
> return false;
> }
>
> size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
> - if (obj->gtt_space->size != size)
> + if (i915_gem_obj_ggtt_size(obj) != size)
> return false;
>
> - if (obj->gtt_offset & (size - 1))
> + if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
> return false;
>
> return true;
> @@ -359,8 +359,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> */
>
> obj->map_and_fenceable =
> - obj->gtt_space == NULL ||
> - (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
> + !i915_gem_obj_ggtt_bound(obj) ||
> + (i915_gem_obj_ggtt_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end &&
> i915_gem_object_fence_ok(obj, args->tiling_mode));
>
> /* Rebind if we need a change of alignment */
> @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
> i915_gem_get_gtt_alignment(dev, obj->base.size,
> args->tiling_mode,
> false);
> - if (obj->gtt_offset & (unfenced_alignment - 1))
> + if (i915_gem_obj_ggtt_offset(obj) & (unfenced_alignment - 1))
> ret = i915_gem_object_unbind(obj);
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 4c1b1e3..d2cf26f 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -1516,7 +1516,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> if (dst == NULL)
> return NULL;
>
> - reloc_offset = src->gtt_offset;
> + reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
> for (i = 0; i < num_pages; i++) {
> unsigned long flags;
> void *d;
> @@ -1568,7 +1568,6 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
> reloc_offset += PAGE_SIZE;
> }
> dst->page_count = num_pages;
> - dst->gtt_offset = src->gtt_offset;
>
> return dst;
>
> @@ -1622,7 +1621,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
> err->name = obj->base.name;
> err->rseqno = obj->last_read_seqno;
> err->wseqno = obj->last_write_seqno;
> - err->gtt_offset = obj->gtt_offset;
> + err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> err->read_domains = obj->base.read_domains;
> err->write_domain = obj->base.write_domain;
> err->fence_reg = obj->fence_reg;
> @@ -1720,8 +1719,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
> return NULL;
>
> obj = ring->private;
> - if (acthd >= obj->gtt_offset &&
> - acthd < obj->gtt_offset + obj->base.size)
> + if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
> + acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
> return i915_error_object_create(dev_priv, obj);
> }
>
> @@ -1802,7 +1801,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
> return;
>
> list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
> - if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
> + if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
> ering->ctx = i915_error_object_create_sized(dev_priv,
> obj, 1);
> break;
> @@ -2156,10 +2155,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
> if (INTEL_INFO(dev)->gen >= 4) {
> int dspsurf = DSPSURF(intel_crtc->plane);
> stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
> - obj->gtt_offset;
> + i915_gem_obj_ggtt_offset(obj);
> } else {
> int dspaddr = DSPADDR(intel_crtc->plane);
> - stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
> + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
> crtc->y * crtc->fb->pitches[0] +
> crtc->x * crtc->fb->bits_per_pixel/8);
> }
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 3db4a68..7d283b5 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind,
>
> TP_fast_assign(
> __entry->obj = obj;
> - __entry->offset = obj->gtt_space->start;
> - __entry->size = obj->gtt_space->size;
> + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> + __entry->size = i915_gem_obj_ggtt_size(obj);
> __entry->mappable = mappable;
> ),
>
> @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind,
>
> TP_fast_assign(
> __entry->obj = obj;
> - __entry->offset = obj->gtt_space->start;
> - __entry->size = obj->gtt_space->size;
> + __entry->offset = i915_gem_obj_ggtt_offset(obj);
> + __entry->size = i915_gem_obj_ggtt_size(obj);
> ),
>
> TP_printk("obj=%p, offset=%08x size=%x",
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 6b0013c..f7cacc0 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -1980,16 +1980,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
> intel_crtc->dspaddr_offset = linear_offset;
> }
>
> - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> + fb->pitches[0]);
> I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> if (INTEL_INFO(dev)->gen >= 4) {
> I915_MODIFY_DISPBASE(DSPSURF(plane),
> - obj->gtt_offset + intel_crtc->dspaddr_offset);
> + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
> I915_WRITE(DSPLINOFF(plane), linear_offset);
> } else
> - I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
> + I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
> POSTING_READ(reg);
>
> return 0;
> @@ -2069,11 +2070,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
> fb->pitches[0]);
> linear_offset -= intel_crtc->dspaddr_offset;
>
> - DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
> - obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
> + DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
> + i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
> + fb->pitches[0]);
> I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
> I915_MODIFY_DISPBASE(DSPSURF(plane),
> - obj->gtt_offset + intel_crtc->dspaddr_offset);
> + i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> if (IS_HASWELL(dev)) {
> I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
> } else {
> @@ -6566,7 +6568,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
> goto fail_unpin;
> }
>
> - addr = obj->gtt_offset;
> + addr = i915_gem_obj_ggtt_offset(obj);
> } else {
> int align = IS_I830(dev) ? 16 * 1024 : 256;
> ret = i915_gem_attach_phys_object(dev, obj,
> @@ -7338,7 +7340,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, 0); /* aux display base address, unused */
>
> intel_mark_page_flip_active(intel_crtc);
> @@ -7379,7 +7381,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, MI_NOOP);
>
> intel_mark_page_flip_active(intel_crtc);
> @@ -7419,7 +7421,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0]);
> intel_ring_emit(ring,
> - (obj->gtt_offset + intel_crtc->dspaddr_offset) |
> + (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
> obj->tiling_mode);
>
> /* XXX Enabling the panel-fitter across page-flip is so far
> @@ -7462,7 +7464,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
> intel_ring_emit(ring, MI_DISPLAY_FLIP |
> MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
> intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
>
> /* Contrary to the suggestions in the documentation,
> * "Enable Panel Fitter" does not seem to be required when page
> @@ -7527,7 +7529,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>
> intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
> intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
> - intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
> intel_ring_emit(ring, (MI_NOOP));
>
> intel_mark_page_flip_active(intel_crtc);
> diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
> index dff669e..f3c97e0 100644
> --- a/drivers/gpu/drm/i915/intel_fb.c
> +++ b/drivers/gpu/drm/i915/intel_fb.c
> @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
> info->apertures->ranges[0].base = dev->mode_config.fb_base;
> info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
>
> - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
> + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
> info->fix.smem_len = size;
>
> info->screen_base =
> - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> size);
> if (!info->screen_base) {
> ret = -ENOSPC;
> @@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
>
> /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
>
> - DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
> + DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
> fb->width, fb->height,
> - obj->gtt_offset, obj);
> + i915_gem_obj_ggtt_offset(obj), obj);
>
>
> mutex_unlock(&dev->struct_mutex);
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index a369881..81c3ca1 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
> regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
> else
> regs = io_mapping_map_wc(dev_priv->gtt.mappable,
> - overlay->reg_bo->gtt_offset);
> + i915_gem_obj_ggtt_offset(overlay->reg_bo));
>
> return regs;
> }
> @@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> swidth = params->src_w;
> swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
> sheight = params->src_h;
> - iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
> ostride = params->stride_Y;
>
> if (params->format & I915_OVERLAY_YUV_PLANAR) {
> @@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
> params->src_w/uv_hscale);
> swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
> sheight |= (params->src_h/uv_vscale) << 16;
> - iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
> - iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
> + iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
> ostride |= params->stride_UV << 16;
> }
>
> @@ -1355,7 +1355,7 @@ void intel_setup_overlay(struct drm_device *dev)
> DRM_ERROR("failed to pin overlay register bo\n");
> goto out_free_bo;
> }
> - overlay->flip_addr = reg_bo->gtt_offset;
> + overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
>
> ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
> if (ret) {
> @@ -1435,7 +1435,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
> overlay->reg_bo->phys_obj->handle->vaddr;
> else
> regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
> - overlay->reg_bo->gtt_offset);
> + i915_gem_obj_ggtt_offset(overlay->reg_bo));
>
> return regs;
> }
> @@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
> if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
> error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
> else
> - error->base = overlay->reg_bo->gtt_offset;
> + error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
>
> regs = intel_overlay_map_regs_atomic(overlay);
> if (!regs)
> diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
> index 5b4ade6..d06648d 100644
> --- a/drivers/gpu/drm/i915/intel_pm.c
> +++ b/drivers/gpu/drm/i915/intel_pm.c
> @@ -218,7 +218,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
> (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
> I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
> - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
> + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
> /* enable it... */
> I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
>
> @@ -275,7 +275,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
> struct drm_i915_gem_object *obj = intel_fb->obj;
> struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>
> - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
> + I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
>
> I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
> IVB_DPFC_CTL_FENCE_EN |
> @@ -3707,7 +3707,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
>
> intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
> intel_ring_emit(ring, MI_SET_CONTEXT);
> - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
> + intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
> MI_MM_SPACE_GTT |
> MI_SAVE_EXT_STATE_EN |
> MI_RESTORE_EXT_STATE_EN |
> @@ -3730,7 +3730,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
> return;
> }
>
> - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
> + I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
> I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
> }
>
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index e51ab55..54495df 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
> * registers with the above sequence (the readback of the HEAD registers
> * also enforces ordering), otherwise the hw might lose the new ring
> * register values. */
> - I915_WRITE_START(ring, obj->gtt_offset);
> + I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
> I915_WRITE_CTL(ring,
> ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
> | RING_VALID);
>
> /* If the head is still not zero, the ring is dead */
> if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
> - I915_READ_START(ring) == obj->gtt_offset &&
> + I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
> (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
> DRM_ERROR("%s initialization failed "
> "ctl %08x head %08x tail %08x start %08x\n",
> @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
> if (ret)
> goto err_unref;
>
> - pc->gtt_offset = obj->gtt_offset;
> + pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
> pc->cpu_page = kmap(sg_page(obj->pages->sgl));
> if (pc->cpu_page == NULL) {
> ret = -ENOMEM;
> @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
> intel_ring_advance(ring);
> } else {
> struct drm_i915_gem_object *obj = ring->private;
> - u32 cs_offset = obj->gtt_offset;
> + u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
>
> if (len > I830_BATCH_LIMIT)
> return -ENOSPC;
> @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
> goto err_unref;
> }
>
> - ring->status_page.gfx_addr = obj->gtt_offset;
> + ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
> ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
> if (ring->status_page.page_addr == NULL) {
> ret = -ENOMEM;
> @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
> goto err_unpin;
>
> ring->virtual_start =
> - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
> + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
> ring->size);
> if (ring->virtual_start == NULL) {
> DRM_ERROR("Failed to map ringbuffer.\n");
> diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
> index 1fa5612..55bdf70 100644
> --- a/drivers/gpu/drm/i915/intel_sprite.c
> +++ b/drivers/gpu/drm/i915/intel_sprite.c
> @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
>
> I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
> I915_WRITE(SPCNTR(pipe, plane), sprctl);
> - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
> + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
> sprsurf_offset);
> POSTING_READ(SPSURF(pipe, plane));
> }
> @@ -308,7 +308,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> if (intel_plane->can_scale)
> I915_WRITE(SPRSCALE(pipe), sprscale);
> I915_WRITE(SPRCTL(pipe), sprctl);
> - I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
> + I915_MODIFY_DISPBASE(SPRSURF(pipe),
> + i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
> POSTING_READ(SPRSURF(pipe));
>
> /* potentially re-enable LP watermarks */
> @@ -478,7 +479,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
> I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
> I915_WRITE(DVSSCALE(pipe), dvsscale);
> I915_WRITE(DVSCNTR(pipe), dvscntr);
> - I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
> + I915_MODIFY_DISPBASE(DVSSURF(pipe),
> + i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
> POSTING_READ(DVSSURF(pipe));
> }
>
> --
> 1.8.3.2
>
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch