[Intel-gfx] [PATCH 1/2] drm/i915: introduce to_intel_bo helper

Daniel Vetter <daniel.vetter@ffwll.ch>
Mon Mar 8 13:35:02 CET 2010


This is a purely cosmetic change that makes further work in this area
easier. And hey, it's not only clearer and typechecked, but actually
shorter, too!

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
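Note for reviewers (below the tear line, so not part of the commit
message): here is a minimal, self-contained userspace sketch of the
access pattern this patch converts. The struct layouts are simplified
stand-ins rather than the real drm/i915 definitions; only the
driver_private member and the shape of to_intel_bo() mirror the patch.

/*
 * Simplified stand-in types; only driver_private and to_intel_bo()
 * correspond to the real driver code.
 */
#include <stdio.h>

struct drm_i915_gem_object {
	int tiling_mode;
};

struct drm_gem_object {
	void *driver_private;	/* points at the driver-specific object */
};

/* same shape as the helper added to i915_drv.h */
#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)

int main(void)
{
	struct drm_i915_gem_object priv = { .tiling_mode = 1 };
	struct drm_gem_object obj = { .driver_private = &priv };

	/* before: open-coded dereference of the untyped pointer */
	struct drm_i915_gem_object *a = obj.driver_private;

	/* after: the helper names the conversion and is shorter at call sites */
	struct drm_i915_gem_object *b = to_intel_bo(&obj);

	printf("%d %d\n", a->tiling_mode, b->tiling_mode);
	return 0;
}

With the cast centralized in a single macro, a later change to how the
driver object hangs off struct drm_gem_object only needs to touch
to_intel_bo() instead of every call site, which is the point of the
patch.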
 drivers/gpu/drm/i915/i915_debugfs.c    |    2 +-
 drivers/gpu/drm/i915/i915_drv.c        |    2 +-
 drivers/gpu/drm/i915/i915_drv.h        |    2 +
 drivers/gpu/drm/i915/i915_gem.c        |  132 ++++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_debug.c  |    4 +-
 drivers/gpu/drm/i915/i915_gem_tiling.c |   10 +-
 drivers/gpu/drm/i915/i915_irq.c        |    2 +-
 drivers/gpu/drm/i915/intel_display.c   |   28 ++++----
 drivers/gpu/drm/i915/intel_fb.c        |    2 +-
 drivers/gpu/drm/i915/intel_overlay.c   |    6 +-
 10 files changed, 96 insertions(+), 94 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1376dfe..bb3a4a8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -225,7 +225,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 		} else {
 			struct drm_i915_gem_object *obj_priv;
 
-			obj_priv = obj->driver_private;
+			obj_priv = to_intel_bo(obj);
 			seq_printf(m, "Fenced object[%2d] = %p: %s "
 				   "%08x %08zx %08x %s %08x %08x %d",
 				   i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 85ad020..02de6ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -361,7 +361,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	    !dev_priv->mm.suspended) {
 		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		dev_priv->mm.suspended = 0;
 
 		/* Stop the ring if it's running. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f975926..0a526d9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -730,6 +730,8 @@ struct drm_i915_gem_object {
 	atomic_t pending_flip;
 };
 
+#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+
 /**
  * Request queue structure.
  *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fba37e9..2c68ffb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -162,7 +162,7 @@ fast_shmem_read(struct page **pages,
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = obj->dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj_priv->tiling_mode != I915_TILING_NONE;
@@ -263,7 +263,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
@@ -284,7 +284,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
@@ -353,7 +353,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
 			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
@@ -402,7 +402,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
@@ -478,7 +478,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check source.
 	 *
@@ -580,7 +580,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset, page_base;
@@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret)
 		goto fail;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
 
 	while (remain > 0) {
@@ -654,7 +654,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t gtt_page_base, offset;
@@ -698,7 +698,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret)
 		goto out_unpin_object;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
 
 	while (remain > 0) {
@@ -760,7 +760,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;
@@ -780,7 +780,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 	obj_priv->dirty = 1;
 
@@ -828,7 +828,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
 			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
@@ -876,7 +876,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (ret != 0)
 		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 	obj_priv->dirty = 1;
 
@@ -951,7 +951,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check destination.
 	 *
@@ -1033,7 +1033,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -1095,7 +1095,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 	DRM_INFO("%s: sw_finish %d (%p %zd)\n",
 		 __func__, args->handle, obj, obj->size);
 #endif
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	/* Pinned buffers may be scanout, so flush the cache */
 	if (obj_priv->pin_count)
@@ -1166,7 +1166,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1233,7 +1233,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_map_list *list;
 	struct drm_local_map *map;
 	int ret = 0;
@@ -1304,7 +1304,7 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (dev->dev_mapping)
 		unmap_mapping_range(dev->dev_mapping,
@@ -1315,7 +1315,7 @@ static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_gem_mm *mm = dev->mm_private;
 	struct drm_map_list *list;
 
@@ -1346,7 +1346,7 @@ static uint32_t
 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int start, i;
 
 	/*
@@ -1405,7 +1405,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
 	mutex_lock(&dev->struct_mutex);
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1449,7 +1449,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size / PAGE_SIZE;
 	int i;
 
@@ -1488,7 +1488,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj_priv->active) {
@@ -1508,7 +1508,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	BUG_ON(!obj_priv->active);
 	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1519,7 +1519,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 static void
 i915_gem_object_truncate(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct inode *inode;
 
 	inode = obj->filp->f_path.dentry->d_inode;
@@ -1540,7 +1540,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 	if (obj_priv->pin_count != 0)
@@ -1967,7 +1967,7 @@ static int
 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
@@ -1999,7 +1999,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
 #if WATCH_BUF
@@ -2175,7 +2175,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 #if WATCH_LRU
 			DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-			obj_priv = obj->driver_private;
+			obj_priv = to_intel_bo(obj);
 			BUG_ON(obj_priv->pin_count != 0);
 			BUG_ON(obj_priv->active);
 
@@ -2251,7 +2251,7 @@ int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count, i;
 	struct address_space *mapping;
 	struct inode *inode;
@@ -2298,7 +2298,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint64_t val;
 
@@ -2320,7 +2320,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint64_t val;
 
@@ -2340,7 +2340,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	int tile_width;
 	uint32_t fence_reg, val;
@@ -2382,7 +2382,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
 	struct drm_gem_object *obj = reg->obj;
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int regnum = obj_priv->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
@@ -2426,7 +2426,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
 		if (!reg->obj)
 			return i;
 
-		obj_priv = reg->obj->driver_private;
+		obj_priv = to_intel_bo(reg->obj);
 		if (!obj_priv->pin_count)
 		    avail++;
 	}
@@ -2481,7 +2481,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_i915_fence_reg *reg = NULL;
 	int ret;
 
@@ -2548,7 +2548,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (IS_GEN6(dev)) {
 		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2584,7 +2584,7 @@ int
 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
 		return 0;
@@ -2622,7 +2622,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_mm_node *free_space;
 	gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
 	int ret;
@@ -2729,7 +2729,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 void
 i915_gem_clflush_object(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
 
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2830,7 +2830,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
@@ -2880,7 +2880,7 @@ int
 i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
@@ -3093,7 +3093,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device		*dev = obj->dev;
-	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
 	uint32_t			invalidate_domains = 0;
 	uint32_t			flush_domains = 0;
 	uint32_t			old_read_domains;
@@ -3178,7 +3178,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 static void
 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (!obj_priv->page_cpu_valid)
 		return;
@@ -3218,7 +3218,7 @@ static int
 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int i, ret;
 
@@ -3287,7 +3287,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int i, ret;
 	void __iomem *reloc_page;
 	bool need_fence;
@@ -3338,7 +3338,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 			i915_gem_object_unpin(obj);
 			return -EBADF;
 		}
-		target_obj_priv = target_obj->driver_private;
+		target_obj_priv = to_intel_bo(target_obj);
 
 #if WATCH_RELOC
 		DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3690,7 +3690,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
 		prepare_to_wait(&dev_priv->pending_flip_queue,
 				&wait, TASK_INTERRUPTIBLE);
 		for (i = 0; i < count; i++) {
-			obj_priv = object_list[i]->driver_private;
+			obj_priv = to_intel_bo(object_list[i]);
 			if (atomic_read(&obj_priv->pending_flip) > 0)
 				break;
 		}
@@ -3799,7 +3799,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 
-		obj_priv = object_list[i]->driver_private;
+		obj_priv = to_intel_bo(object_list[i]);
 		if (obj_priv->in_execbuffer) {
 			DRM_ERROR("Object %p appears more than once in object list\n",
 				   object_list[i]);
@@ -3925,7 +3925,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
 
 		obj->write_domain = obj->pending_write_domain;
@@ -4000,7 +4000,7 @@ err:
 
 	for (i = 0; i < args->buffer_count; i++) {
 		if (object_list[i]) {
-			obj_priv = object_list[i]->driver_private;
+			obj_priv = to_intel_bo(object_list[i]);
 			obj_priv->in_execbuffer = false;
 		}
 		drm_gem_object_unreference(object_list[i]);
@@ -4178,7 +4178,7 @@ int
 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4211,7 +4211,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 	obj_priv->pin_count--;
@@ -4251,7 +4251,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 		mutex_unlock(&dev->struct_mutex);
 		return -EBADF;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4308,7 +4308,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	if (obj_priv->pin_filp != file_priv) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
@@ -4350,7 +4350,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 */
 	i915_gem_retire_requests(dev);
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
 	 * done.  Otherwise, a buffer left on the flushing list but not getting
 	 * flushed (because nobody's flushing that domain) won't ever return
@@ -4396,7 +4396,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->pin_count) {
 		drm_gem_object_unreference(obj);
@@ -4457,7 +4457,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
 void i915_gem_free_object(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	trace_i915_gem_object_destroy(obj);
 
@@ -4566,7 +4566,7 @@ i915_gem_init_hws(struct drm_device *dev)
 		DRM_ERROR("Failed to allocate status page\n");
 		return -ENOMEM;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
 
 	ret = i915_gem_object_pin(obj, 4096);
@@ -4610,7 +4610,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
 		return;
 
 	obj = dev_priv->hws_obj;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	kunmap(obj_priv->pages[0]);
 	i915_gem_object_unpin(obj);
@@ -4644,7 +4644,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 		i915_gem_cleanup_hws(dev);
 		return -ENOMEM;
 	}
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	ret = i915_gem_object_pin(obj, 4096);
 	if (ret != 0) {
@@ -4932,7 +4932,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 	int ret;
 	int page_count;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	if (!obj_priv->phys_obj)
 		return;
 
@@ -4971,7 +4971,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 	if (id > I915_MAX_PHYS_OBJECT)
 		return -EINVAL;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->phys_obj) {
 		if (obj_priv->phys_obj->id == id)
@@ -5022,7 +5022,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	void *obj_addr;
 	int ret;
 	char __user *user_data;
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614..35507cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
 i915_gem_dump_object(struct drm_gem_object *obj, int len,
 		     const char *where, uint32_t mark)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 
 	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 	uint32_t *gtt_mapping;
 	uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b5c55d8..a10380f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -240,7 +240,7 @@ bool
 i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 {
 	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->gtt_space == NULL)
 		return true;
@@ -280,7 +280,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EINVAL;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
 		drm_gem_object_unreference_unlocked(obj);
@@ -361,7 +361,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
 		return -EINVAL;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 
@@ -424,7 +424,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size >> PAGE_SHIFT;
 	int i;
 
@@ -453,7 +453,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page_count = obj->size >> PAGE_SHIFT;
 	int i;
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5388354..bfbdad9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -443,7 +443,7 @@ i915_error_object_create(struct drm_device *dev,
 	if (src == NULL)
 		return NULL;
 
-	src_priv = src->driver_private;
+	src_priv = to_intel_bo(src);
 	if (src_priv->pages == NULL)
 		return NULL;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cd6de5..678f17f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1002,7 +1002,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
@@ -1079,7 +1079,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
 		     DPFC_CTL_PLANEB);
@@ -1175,7 +1175,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
 		return;
 
 	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = intel_fb->obj->driver_private;
+	obj_priv = to_intel_bo(intel_fb->obj);
 
 	/*
 	 * If FBC is already on, we just have to verify that we can
@@ -1242,7 +1242,7 @@ out_disable:
 static int
 intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
 	int ret;
 
@@ -1322,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	obj = intel_fb->obj;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1400,7 +1400,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb) {
 		intel_fb = to_intel_framebuffer(old_fb);
-		obj_priv = intel_fb->obj->driver_private;
+		obj_priv = to_intel_bo(intel_fb->obj);
 		i915_gem_object_unpin(intel_fb->obj);
 	}
 	intel_increase_pllclock(crtc, true);
@@ -3510,7 +3510,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!bo)
 		return -ENOENT;
 
-	obj_priv = bo->driver_private;
+	obj_priv = to_intel_bo(bo);
 
 	if (bo->size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
@@ -4155,7 +4155,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
 		if (work && !work->pending) {
-			obj_priv = work->pending_flip_obj->driver_private;
+			obj_priv = to_intel_bo(work->pending_flip_obj);
 			DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
 					 obj_priv,
 					 atomic_read(&obj_priv->pending_flip));
@@ -4180,7 +4180,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = work->pending_flip_obj->driver_private;
+	obj_priv = to_intel_bo(work->pending_flip_obj);
 
 	/* Initial scanout buffer will have a 0 pending flip count */
 	if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4251,7 +4251,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	ret = intel_pin_and_fence_fb_obj(dev, obj);
 	if (ret != 0) {
 		DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
-			  obj->driver_private);
+			  to_intel_bo(obj));
 		kfree(work);
 		intel_crtc->unpin_work = NULL;
 		mutex_unlock(&dev->struct_mutex);
@@ -4265,7 +4265,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	crtc->fb = fb;
 	i915_gem_object_flush_write_domain(obj);
 	drm_vblank_get(dev, intel_crtc->pipe);
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 	atomic_inc(&obj_priv->pending_flip);
 	work->pending_flip_obj = obj;
 
@@ -4764,14 +4764,14 @@ void intel_init_clock_gating(struct drm_device *dev)
 		struct drm_i915_gem_object *obj_priv = NULL;
 
 		if (dev_priv->pwrctx) {
-			obj_priv = dev_priv->pwrctx->driver_private;
+			obj_priv = to_intel_bo(dev_priv->pwrctx);
 		} else {
 			struct drm_gem_object *pwrctx;
 
 			pwrctx = intel_alloc_power_context(dev);
 			if (pwrctx) {
 				dev_priv->pwrctx = pwrctx;
-				obj_priv = pwrctx->driver_private;
+				obj_priv = to_intel_bo(pwrctx);
 			}
 		}
 
@@ -4942,7 +4942,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	if (dev_priv->pwrctx) {
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = dev_priv->pwrctx->driver_private;
+		obj_priv = to_intel_bo(dev_priv->pwrctx);
 		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
 		I915_READ(PWRCTXA);
 		i915_gem_object_unpin(dev_priv->pwrctx);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcb..785a5b4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -144,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = fbo->driver_private;
+	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d355d1d..31bb96e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	intel_overlay_continue(overlay, scale_changed);
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = new_bo->driver_private;
+	overlay->vid_bo = to_intel_bo(new_bo);
 
 	return 0;
 
@@ -1339,7 +1339,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = reg_bo->driver_private;
+	overlay->reg_bo = to_intel_bo(reg_bo);
 
 	if (OVERLAY_NONPHYSICAL(dev)) {
 		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
-- 
1.6.6.1



