[Intel-gfx] [PATCH 164/190] drm/i915: Move obj->dirty:1 to obj->flags

Chris Wilson chris at chris-wilson.co.uk
Mon Jan 11 03:01:05 PST 2016


The obj->dirty bit is a companion to the obj->active bits that were
moved to the obj->flags bitmask. Since we also update this bit inside
the i915_vma_move_to_active() hotpath, we can aid gcc by moving the
obj->dirty bit into the obj->flags bitmask as well.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
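For reviewers, a self-contained userspace sketch of the flags-word
pattern (hypothetical sketch_* names, not the i915 code itself): with
the dirty bit co-located in the same word as the active bits, the
compiler is free to fold the two |= updates in the move-to-active hot
path into a single read-modify-write of obj->flags.

#include <stdio.h>

/*
 * Illustrative only: one unsigned long "flags" word carrying both the
 * per-engine active bits and a dirty bit, with inline helpers in the
 * style of i915_gem_object_{is,set,unset}_dirty().
 */

#define SKETCH_NUM_ENGINES	4
#define SKETCH_ACTIVE_SHIFT	0
#define SKETCH_DIRTY_SHIFT	(SKETCH_ACTIVE_SHIFT + SKETCH_NUM_ENGINES)
#define SKETCH_DIRTY_BIT	(1ul << SKETCH_DIRTY_SHIFT)

struct sketch_object {
	unsigned long flags;
};

static inline void sketch_set_active(struct sketch_object *obj, int engine)
{
	obj->flags |= 1ul << (SKETCH_ACTIVE_SHIFT + engine);
}

static inline void sketch_set_dirty(struct sketch_object *obj)
{
	obj->flags |= SKETCH_DIRTY_BIT;
}

static inline int sketch_is_dirty(const struct sketch_object *obj)
{
	return !!(obj->flags & SKETCH_DIRTY_BIT);
}

/*
 * Hot-path analogue of i915_vma_move_to_active(): both bits live in
 * the same word, so the two stores can be merged by the compiler.
 */
static void sketch_move_to_active(struct sketch_object *obj, int engine)
{
	sketch_set_dirty(obj);	/* be paranoid */
	sketch_set_active(obj, engine);
}

int main(void)
{
	struct sketch_object obj = { .flags = 0 };

	sketch_move_to_active(&obj, 2);
	printf("flags=%#lx dirty=%d\n", obj.flags, sketch_is_dirty(&obj));
	return 0;
}
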
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 21 ++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 18 +++++++++---------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  6 +++---
 drivers/gpu/drm/i915/i915_gpu_error.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  2 +-
 7 files changed, 36 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 558d79b63e6c..8a59630fe5fb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -136,7 +136,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	seq_printf(m, "] %x %s%s%s",
 		   i915_gem_request_get_seqno(obj->last_write.request),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
-		   obj->dirty ? " dirty" : "",
+		   i915_gem_object_is_dirty(obj) ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 62a024a7225b..d664a67cda7b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2058,7 +2058,8 @@ struct drm_i915_gem_object {
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	unsigned int dirty:1;
+#define I915_BO_DIRTY_SHIFT (I915_BO_ACTIVE_REF_SHIFT + 1)
+#define I915_BO_DIRTY_BIT (1 << I915_BO_DIRTY_SHIFT)
 
 	/**
 	 * Advice: are the backing pages purgeable?
@@ -2189,6 +2190,24 @@ i915_gem_object_unset_active_reference(struct drm_i915_gem_object *obj)
 }
 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
 
+static inline bool
+i915_gem_object_is_dirty(const struct drm_i915_gem_object *obj)
+{
+	return obj->flags & I915_BO_DIRTY_BIT;
+}
+
+static inline void
+i915_gem_object_set_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->flags |= I915_BO_DIRTY_BIT;
+}
+
+static inline void
+i915_gem_object_unset_dirty(struct drm_i915_gem_object *obj)
+{
+	obj->flags &= ~I915_BO_DIRTY_BIT;
+}
+
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 497b68849d09..5347469bbea1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -209,9 +209,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 	}
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+		i915_gem_object_unset_dirty(obj);
 
-	if (obj->dirty) {
+	if (i915_gem_object_is_dirty(obj)) {
 		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
@@ -235,7 +235,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 			page_cache_release(page);
 			vaddr += PAGE_SIZE;
 		}
-		obj->dirty = 0;
+		i915_gem_object_unset_dirty(obj);
 	}
 
 	sg_free_table(obj->pages);
@@ -589,7 +589,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 
 out:
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-	obj->dirty = 1;
+	i915_gem_object_set_dirty(obj);
 	/* return with the pages pinned */
 	return 0;
 
@@ -1836,12 +1836,12 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
-		obj->dirty = 0;
+		i915_gem_object_unset_dirty(obj);
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 
-		if (obj->dirty)
+		if (i915_gem_object_is_dirty(obj))
 			set_page_dirty(page);
 
 		if (obj->madv == I915_MADV_WILLNEED)
@@ -1849,7 +1849,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
 		page_cache_release(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_unset_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
@@ -3029,7 +3029,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		i915_gem_object_set_dirty(obj);
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -4389,7 +4389,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	i915_gem_object_set_dirty(obj); /* Backing store is now out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 7af562996767..185fbf45a5d2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1197,14 +1197,13 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 
-	obj->dirty = 1; /* be paranoid  */
-
 	/* The order in which we add operations to the retirement queue is
 	 * vital here: mark_active adds to the start of the callback list,
 	 * such that subsequent callbacks are called first. Therefore we
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
+	i915_gem_object_set_dirty(obj); /* be paranoid */
 	i915_gem_object_set_active(obj, engine);
 	i915_gem_request_mark_active(req, &obj->last_read[engine]);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 53f8094b3198..232ce85b39db 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -745,20 +745,20 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_userptr_set_active(obj, false);
 
 	if (obj->madv != I915_MADV_WILLNEED)
-		obj->dirty = 0;
+		i915_gem_object_unset_dirty(obj);
 
 	i915_gem_gtt_finish_object(obj);
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
 		struct page *page = sg_page_iter_page(&sg_iter);
 
-		if (obj->dirty)
+		if (i915_gem_object_is_dirty(obj))
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		page_cache_release(page);
 	}
-	obj->dirty = 0;
+	i915_gem_object_unset_dirty(obj);
 
 	sg_free_table(obj->pages);
 	kfree(obj->pages);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index e9ef6b25c696..6fbb11a53b60 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -715,7 +715,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = obj->tiling_mode;
-	err->dirty = obj->dirty;
+	err->dirty = i915_gem_object_is_dirty(obj);
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->ring = obj->last_write.request ? obj->last_write.request->engine->id : -1;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 62f19ed51fb2..3e61fce1326e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -597,7 +597,7 @@ static int intel_lr_context_pin(struct intel_context *ctx,
 
 	i915_gem_context_reference(ctx);
 	ce->vma = vma;
-	vma->obj->dirty = true;
+	i915_gem_object_set_dirty(vma->obj);
 
 	ggtt_offset = vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
 	ring->context_descriptor =
-- 
2.7.0.rc3
