[PATCH 1/2] drm/i915: introduce & use i915_gem_object_mark_dirty()

Dave Gordon <david.s.gordon@intel.com>
Mon May 16 15:07:59 UTC 2016


This just hides the existing obj->dirty flag behind a trivial inline
setter, to discourage non-GEM code from poking at the flag directly.
As a bonus, the setter can assert that the object's pages are pinned
when it is marked dirty, catching any caller that tries to dirty an
object whose backing store may already have been released.

Existing code that sets obj->dirty is then converted to call the new
function instead.
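
For illustration, a typical call site then follows this pattern (a
minimal sketch using only the functions touched by this patch; the
surrounding object setup and error handling are elided):

	i915_gem_object_pin_pages(obj);	/* keep backing store resident */
	/* ... CPU writes into the object's backing pages ... */
	i915_gem_object_mark_dirty(obj);	/* pages must still be pinned */
	i915_gem_object_unpin_pages(obj);	/* contents preserved on put_pages */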

Inspired-by: http://www.spinics.net/lists/intel-gfx/msg92390.html
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            | 11 +++++++++++
 drivers/gpu/drm/i915/i915_gem.c            |  6 +++---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 16 ++++++++--------
 5 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 72f0b02..74bacc7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3063,6 +3063,17 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 	obj->pages_pin_count++;
 }
 
+/*
+ * Flag the object's content as having changed since the last call to
+ * i915_gem_object_pin_pages() above, so that the new content is not
+ * lost after the next call to i915_gem_object_unpin_pages() below.
+ */
+static inline void i915_gem_object_mark_dirty(struct drm_i915_gem_object *obj)
+{
+	BUG_ON(obj->pages_pin_count == 0);
+	obj->dirty = true;
+}
+
 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	BUG_ON(obj->pages_pin_count == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aff386e..dc3dcd7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -931,9 +931,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 
 	i915_gem_object_pin_pages(obj);
+	i915_gem_object_mark_dirty(obj);
 
 	offset = args->offset;
-	obj->dirty = 1;
 
 	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
 			 offset >> PAGE_SHIFT) {
@@ -3779,7 +3779,7 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 	if (write) {
 		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
 		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
-		obj->dirty = 1;
+		i915_gem_object_mark_dirty(obj);
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -5301,7 +5301,7 @@ struct drm_i915_gem_object *
 	i915_gem_object_pin_pages(obj);
 	sg = obj->pages;
 	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
-	obj->dirty = 1;		/* Backing store is now out of date */
+	i915_gem_object_mark_dirty(obj); /* Backing store is out of date */
 	i915_gem_object_unpin_pages(obj);
 
 	if (WARN_ON(bytes != size)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2aedd18..655f709 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -782,7 +782,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->legacy_hw_ctx.rcs_state->dirty = 1;
+		i915_gem_object_mark_dirty(from->legacy_hw_ctx.rcs_state);
 
 		/* obj is kept alive until the next request by its active ref */
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a54a243..6f3c0d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1101,7 +1101,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
-		obj->dirty = 1; /* be paranoid  */
+		i915_gem_object_mark_dirty(obj); /* be paranoid  */
 		obj->base.write_domain = obj->base.pending_write_domain;
 		if (obj->base.write_domain == 0)
 			obj->base.pending_read_domains |= obj->base.read_domains;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index db10c96..db4057e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -961,19 +961,19 @@ static int intel_lr_context_pin(struct intel_context *ctx,
 		goto unpin_ctx_obj;
 	}
 
-	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-
 	ringbuf = ctx->engine[engine->id].ringbuf;
 	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
 	if (ret)
 		goto unpin_map;
 
 	i915_gem_context_reference(ctx);
-	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
-	intel_lr_context_descriptor_update(ctx, engine);
+	i915_gem_object_mark_dirty(ctx_obj);
+
+	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
 	ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
-	ctx_obj->dirty = true;
+	ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+	intel_lr_context_descriptor_update(ctx, engine);
 
 	/* Invalidate GuC TLB. */
 	if (i915.enable_guc_submission)
@@ -2301,7 +2301,7 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
 		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
 		return ret;
 	}
-	ctx_obj->dirty = true;
+	i915_gem_object_mark_dirty(ctx_obj);
 
 	/* The second page of the context object contains some fields which must
 	 * be set up prior to the first execution. */
@@ -2560,9 +2560,9 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 		if (WARN_ON(IS_ERR(vaddr)))
 			continue;
 
-		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-		ctx_obj->dirty = true;
+		i915_gem_object_mark_dirty(ctx_obj);
 
+		reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 		reg_state[CTX_RING_HEAD+1] = 0;
 		reg_state[CTX_RING_TAIL+1] = 0;
 
-- 
1.9.1