[Intel-gfx] [PATCH 55/62] drm/i915: i915_vma_move_to_active prep patch
Chris Wilson
chris at chris-wilson.co.uk
Fri Jun 3 16:37:20 UTC 2016
This patch is broken out of the next just to remove the code motion from
that patch and make it more readable. What we do here is move
i915_vma_move_to_active() to i915_gem_execbuffer.c and put the three
stages (read, write, fenced) together so that future modifications to
active handling are all located in the same spot. The importance of this
is that we can more simply control the order in which the requests are
placed in the retirement list (i.e. control the order in which we retire
and so control the lifetimes, avoiding having to hold onto references).
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
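As an aside for review, a minimal sketch (not part of the patch) of how
a caller is expected to drive the consolidated function once it takes
flags. The helper mark_vma_active() here is hypothetical; EXEC_OBJECT_WRITE,
EXEC_OBJECT_NEEDS_FENCE and i915_vma_move_to_active() are the names used
in the diff below:

/* Hypothetical caller, assuming the usual i915 headers are in scope.
 * Read tracking always happens inside i915_vma_move_to_active(); the
 * write and fence stages run only when the matching flag is set.
 */
static void mark_vma_active(struct i915_vma *vma,
			    struct drm_i915_gem_request *req,
			    bool is_write, bool needs_fence)
{
	unsigned flags = 0;

	if (is_write)
		flags |= EXEC_OBJECT_WRITE;
	if (needs_fence)
		flags |= EXEC_OBJECT_NEEDS_FENCE;

	i915_vma_move_to_active(vma, req, flags);
}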
 drivers/gpu/drm/i915/i915_drv.h              |  3 +-
 drivers/gpu/drm/i915/i915_gem.c              | 16 -------
 drivers/gpu/drm/i915/i915_gem_context.c      |  9 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   | 64 ++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
5 files changed, 50 insertions(+), 44 deletions(-)
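The lifetime argument in the commit message can be illustrated with a
toy model (standalone C, hypothetical, not i915 code) of the obj->active
engine bitmask and the reference it pins:

/* Toy model: the object holds one extra reference for as long as any
 * engine bit is set in its active mask.
 */
struct toy_obj {
	unsigned active;	/* one bit per engine id */
	int refcount;
};

static void toy_move_to_active(struct toy_obj *obj, unsigned engine_id)
{
	/* Take a reference only on the 0 -> nonzero transition. */
	if (obj->active == 0)
		obj->refcount++;
	obj->active |= 1u << engine_id;
}

static void toy_retire(struct toy_obj *obj, unsigned engine_id)
{
	obj->active &= ~(1u << engine_id);
	/* Drop it again on the nonzero -> 0 transition. */
	if (obj->active == 0)
		obj->refcount--;
}

Retiring requests in a controlled order makes the nonzero -> 0
transition, and hence the point at which the final reference is
dropped, predictable; that is what this patch is preparing for.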
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 92cd3744783c..912d54b6998a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3045,7 +3045,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *to);
void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req);
+ struct drm_i915_gem_request *req,
+ unsigned flags);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b51d20a4f1ea..ca6b55f52f8b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2066,22 +2066,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
return obj->mapping;
}
-void i915_vma_move_to_active(struct i915_vma *vma,
- struct drm_i915_gem_request *req)
-{
- struct drm_i915_gem_object *obj = vma->obj;
- struct intel_engine_cs *engine = req->engine;
-
- /* Add a reference if we're newly entering the active list. */
- if (obj->active == 0)
- i915_gem_object_get(obj);
- obj->active |= intel_engine_flag(engine);
-
- i915_gem_active_set(&obj->last_read[engine->id], req);
-
- list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
static void
i915_gem_object_retire__fence(struct i915_gem_active *active,
struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a649f6eabf98..cace85998204 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -775,8 +775,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
- from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+ struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -784,10 +784,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
* able to defer doing this until we know the object would be
* swapped, but there is no way to do that yet.
*/
- from->engine[RCS].state->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
/* obj is kept alive until the next request by its active ref */
- i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+ i915_gem_object_ggtt_unpin(obj);
i915_gem_context_put(from);
}
engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6fa13c618a6b..e099080b3b5b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1110,43 +1110,63 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
return ctx;
}
+void i915_vma_move_to_active(struct i915_vma *vma,
+ struct drm_i915_gem_request *req,
+ unsigned flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ const unsigned idx = req->engine->id;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+ obj->dirty = 1; /* be paranoid */
+
+ /* Add a reference if we're newly entering the active list. */
+ if (obj->active == 0)
+ i915_gem_object_get(obj);
+ obj->active |= 1 << idx;
+ i915_gem_active_set(&obj->last_read[idx], req);
+
+ if (flags & EXEC_OBJECT_WRITE) {
+ i915_gem_active_set(&obj->last_write, req);
+
+ intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+ /* update for the implicit flush after a batch */
+ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+ }
+
+ if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+ i915_gem_active_set(&obj->last_fence, req);
+ if (flags & __EXEC_OBJECT_HAS_FENCE) {
+ struct drm_i915_private *dev_priv = req->i915;
+ list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+ &dev_priv->mm.fence_list);
+ }
+ }
+
+ list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req)
{
- struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
struct i915_vma *vma;
list_for_each_entry(vma, vmas, exec_list) {
- struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
struct drm_i915_gem_object *obj = vma->obj;
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
- obj->dirty = 1; /* be paranoid */
obj->base.write_domain = obj->base.pending_write_domain;
- if (obj->base.write_domain == 0)
+ if (obj->base.write_domain)
+ vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+ else
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
- i915_vma_move_to_active(vma, req);
- if (obj->base.write_domain) {
- i915_gem_active_set(&obj->last_write, req);
-
- intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
- /* update for the implicit flush after a batch */
- obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
- }
- if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
- i915_gem_active_set(&obj->last_fence, req);
- if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
- struct drm_i915_private *dev_priv = engine->i915;
- list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
- &dev_priv->mm.fence_list);
- }
- }
-
+ i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 8587dbc302e0..c0abe9a2210f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -232,7 +232,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
goto out;
}
- i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+ i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
out:
render_state_fini(&so);
return ret;
--
2.8.1