[Intel-gfx] [PATCH 14/59] drm/i915: Update move_to_gpu() to take a request structure
John.C.Harrison at Intel.com
Thu Mar 19 05:30:19 PDT 2015
From: John Harrison <John.C.Harrison at Intel.com>
The plan is to pass requests around as the basic submission tracking structure
rather than rings and contexts. This patch updates the move_to_gpu() code paths
in both the legacy execbuffer and the execlist submission mechanisms to take a
request instead of a ring/context pair.
For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison at Intel.com>
Reviewed-by: Tomas Elf <tomas.elf at intel.com>
---
drivers/gpu/drm/i915/i915_gem_execbuffer.c | 10 +++++-----
drivers/gpu/drm/i915/intel_lrc.c | 10 ++++------
2 files changed, 9 insertions(+), 11 deletions(-)
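[Note, not part of the patch: a minimal, self-contained sketch of the general
shape of this refactor, using toy names rather than real i915 types. The idea
is that a single request structure bundles the ring and context that callers
previously had to pass separately, so the submission paths only need the one
handle.]

#include <stdio.h>

struct ring { const char *name; };
struct ctx  { int id; };

/* The request bundles everything the submission path needs. */
struct request {
	struct ring *ring;
	struct ctx  *ctx;
};

/* Before: move_to_gpu(ring, ctx, ...);  after: move_to_gpu(req, ...). */
static int move_to_gpu(struct request *req)
{
	printf("submitting on %s, ctx %d\n", req->ring->name, req->ctx->id);
	return 0;
}

int main(void)
{
	struct ring rcs = { "render" };
	struct ctx  c   = { 42 };
	struct request req = { &rcs, &c };

	return move_to_gpu(&req);
}
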
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0b705cb..7c7ec0c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -892,7 +892,7 @@ err:
}
static int
-i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
+i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
struct i915_vma *vma;
@@ -902,7 +902,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring);
if (ret)
return ret;
@@ -913,7 +913,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
}
if (flush_chipset)
- i915_gem_chipset_flush(ring->dev);
+ i915_gem_chipset_flush(req->ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
wmb();
@@ -921,7 +921,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return intel_ring_invalidate_all_caches(ring);
+ return intel_ring_invalidate_all_caches(req->ring);
}
static bool
@@ -1238,7 +1238,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4c72c11b..4ed3621 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -579,11 +579,9 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
return 0;
}
-static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
- struct intel_context *ctx,
+static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- struct intel_engine_cs *ring = ringbuf->ring;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -592,7 +590,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- ret = i915_gem_object_sync(obj, ring);
+ ret = i915_gem_object_sync(obj, req->ring);
if (ret)
return ret;
@@ -608,7 +606,7 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
/* Unconditionally invalidate gpu caches and ensure that we do flush
* any residual writes from the previous batch.
*/
- return logical_ring_invalidate_all_caches(ringbuf, ctx);
+ return logical_ring_invalidate_all_caches(req->ringbuf, req->ctx);
}
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -913,7 +911,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, params->ctx, vmas);
+ ret = execlists_move_to_gpu(params->request, vmas);
if (ret)
return ret;
--
1.7.9.5