[Intel-gfx] [CI 03/26] drm/i915: Double check the active status on the batch pool
Chris Wilson
chris at chris-wilson.co.uk
Thu Aug 4 15:32:19 UTC 2016
We should not rely on obj->active being up to date unless we manually
flush it. Instead, we can verify that the next available batch object is
idle by looking at its last active request (and checking that request
for completion).
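
In other words, the lookup stops trusting a cached obj->active flag and
instead samples the per-engine request tracker at the point of reuse. A
minimal sketch of the new test, using the helper and field names from
the diff below (the surrounding loop is simplified here; the real code
also does size matching and opportunistic cleanup):

	/* Sketch only: cache_list is kept in strict LRU order, so the
	 * first object whose last read request on this engine has not
	 * yet completed terminates the scan; everything before it is
	 * known to be idle.
	 */
	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
					     &tmp->base.dev->struct_mutex))
			break; /* still busy on this engine */

		obj = tmp; /* idle: safe to recycle for a new batch */
	}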
v2: remove the struct drm_device forward declaration that was added
during this rework; the rework itself removes any need for it
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen at linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_batch_pool.c | 15 ++++++++-------
drivers/gpu/drm/i915/i915_gem_batch_pool.h | 6 ++++--
drivers/gpu/drm/i915/intel_engine_cs.c | 2 +-
3 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 825981b5aa40..ed989596d9a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -41,15 +41,15 @@
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
  * @pool: the batch buffer pool
  */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	pool->dev = dev;
+	pool->engine = engine;
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,7 +65,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 		struct drm_i915_gem_object *obj, *next;
@@ -101,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	struct list_head *list;
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	/* Compute a power-of-two bucket, but throw everything greater than
 	 * 16KiB into the same bucket: i.e. the buckets hold objects of
@@ -114,7 +114,8 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 
 	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (tmp->active)
+		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+					     &tmp->base.dev->struct_mutex))
 			break;
 
 		/* While we're looping, do some clean up */
@@ -133,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_object_create(pool->dev, size);
+		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
 		if (IS_ERR(obj))
 			return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 848e90703eed..10d5ac4c00d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -27,13 +27,15 @@
 
 #include "i915_drv.h"
 
+struct intel_engine_cs;
+
 struct i915_gem_batch_pool {
-	struct drm_device *dev;
+	struct intel_engine_cs *engine;
 	struct list_head cache_list[4];
 };
 
 /* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 202ad83b3dd1..f495969f749b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -185,7 +185,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	engine->fence_context = fence_context_alloc(1);
 
 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(&engine->i915->drm, &engine->batch_pool);
+	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 }
 
 /**
--
2.8.1