[Intel-gfx] [PATCH 19/19] drm/i915: Replace struct_mutex for batch pool serialisation
Matthew Auld
matthew.william.auld at gmail.com
Mon Jun 24 19:36:49 UTC 2019
On Mon, 24 Jun 2019 at 06:45, Chris Wilson <chris at chris-wilson.co.uk> wrote:
>
> Switch to tracking activity via i915_active on individual nodes, only
> keeping a list of retired objects in the cache, and reaping the cache
> when the engine itself idles.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/Makefile | 2 +-
> .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 58 ++++---
> drivers/gpu/drm/i915/gem/i915_gem_object.c | 1 -
> .../gpu/drm/i915/gem/i915_gem_object_types.h | 1 -
> drivers/gpu/drm/i915/gem/i915_gem_pm.c | 4 +-
> drivers/gpu/drm/i915/gt/intel_engine.h | 1 -
> drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 +-
> drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 +
> drivers/gpu/drm/i915/gt/intel_engine_pool.c | 164 ++++++++++++++++++
> drivers/gpu/drm/i915/gt/intel_engine_pool.h | 34 ++++
> .../gpu/drm/i915/gt/intel_engine_pool_types.h | 29 ++++
> drivers/gpu/drm/i915/gt/intel_engine_types.h | 6 +-
> drivers/gpu/drm/i915/gt/mock_engine.c | 3 +
> drivers/gpu/drm/i915/i915_debugfs.c | 68 --------
> drivers/gpu/drm/i915/i915_gem_batch_pool.h | 26 ---
> 15 files changed, 277 insertions(+), 133 deletions(-)
> create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.c
> create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.h
> create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
> delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.h
i915_gem_batch_pool.c is still skulking around somewhere :)
[snip]
> +static struct list_head *
> +bucket_for_size(struct intel_engine_pool *pool, size_t sz)
> +{
> + int n;
> +
> + /*
> + * Compute a power-of-two bucket, but throw everything greater than
> + * 16KiB into the same bucket: i.e. the the buckets hold objects of
s/the the buckets/the buckets/
> + * (1 page, 2 pages, 4 pages, 8+ pages).
> + */
> + n = fls(sz >> PAGE_SHIFT) - 1;
> + if (n >= ARRAY_SIZE(pool->cache_list))
> + n = ARRAY_SIZE(pool->cache_list) - 1;
> +
> + return &pool->cache_list[n];
> +}
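Just to spell out the bucketing for anyone reading along, a quick
userspace sketch (my own, untested; assumes 4KiB pages and the four
buckets from the comment above, with fls_sketch() standing in for the
kernel's fls()):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define NUM_BUCKETS	4	/* 1 page, 2 pages, 4 pages, 8+ pages */

/* Userspace stand-in for the kernel's fls(): 1-based index of the top set bit. */
static int fls_sketch(unsigned long x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}

	return n;
}

static int bucket_for_size_sketch(size_t sz)
{
	int n = fls_sketch(sz >> PAGE_SHIFT) - 1;

	if (n >= NUM_BUCKETS)
		n = NUM_BUCKETS - 1;

	return n;
}

int main(void)
{
	/* 1 page -> 0, 2-3 pages -> 1, 4-7 pages -> 2, 8+ pages -> 3 */
	printf("%d %d %d %d %d\n",
	       bucket_for_size_sketch(4096),	/* 0 */
	       bucket_for_size_sketch(8192),	/* 1 */
	       bucket_for_size_sketch(12288),	/* 1 */
	       bucket_for_size_sketch(16384),	/* 2 */
	       bucket_for_size_sketch(65536));	/* 3 */

	return 0;
}

so the last bucket is a catch-all for anything of 8 pages and up, and
in-between sizes round down to the nearest power-of-two bucket.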
> +
> +static void node_free(struct intel_engine_pool_node *node)
> +{
> + i915_gem_object_put(node->obj);
> + kfree(node);
> +}
> +
> +static int pool_active(struct i915_active *ref)
> +{
> + struct intel_engine_pool_node *node =
> + container_of(ref, typeof(*node), active);
> + struct reservation_object *resv = node->obj->base.resv;
> +
> + if (reservation_object_trylock(resv)) {
> + reservation_object_add_excl_fence(resv, NULL);
> + reservation_object_unlock(resv);
> + }
> +
> + return i915_gem_object_pin_pages(node->obj);
> +}
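If I have the intent right, the trylock here is a best-effort clearing
of whatever stale fences are left on the object from its previous use,
which should all have signalled by the time the node was retired back
into the cache. A short comment might save the next reader a double
take, something along the lines of (my wording, adjust to taste):

	/*
	 * Opportunistically clear any stale fences left over from the
	 * node's previous use; if the trylock fails, waiters may see
	 * (already signalled) fences from the old batch, which is
	 * harmless.
	 */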
> +
> +static void pool_retire(struct i915_active *ref)
> +{
> + struct intel_engine_pool_node *node =
> + container_of(ref, typeof(*node), active);
> + struct intel_engine_pool *pool = node->pool;
> + struct list_head *list = bucket_for_size(pool, node->obj->base.size);
> + unsigned long flags;
> +
> + GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
> +
> + i915_gem_object_unpin_pages(node->obj);
> +
> + spin_lock_irqsave(&pool->lock, flags);
> + list_add(&node->link, list);
> + spin_unlock_irqrestore(&pool->lock, flags);
> +}
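Similarly, a one-liner above the list_add() wouldn't hurt, e.g.:

	/* Return the node to its size bucket, ready for reuse. */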
> +
> +static struct intel_engine_pool_node *
> +node_create(struct intel_engine_pool *pool, size_t sz)
> +{
> + struct intel_engine_cs *engine = to_engine(pool);
> + struct intel_engine_pool_node *node;
> + struct drm_i915_gem_object *obj;
> +
> + node = kmalloc(sizeof(*node),
> + GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
> + if (!node)
> + return ERR_PTR(-ENOMEM);
> +
> + node->pool = pool;
> + i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
> +
> + obj = i915_gem_object_create_internal(engine->i915, sz);
> + if (IS_ERR(obj)) {
> + kfree(node);
> + return ERR_CAST(obj);
Needs an i915_active_fini() somewhere on this error path, and ditto for node_free()?
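Something like this, perhaps (untested sketch):

	obj = i915_gem_object_create_internal(engine->i915, sz);
	if (IS_ERR(obj)) {
		i915_active_fini(&node->active);
		kfree(node);
		return ERR_CAST(obj);
	}

with a matching i915_active_fini(&node->active) in node_free() before
the kfree().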
Anyway, looks good.
Reviewed-by: Matthew Auld <matthew.auld at intel.com>