[PATCH 21/21] smart-acquire-lock
Chris Wilson
chris@chris-wilson.co.uk
Sat Jul 4 23:26:35 UTC 2020
---
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 76 +---------
.../gpu/drm/i915/gem/i915_gem_object_types.h | 9 --
drivers/gpu/drm/i915/i915_globals.c | 1 +
drivers/gpu/drm/i915/i915_globals.h | 1 +
drivers/gpu/drm/i915/mm/i915_acquire_ctx.c | 133 ++++++++++++++----
drivers/gpu/drm/i915/mm/i915_acquire_ctx.h | 6 +-
6 files changed, 114 insertions(+), 112 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index dc9af5034669..5da06608b021 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -639,18 +639,6 @@ static int eb_lock_mm(struct i915_execbuffer *eb)
list_for_each_entry(ev, &eb->bind_list, bind_link) {
err = i915_acquire_ctx_lock(&eb->acquire, ev->vma->obj);
- if (err == -EDEADLK) {
- struct eb_vma *unlock = ev, *en;
-
- list_for_each_entry_safe_continue_reverse(unlock, en,
- &eb->bind_list,
- bind_link)
- list_move_tail(&unlock->bind_link,
- &eb->bind_list);
-
- err = i915_acquire_ctx_backoff(&eb->acquire,
- ev->vma->obj);
- }
if (err)
return err;
}
@@ -660,20 +648,7 @@ static int eb_lock_mm(struct i915_execbuffer *eb)
static int eb_acquire_mm(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_object *obj;
- int err;
-
- /* i915_acquire_all(&eb->acquire); */
-
- list_for_each_entry(obj, &eb->acquire.acquire_list, mm.acquire_link) {
- err = __i915_gem_object_get_pages_locked(obj);
- if (err)
- return err;
-
- GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- }
-
- return 0;
+ return i915_acquire_mm(&eb->acquire);
}
struct eb_vm_work {
@@ -1353,63 +1328,18 @@ static int eb_prepare_vma(struct eb_vm_work *work,
static int eb_lock_pt(struct i915_execbuffer *eb,
struct i915_vm_pt_stash *stash)
{
- struct chain {
- struct i915_page_table **head;
- struct chain *next;
- } chains[ARRAY_SIZE(stash->pt)], *chain, *cur, *pc;
int n, err;
- chain = NULL;
for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
- chains[n].head = &stash->pt[n];
- chains[n].next = chain;
- chain = &chains[n];
- }
-
- for (pc = NULL, cur = chain; cur; pc = cur, cur = cur->next) {
- struct i915_page_table *prev, *pt;
+ struct i915_page_table *pt;
- prev = NULL;
- for (pt = *cur->head; pt; pt = pt->stash) {
+ for (pt = stash->pt[n]; pt; pt = pt->stash) {
err = i915_acquire_ctx_lock(&eb->acquire, pt->base);
GEM_BUG_ON(err == -EALREADY);
-
- /* Someone wants one of our older obj locks */
- while (err == -EDEADLK) {
- err = i915_acquire_ctx_backoff(&eb->acquire,
- pt->base);
- if (err)
- return err;
-
- err = eb_lock_mm(eb);
- if (err)
- return err;
-
- /* Move pt to the head, restart the chain */
- if (prev) {
- prev->stash = pt->stash;
- pt->stash = *cur->head;
- *cur->head = pt;
- prev = NULL;
- }
-
- /* And repeat the other chains of objects */
- if (pc) {
- pc->next = cur->next;
- cur->next = chain;
- chain = cur;
- pc = NULL;
- }
-
- err = i915_acquire_ctx_lock(&eb->acquire,
- pt->base);
- }
-
if (err)
return err;
assert_object_held(pt->base);
- prev = pt;
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index c4622b785ae5..ae3303ba272c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -205,15 +205,6 @@ struct drm_i915_gem_object {
*/
struct list_head region_link;
- /**
- * @acquire_link: Link into @i915_acquire_ctx.list
- *
- * When we lock this object through i915_gem_object_lock() with a
- * context, we add it to the list to ensure we can unlock everything
- * when i915_gem_ww_ctx_backoff() or i915_gem_ww_ctx_fini() are called.
- */
- struct list_head acquire_link;
-
struct sg_table *pages;
void *mapping;
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 3aa213684293..51ec42a14694 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -87,6 +87,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_acquire_init,
i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index b2f5cd9b9b1a..11227abf2769 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -27,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_acquire_init(void);
int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
diff --git a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
index 180a04874125..06abccab7e03 100644
--- a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
+++ b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
@@ -5,62 +5,116 @@
#include <linux/dma-resv.h>
+#include "i915_globals.h"
#include "gem/i915_gem_object.h"
#include "i915_acquire_ctx.h"
+static struct i915_global_acquire {
+ struct i915_global base;
+ struct kmem_cache *slab_acquires;
+} global;
+
+struct i915_acquire {
+ struct drm_i915_gem_object *obj;
+ struct i915_acquire *next;
+};
+
+static struct i915_acquire *i915_acquire_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_acquires, GFP_KERNEL);
+}
+
+static void i915_acquire_free(struct i915_acquire *lnk)
+{
+ kmem_cache_free(global.slab_acquires, lnk);
+}
+
void i915_acquire_ctx_init(struct i915_acquire_ctx *ctx)
{
ww_acquire_init(&ctx->ctx, &reservation_ww_class);
- INIT_LIST_HEAD(&ctx->acquire_list);
+ ctx->locked = NULL;
}
int i915_acquire_ctx_lock(struct i915_acquire_ctx *ctx,
struct drm_i915_gem_object *obj)
{
+ struct i915_acquire *lock, *lnk;
int err;
- err = dma_resv_lock_interruptible(obj->base.resv, &ctx->ctx);
- if (!err) {
- list_add_tail(&obj->mm.acquire_link, &ctx->acquire_list);
- i915_gem_object_get(obj);
+ lock = i915_acquire_alloc();
+ if (!lock)
+ return -ENOMEM;
+
+ lock->obj = i915_gem_object_get(obj);
+ lock->next = NULL;
+
+ while ((lnk = lock)) {
+ obj = lnk->obj;
+ lock = lnk->next;
+
+ err = dma_resv_lock_interruptible(obj->base.resv, &ctx->ctx);
+ if (err == -EDEADLK) {
+ struct i915_acquire *old;
+
+ while ((old = ctx->locked)) {
+ i915_gem_object_unlock(old->obj);
+ ctx->locked = old->next;
+ old->next = lock;
+ lock = old;
+ }
+
+ err = dma_resv_lock_slow_interruptible(obj->base.resv,
+ &ctx->ctx);
+ }
+ if (!err) {
+ lnk->next = ctx->locked;
+ ctx->locked = lnk;
+ } else {
+ i915_gem_object_put(obj);
+ i915_acquire_free(lnk);
+ }
+ if (err == -EALREADY)
+ err = 0;
+ if (err)
+ break;
+ }
+
+ while ((lnk = lock)) {
+ lock = lnk->next;
+ i915_gem_object_put(lnk->obj);
+ i915_acquire_free(lnk);
}
- if (err == -EALREADY)
- err = 0;
return err;
}
-static void i915_acquire_ctx_unlock_all(struct i915_acquire_ctx *ctx)
+int i915_acquire_mm(struct i915_acquire_ctx *ctx)
{
- struct drm_i915_gem_object *obj, *on;
+ struct i915_acquire *lnk;
+ int err;
- list_for_each_entry_safe(obj, on, &ctx->acquire_list, mm.acquire_link) {
- i915_gem_object_unlock(obj);
- i915_gem_object_put(obj);
+ for (lnk = ctx->locked; lnk; lnk = lnk->next) {
+ err = __i915_gem_object_get_pages_locked(lnk->obj);
+ if (err)
+ return err;
}
- INIT_LIST_HEAD(&ctx->acquire_list);
+
+ return 0;
}
-int i915_acquire_ctx_backoff(struct i915_acquire_ctx *ctx,
- struct drm_i915_gem_object *obj)
+void i915_acquire_ctx_fini(struct i915_acquire_ctx *ctx)
{
- int err;
+ struct i915_acquire *lnk;
- i915_acquire_ctx_unlock_all(ctx);
+ while ((lnk = ctx->locked)) {
+ i915_gem_object_unlock(lnk->obj);
+ i915_gem_object_put(lnk->obj);
- err = dma_resv_lock_slow_interruptible(obj->base.resv, &ctx->ctx);
- if (!err) {
- list_add_tail(&obj->mm.acquire_link, &ctx->acquire_list);
- i915_gem_object_get(obj);
+ ctx->locked = lnk->next;
+ i915_acquire_free(lnk);
}
- return err;
-}
-
-void i915_acquire_ctx_fini(struct i915_acquire_ctx *ctx)
-{
- i915_acquire_ctx_unlock_all(ctx);
ww_acquire_fini(&ctx->ctx);
}
@@ -106,3 +160,28 @@ static int igt_acquire_ctx(void *arg)
}
#endif
+
+static void i915_global_acquire_shrink(void)
+{
+ kmem_cache_shrink(global.slab_acquires);
+}
+
+static void i915_global_acquire_exit(void)
+{
+ kmem_cache_destroy(global.slab_acquires);
+}
+
+static struct i915_global_acquire global = { {
+ .shrink = i915_global_acquire_shrink,
+ .exit = i915_global_acquire_exit,
+} };
+
+int __init i915_global_acquire_init(void)
+{
+ global.slab_acquires = KMEM_CACHE(i915_acquire, 0);
+ if (!global.slab_acquires)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
index 8f3d7d4ab9c2..96a951d532ae 100644
--- a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
+++ b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
@@ -10,10 +10,11 @@
#include <linux/ww_mutex.h>
struct drm_i915_gem_object;
+struct i915_acquire;
struct i915_acquire_ctx {
struct ww_acquire_ctx ctx;
- struct list_head acquire_list;
+ struct i915_acquire *locked;
};
void i915_acquire_ctx_init(struct i915_acquire_ctx *acquire);
@@ -28,7 +29,6 @@ void i915_acquire_ctx_fini(struct i915_acquire_ctx *acquire);
int __must_check i915_acquire_ctx_lock(struct i915_acquire_ctx *acquire,
struct drm_i915_gem_object *obj);
-int __must_check i915_acquire_ctx_backoff(struct i915_acquire_ctx *ctx,
- struct drm_i915_gem_object *obj);
+int i915_acquire_mm(struct i915_acquire_ctx *ctx);
#endif /* __I915_ACQUIRE_CTX_H__ */
--
2.20.1
More information about the Intel-gfx-trybot
mailing list