[Intel-gfx] [PATCH 17/20] drm/i915: Add an implementation for i915_gem_ww_ctx locking, v2.
Chris Wilson
chris at chris-wilson.co.uk
Mon Jul 6 06:19:23 UTC 2020
From: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
i915_gem_ww_ctx is used to lock all GEM buffer objects for pinning and memory
eviction. We don't use it yet, but let's start by adding the definition
first.

To use it, pass a non-NULL ww context to gem_object_lock and do not unlock
the objects directly; that is done in i915_gem_ww_ctx_fini.
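As an illustration only (not part of this patch), a minimal usage sketch of
the i915_acquire_ctx helpers added below; obj_a and obj_b are placeholder
GEM objects:

	struct i915_acquire_ctx acquire;
	int err;

	i915_acquire_ctx_init(&acquire);

	/* Each lock grabs a reference and handles -EDEADLK backoff internally. */
	err = i915_acquire_ctx_lock(&acquire, obj_a);
	if (!err)
		err = i915_acquire_ctx_lock(&acquire, obj_b);

	if (!err) {
		/* No further locks may be added to this context. */
		i915_acquire_ctx_done(&acquire);

		/* ... pin, bind or evict while all objects are held ... */
	}

	/* Unlocks everything still held and drops the object references. */
	i915_acquire_ctx_fini(&acquire);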
Changes since v1:
- Change ww_ctx and obj order in locking functions (Joonas Lahtinen)
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at linux.intel.com>
---
drivers/gpu/drm/i915/Makefile | 4 +
drivers/gpu/drm/i915/i915_globals.c | 1 +
drivers/gpu/drm/i915/i915_globals.h | 1 +
drivers/gpu/drm/i915/mm/i915_acquire_ctx.c | 139 ++++++++++
drivers/gpu/drm/i915/mm/i915_acquire_ctx.h | 34 +++
drivers/gpu/drm/i915/mm/st_acquire_ctx.c | 242 ++++++++++++++++++
.../drm/i915/selftests/i915_mock_selftests.h | 1 +
7 files changed, 422 insertions(+)
create mode 100644 drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
create mode 100644 drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
create mode 100644 drivers/gpu/drm/i915/mm/st_acquire_ctx.c
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 41a27fd5dbc7..33c85b4ff3ed 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -124,6 +124,10 @@ gt-y += \
gt/gen9_renderstate.o
i915-y += $(gt-y)
+# Memory + DMA management
+i915-y += \
+ mm/i915_acquire_ctx.o
+
# GEM (Graphics Execution Management) code
gem-y += \
gem/i915_gem_busy.o \
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
index 3aa213684293..51ec42a14694 100644
--- a/drivers/gpu/drm/i915/i915_globals.c
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -87,6 +87,7 @@ static void __i915_globals_cleanup(void)
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
+ i915_global_acquire_init,
i915_global_buddy_init,
i915_global_context_init,
i915_global_gem_context_init,
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
index b2f5cd9b9b1a..11227abf2769 100644
--- a/drivers/gpu/drm/i915/i915_globals.h
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -27,6 +27,7 @@ void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
+int i915_global_acquire_init(void);
int i915_global_buddy_init(void);
int i915_global_context_init(void);
int i915_global_gem_context_init(void);
diff --git a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
new file mode 100644
index 000000000000..d1c3b958c15d
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/dma-resv.h>
+
+#include "i915_globals.h"
+#include "gem/i915_gem_object.h"
+
+#include "i915_acquire_ctx.h"
+
+static struct i915_global_acquire {
+ struct i915_global base;
+ struct kmem_cache *slab_acquires;
+} global;
+
+struct i915_acquire {
+ struct drm_i915_gem_object *obj;
+ struct i915_acquire *next;
+};
+
+static struct i915_acquire *i915_acquire_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_acquires, GFP_KERNEL);
+}
+
+static void i915_acquire_free(struct i915_acquire *lnk)
+{
+ kmem_cache_free(global.slab_acquires, lnk);
+}
+
+void i915_acquire_ctx_init(struct i915_acquire_ctx *ctx)
+{
+ ww_acquire_init(&ctx->ctx, &reservation_ww_class);
+ ctx->locked = NULL;
+}
+
+int i915_acquire_ctx_lock(struct i915_acquire_ctx *ctx,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_acquire *lock, *lnk;
+ int err;
+
+ lock = i915_acquire_alloc();
+ if (!lock)
+ return -ENOMEM;
+
+ lock->obj = i915_gem_object_get(obj);
+ lock->next = NULL;
+
+ while ((lnk = lock)) {
+ obj = lnk->obj;
+ lock = lnk->next;
+
+ err = dma_resv_lock_interruptible(obj->base.resv, &ctx->ctx);
+ if (err == -EDEADLK) {
+ struct i915_acquire *old;
+
+ while ((old = ctx->locked)) {
+ i915_gem_object_unlock(old->obj);
+ ctx->locked = old->next;
+ old->next = lock;
+ lock = old;
+ }
+
+ err = dma_resv_lock_slow_interruptible(obj->base.resv,
+ &ctx->ctx);
+ }
+ if (!err) {
+ lnk->next = ctx->locked;
+ ctx->locked = lnk;
+ } else {
+ i915_gem_object_put(obj);
+ i915_acquire_free(lnk);
+ }
+ if (err == -EALREADY)
+ err = 0;
+ if (err)
+ break;
+ }
+
+ while ((lnk = lock)) {
+ lock = lnk->next;
+ i915_gem_object_put(lnk->obj);
+ i915_acquire_free(lnk);
+ }
+
+ return err;
+}
+
+int i915_acquire_mm(struct i915_acquire_ctx *acquire)
+{
+ return 0;
+}
+
+void i915_acquire_ctx_fini(struct i915_acquire_ctx *ctx)
+{
+ struct i915_acquire *lnk;
+
+ while ((lnk = ctx->locked)) {
+ i915_gem_object_unlock(lnk->obj);
+ i915_gem_object_put(lnk->obj);
+
+ ctx->locked = lnk->next;
+ i915_acquire_free(lnk);
+ }
+
+ ww_acquire_fini(&ctx->ctx);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "st_acquire_ctx.c"
+#endif
+
+static void i915_global_acquire_shrink(void)
+{
+ kmem_cache_shrink(global.slab_acquires);
+}
+
+static void i915_global_acquire_exit(void)
+{
+ kmem_cache_destroy(global.slab_acquires);
+}
+
+static struct i915_global_acquire global = { {
+ .shrink = i915_global_acquire_shrink,
+ .exit = i915_global_acquire_exit,
+} };
+
+int __init i915_global_acquire_init(void)
+{
+ global.slab_acquires = KMEM_CACHE(i915_acquire, 0);
+ if (!global.slab_acquires)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
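For reviewers less familiar with the dance above: i915_acquire_ctx_lock()
follows the standard ww_mutex backoff contract on -EDEADLK. A self-contained
sketch of that contract (illustrative only, not part of this patch; it
roughly mirrors the pattern in the kernel's ww_mutex documentation and
assumes distinct placeholder reservation objects):

	static int example_lock_all(struct dma_resv **resv, int count,
				    struct ww_acquire_ctx *ctx)
	{
		struct dma_resv *contended = NULL;
		int i, n, err;

	retry:
		for (n = 0; n < count; n++) {
			if (resv[n] == contended) {
				/* Already locked by the slow path below. */
				contended = NULL;
				continue;
			}

			err = dma_resv_lock_interruptible(resv[n], ctx);
			if (err) {
				/* Back off: drop every lock taken so far... */
				for (i = 0; i < n; i++)
					dma_resv_unlock(resv[i]);
				if (contended)
					dma_resv_unlock(contended);

				if (err != -EDEADLK)
					return err;

				/* ...sleep on the contended lock, then retry. */
				err = dma_resv_lock_slow_interruptible(resv[n], ctx);
				if (err)
					return err;

				contended = resv[n];
				goto retry;
			}
		}

		return 0;
	}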
diff --git a/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
new file mode 100644
index 000000000000..bea00e3d6a36
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_acquire_ctx.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_ACQUIRE_CTX_H__
+#define __I915_ACQUIRE_CTX_H__
+
+#include <linux/list.h>
+#include <linux/ww_mutex.h>
+
+struct drm_i915_gem_object;
+struct i915_acquire;
+
+struct i915_acquire_ctx {
+ struct ww_acquire_ctx ctx;
+ struct i915_acquire *locked;
+};
+
+void i915_acquire_ctx_init(struct i915_acquire_ctx *acquire);
+
+static inline void i915_acquire_ctx_done(struct i915_acquire_ctx *acquire)
+{
+ ww_acquire_done(&acquire->ctx);
+}
+
+void i915_acquire_ctx_fini(struct i915_acquire_ctx *acquire);
+
+int __must_check i915_acquire_ctx_lock(struct i915_acquire_ctx *acquire,
+ struct drm_i915_gem_object *obj);
+
+int i915_acquire_mm(struct i915_acquire_ctx *acquire);
+
+#endif /* __I915_ACQUIRE_CTX_H__ */
diff --git a/drivers/gpu/drm/i915/mm/st_acquire_ctx.c b/drivers/gpu/drm/i915/mm/st_acquire_ctx.c
new file mode 100644
index 000000000000..6e94bdbb3265
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/st_acquire_ctx.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "selftests/i915_random.h"
+#include "selftests/mock_gem_device.h"
+
+static int checked_acquire_lock(struct i915_acquire_ctx *acquire,
+ struct drm_i915_gem_object *obj,
+ const char *name)
+{
+ int err;
+
+ err = i915_acquire_ctx_lock(acquire, obj);
+ if (err) {
+ pr_err("i915_acquire_lock(%s) failed, err:%d\n", name, err);
+ return err;
+ }
+
+ if (!mutex_is_locked(&obj->base.resv->lock.base)) {
+ pr_err("Failed to lock %s!\n", name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_acquire_lock(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *a, *b;
+ struct i915_acquire_ctx acquire;
+ int err;
+
+ a = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ b = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(b)) {
+ err = PTR_ERR(b);
+ goto out_a;
+ }
+
+ i915_acquire_ctx_init(&acquire);
+
+ err = checked_acquire_lock(&acquire, a, "A");
+ if (err)
+ goto out_fini;
+
+ err = checked_acquire_lock(&acquire, b, "B");
+ if (err)
+ goto out_fini;
+
+ /* Again for EALREADY */
+
+ err = checked_acquire_lock(&acquire, a, "A");
+ if (err)
+ goto out_fini;
+
+ err = checked_acquire_lock(&acquire, b, "B");
+ if (err)
+ goto out_fini;
+
+ i915_acquire_ctx_done(&acquire);
+
+ if (!mutex_is_locked(&a->base.resv->lock.base)) {
+ pr_err("Failed to lock A, after i915_acquire_done\n");
+ err = -EINVAL;
+ }
+ if (!mutex_is_locked(&b->base.resv->lock.base)) {
+ pr_err("Failed to lock B, after i915_acquire_done\n");
+ err = -EINVAL;
+ }
+
+out_fini:
+ i915_acquire_ctx_fini(&acquire);
+
+ if (mutex_is_locked(&a->base.resv->lock.base)) {
+ pr_err("A is still locked!\n");
+ err = -EINVAL;
+ }
+ if (mutex_is_locked(&b->base.resv->lock.base)) {
+ pr_err("B is still locked!\n");
+ err = -EINVAL;
+ }
+
+ i915_gem_object_put(b);
+out_a:
+ i915_gem_object_put(a);
+ return err;
+}
+
+struct deadlock {
+ struct drm_i915_gem_object *obj[64];
+};
+
+static int __igt_acquire_deadlock(void *arg)
+{
+ struct deadlock *dl = arg;
+ const unsigned int total = ARRAY_SIZE(dl->obj);
+ I915_RND_STATE(prng);
+ unsigned int *order;
+ int n, count, err = 0;
+
+ order = i915_random_order(total, &prng);
+ if (!order)
+ return -ENOMEM;
+
+ while (!kthread_should_stop()) {
+ struct i915_acquire_ctx acquire;
+
+ i915_random_reorder(order, total, &prng);
+ count = i915_prandom_u32_max_state(total, &prng);
+
+ i915_acquire_ctx_init(&acquire);
+
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ err = checked_acquire_lock(&acquire, obj, "dl");
+ if (err) {
+ i915_acquire_ctx_fini(&acquire);
+ goto out;
+ }
+ }
+
+ i915_acquire_ctx_done(&acquire);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ if (!lockdep_is_held(&obj->base.resv->lock.base)) {
+ pr_err("lock not taken!\n");
+ i915_acquire_ctx_fini(&acquire);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+#endif
+
+ i915_acquire_ctx_fini(&acquire);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+ for (n = 0; n < count; n++) {
+ struct drm_i915_gem_object *obj = dl->obj[order[n]];
+
+ if (lockdep_is_held(&obj->base.resv->lock.base)) {
+ pr_err("lock still held after fini!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+#endif
+ }
+
+out:
+ kfree(order);
+ return err;
+}
+
+static int igt_acquire_deadlock(void *arg)
+{
+ unsigned int ncpus = num_online_cpus();
+ struct drm_i915_private *i915 = arg;
+ struct task_struct **threads;
+ struct deadlock dl;
+ int ret = 0, n;
+
+ threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads)
+ return -ENOMEM;
+
+ for (n = 0; n < ARRAY_SIZE(dl.obj); n += 2) {
+ dl.obj[n] = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(dl.obj[n])) {
+ ret = PTR_ERR(dl.obj[n]);
+ goto out_obj;
+ }
+
+ /* Repeat the objects for -EALREADY */
+ dl.obj[n + 1] = i915_gem_object_get(dl.obj[n]);
+ }
+
+ for (n = 0; n < ncpus; n++) {
+ threads[n] = kthread_run(__igt_acquire_deadlock,
+ &dl, "igt/%d", n);
+ if (IS_ERR(threads[n])) {
+ ret = PTR_ERR(threads[n]);
+ ncpus = n;
+ break;
+ }
+
+ get_task_struct(threads[n]);
+ }
+
+ yield(); /* start all threads before we begin */
+ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
+
+ for (n = 0; n < ncpus; n++) {
+ int err;
+
+ err = kthread_stop(threads[n]);
+ if (err < 0 && !ret)
+ ret = err;
+
+ put_task_struct(threads[n]);
+ }
+
+out_obj:
+ for (n = 0; n < ARRAY_SIZE(dl.obj); n++) {
+ if (IS_ERR(dl.obj[n]))
+ break;
+ i915_gem_object_put(dl.obj[n]);
+ }
+ kfree(threads);
+ return ret;
+}
+
+int i915_acquire_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_acquire_lock),
+ SUBTEST(igt_acquire_deadlock),
+ };
+ struct drm_i915_private *i915;
+ int err = 0;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+ drm_dev_put(&i915->drm);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3db34d3eea58..cb6f94633356 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -26,6 +26,7 @@ selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
selftest(objects, i915_gem_object_mock_selftests)
+selftest(acquire, i915_acquire_mock_selftests)
selftest(phys, i915_gem_phys_mock_selftests)
selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
selftest(vma, i915_vma_mock_selftests)
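As a usage note (assuming the existing i915.mock_selftests module parameter,
which this patch does not change), the new subtests can be exercised on a
CONFIG_DRM_I915_SELFTEST=y build with, for example:

	modprobe i915 mock_selftests=-1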
--
2.20.1