[Intel-gfx] [PATCH] drm/i915: Move context management under GEM
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Fri Oct 4 10:27:42 UTC 2019
On 03/10/2019 15:20, Chris Wilson wrote:
> Keep track of the GEM contexts underneath i915->gem.contexts and assign
> them their own lock for the purposes of list management.
>
> v2: Focus on lock tracking; ctx->vm is protected by ctx->mutex
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
> Another missed __assign_ppgtt inside the mock.
> ---
> drivers/gpu/drm/i915/gem/i915_gem_context.c | 183 +++++++++---------
> drivers/gpu/drm/i915/gem/i915_gem_context.h | 27 ++-
> .../gpu/drm/i915/gem/i915_gem_context_types.h | 2 +-
> .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
> drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 3 +-
> .../gpu/drm/i915/gem/selftests/huge_pages.c | 36 ++--
> .../drm/i915/gem/selftests/i915_gem_context.c | 168 ++++++++--------
> .../gpu/drm/i915/gem/selftests/mock_context.c | 7 +-
> drivers/gpu/drm/i915/gt/intel_context.c | 10 +-
> drivers/gpu/drm/i915/gt/intel_lrc.c | 5 +
> drivers/gpu/drm/i915/gt/selftest_context.c | 24 +--
> drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 39 ++--
> drivers/gpu/drm/i915/gt/selftest_lrc.c | 6 +-
> .../gpu/drm/i915/gt/selftest_workarounds.c | 22 ++-
> drivers/gpu/drm/i915/gvt/scheduler.c | 24 +--
> drivers/gpu/drm/i915/i915_debugfs.c | 50 +++--
> drivers/gpu/drm/i915/i915_drv.c | 2 -
> drivers/gpu/drm/i915/i915_drv.h | 27 +--
> drivers/gpu/drm/i915/i915_gem.c | 10 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c | 4 +-
> drivers/gpu/drm/i915/i915_perf.c | 24 ++-
> drivers/gpu/drm/i915/i915_sysfs.c | 77 ++++----
> drivers/gpu/drm/i915/i915_trace.h | 2 +-
> drivers/gpu/drm/i915/selftests/i915_gem.c | 8 -
> .../gpu/drm/i915/selftests/i915_gem_evict.c | 3 -
> drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 15 +-
> drivers/gpu/drm/i915/selftests/i915_request.c | 12 +-
> drivers/gpu/drm/i915/selftests/i915_vma.c | 7 +-
> .../gpu/drm/i915/selftests/mock_gem_device.c | 6 +-
> 29 files changed, 415 insertions(+), 390 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index ae588544bf3a..dadb820b0502 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -218,9 +218,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
>
> static void i915_gem_context_free(struct i915_gem_context *ctx)
> {
> - lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
>
> + spin_lock(&ctx->i915->gem.contexts.lock);
> + list_del(&ctx->link);
> + spin_unlock(&ctx->i915->gem.contexts.lock);
> +
> free_engines(rcu_access_pointer(ctx->engines));
> mutex_destroy(&ctx->engines_mutex);
>
> @@ -230,67 +233,54 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
> kfree(ctx->name);
> put_pid(ctx->pid);
>
> - list_del(&ctx->link);
> mutex_destroy(&ctx->mutex);
>
> kfree_rcu(ctx, rcu);
> }
>
> -static void contexts_free(struct drm_i915_private *i915)
> +static void contexts_free_all(struct llist_node *list)
> {
> - struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
> struct i915_gem_context *ctx, *cn;
>
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> - llist_for_each_entry_safe(ctx, cn, freed, free_link)
> + llist_for_each_entry_safe(ctx, cn, list, free_link)
> i915_gem_context_free(ctx);
> }
>
> -static void contexts_free_first(struct drm_i915_private *i915)
> +static void contexts_flush_free(struct i915_gem_contexts *gc)
> {
> - struct i915_gem_context *ctx;
> - struct llist_node *freed;
> -
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> - freed = llist_del_first(&i915->contexts.free_list);
> - if (!freed)
> - return;
> -
> - ctx = container_of(freed, typeof(*ctx), free_link);
> - i915_gem_context_free(ctx);
> + contexts_free_all(llist_del_all(&gc->free_list));
> }
>
> static void contexts_free_worker(struct work_struct *work)
> {
> - struct drm_i915_private *i915 =
> - container_of(work, typeof(*i915), contexts.free_work);
> + struct i915_gem_contexts *gc =
> + container_of(work, typeof(*gc), free_work);
>
> - mutex_lock(&i915->drm.struct_mutex);
> - contexts_free(i915);
> - mutex_unlock(&i915->drm.struct_mutex);
> + contexts_flush_free(gc);
> }
>
> void i915_gem_context_release(struct kref *ref)
> {
> struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
> - struct drm_i915_private *i915 = ctx->i915;
> + struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
>
> trace_i915_context_free(ctx);
> - if (llist_add(&ctx->free_link, &i915->contexts.free_list))
> - queue_work(i915->wq, &i915->contexts.free_work);
> + if (llist_add(&ctx->free_link, &gc->free_list))
> + queue_work(ctx->i915->wq, &gc->free_work);
> }
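Just to write down my reading of the new lifetime - on the final put nothing touches the list any more, the context is only batched onto the lockless free_list, and the list_del moves into the deferred free under the new spinlock. Roughly (sketch of the flow in this patch, not a suggested change):

	i915_gem_context_put(ctx)		/* last reference */
	  -> i915_gem_context_release()
		llist_add(&ctx->free_link, &gc->free_list);
		queue_work(...);		/* only when the llist was empty */

	contexts_free_worker() / contexts_flush_free()	/* also from create and file close */
	  -> i915_gem_context_free()
		spin_lock(&gc->lock);
		list_del(&ctx->link);
		spin_unlock(&gc->lock);
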
>
> static void context_close(struct i915_gem_context *ctx)
> {
> - i915_gem_context_set_closed(ctx);
> + struct i915_address_space *vm;
>
> - if (ctx->vm)
> - i915_vm_close(ctx->vm);
> + i915_gem_context_set_closed(ctx);
>
> mutex_lock(&ctx->mutex);
>
> + vm = i915_gem_context_vm(ctx);
> + if (vm)
> + i915_vm_close(vm);
> +
> ctx->file_priv = ERR_PTR(-EBADF);
>
> /*
> @@ -317,7 +307,6 @@ __create_context(struct drm_i915_private *i915)
> return ERR_PTR(-ENOMEM);
>
> kref_init(&ctx->ref);
> - list_add_tail(&ctx->link, &i915->contexts.list);
> ctx->i915 = i915;
> ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
> mutex_init(&ctx->mutex);
> @@ -343,6 +332,10 @@ __create_context(struct drm_i915_private *i915)
> for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
> ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
>
> + spin_lock(&i915->gem.contexts.lock);
> + list_add_tail(&ctx->link, &i915->gem.contexts.list);
> + spin_unlock(&i915->gem.contexts.lock);
> +
> return ctx;
>
> err_free:
> @@ -372,11 +365,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
> static struct i915_address_space *
> __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
> {
> - struct i915_address_space *old = ctx->vm;
> + struct i915_address_space *old = i915_gem_context_vm(ctx);
>
> GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
>
> - ctx->vm = i915_vm_open(vm);
> + rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
> context_apply_all(ctx, __apply_ppgtt, vm);
>
> return old;
> @@ -385,7 +378,7 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
> static void __assign_ppgtt(struct i915_gem_context *ctx,
> struct i915_address_space *vm)
> {
> - if (vm == ctx->vm)
> + if (vm == rcu_access_pointer(ctx->vm))
> return;
>
> vm = __set_ppgtt(ctx, vm);
> @@ -417,27 +410,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
> }
>
> static struct i915_gem_context *
> -i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
> +i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
> {
> struct i915_gem_context *ctx;
>
> - lockdep_assert_held(&dev_priv->drm.struct_mutex);
> -
> if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
> - !HAS_EXECLISTS(dev_priv))
> + !HAS_EXECLISTS(i915))
> return ERR_PTR(-EINVAL);
>
> - /* Reap the most stale context */
> - contexts_free_first(dev_priv);
> + /* Reap the stale contexts */
> + contexts_flush_free(&i915->gem.contexts);
>
> - ctx = __create_context(dev_priv);
> + ctx = __create_context(i915);
> if (IS_ERR(ctx))
> return ctx;
>
> - if (HAS_FULL_PPGTT(dev_priv)) {
> + if (HAS_FULL_PPGTT(i915)) {
> struct i915_ppgtt *ppgtt;
>
> - ppgtt = i915_ppgtt_create(dev_priv);
> + ppgtt = i915_ppgtt_create(i915);
> if (IS_ERR(ppgtt)) {
> DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
> PTR_ERR(ppgtt));
> @@ -445,14 +436,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
> return ERR_CAST(ppgtt);
> }
>
> + mutex_lock(&ctx->mutex);
> __assign_ppgtt(ctx, &ppgtt->vm);
> + mutex_unlock(&ctx->mutex);
> +
> i915_vm_put(&ppgtt->vm);
> }
>
> if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
> struct intel_timeline *timeline;
>
> - timeline = intel_timeline_create(&dev_priv->gt, NULL);
> + timeline = intel_timeline_create(&i915->gt, NULL);
> if (IS_ERR(timeline)) {
> context_close(ctx);
> return ERR_CAST(timeline);
> @@ -497,48 +491,40 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
> return ctx;
> }
>
> -static void init_contexts(struct drm_i915_private *i915)
> +static void init_contexts(struct i915_gem_contexts *gc)
> {
> - mutex_init(&i915->contexts.mutex);
> - INIT_LIST_HEAD(&i915->contexts.list);
> -
> - /* Using the simple ida interface, the max is limited by sizeof(int) */
> - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
> - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
> - ida_init(&i915->contexts.hw_ida);
> - INIT_LIST_HEAD(&i915->contexts.hw_id_list);
hw_id removal should go to that different patch.
The hw_id removal should go into that separate patch.
> + spin_lock_init(&gc->lock);
> + INIT_LIST_HEAD(&gc->list);
>
> - INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
> - init_llist_head(&i915->contexts.free_list);
> + INIT_WORK(&gc->free_work, contexts_free_worker);
> + init_llist_head(&gc->free_list);
> }
>
> -int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
> +int i915_gem_init_contexts(struct drm_i915_private *i915)
> {
> struct i915_gem_context *ctx;
>
> /* Reassure ourselves we are only called once */
> - GEM_BUG_ON(dev_priv->kernel_context);
> + GEM_BUG_ON(i915->kernel_context);
>
> - init_contexts(dev_priv);
> + init_contexts(&i915->gem.contexts);
>
> /* lowest priority; idle task */
> - ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
> + ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
> if (IS_ERR(ctx)) {
> DRM_ERROR("Failed to create default global context\n");
> return PTR_ERR(ctx);
> }
> - dev_priv->kernel_context = ctx;
> + i915->kernel_context = ctx;
>
> DRM_DEBUG_DRIVER("%s context support initialized\n",
> - DRIVER_CAPS(dev_priv)->has_logical_contexts ?
> + DRIVER_CAPS(i915)->has_logical_contexts ?
> "logical" : "fake");
> return 0;
> }
>
> -void i915_gem_contexts_fini(struct drm_i915_private *i915)
> +void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
> {
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> destroy_kernel_context(&i915->kernel_context);
> }
>
> @@ -557,11 +543,16 @@ static int vm_idr_cleanup(int id, void *p, void *data)
> static int gem_context_register(struct i915_gem_context *ctx,
> struct drm_i915_file_private *fpriv)
> {
> + struct i915_address_space *vm;
> int ret;
>
> ctx->file_priv = fpriv;
> - if (ctx->vm)
> - ctx->vm->file = fpriv;
> +
> + mutex_lock(&ctx->mutex);
> + vm = i915_gem_context_vm(ctx);
> + if (vm)
> + WRITE_ONCE(vm->file, fpriv); /* XXX */
> + mutex_unlock(&ctx->mutex);
>
> ctx->pid = get_task_pid(current, PIDTYPE_PID);
> ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
> @@ -598,9 +589,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
> idr_init(&file_priv->context_idr);
> idr_init_base(&file_priv->vm_idr, 1);
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = i915_gem_create_context(i915, 0);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto err;
> @@ -628,6 +617,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
> void i915_gem_context_close(struct drm_file *file)
> {
> struct drm_i915_file_private *file_priv = file->driver_priv;
> + struct drm_i915_private *i915 = file_priv->dev_priv;
>
> idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
> idr_destroy(&file_priv->context_idr);
> @@ -636,6 +626,8 @@ void i915_gem_context_close(struct drm_file *file)
> idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
> idr_destroy(&file_priv->vm_idr);
> mutex_destroy(&file_priv->vm_idr_lock);
> +
> + contexts_flush_free(&i915->gem.contexts);
> }
>
> int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
> @@ -814,16 +806,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
> struct i915_address_space *vm;
> int ret;
>
> - if (!ctx->vm)
> + if (!rcu_access_pointer(ctx->vm))
> return -ENODEV;
>
> - /* XXX rcu acquire? */
> - ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
> - if (ret)
> - return ret;
> -
> + rcu_read_lock();
> vm = i915_vm_get(ctx->vm);
> - mutex_unlock(&ctx->i915->drm.struct_mutex);
> + rcu_read_unlock();
>
> ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> if (ret)
> @@ -932,7 +920,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> if (args->size)
> return -EINVAL;
>
> - if (!ctx->vm)
> + if (!rcu_access_pointer(ctx->vm))
> return -ENODEV;
>
> if (upper_32_bits(args->value))
> @@ -946,17 +934,20 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> if (!vm)
> return -ENOENT;
>
> - err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
> + err = mutex_lock_interruptible(&ctx->mutex);
> if (err)
> goto out;
>
> - if (vm == ctx->vm)
> + if (i915_gem_context_is_closed(ctx)) {
> + err = -ENOENT;
> + goto out;
> + }
> +
> + if (vm == rcu_access_pointer(ctx->vm))
> goto unlock;
>
> /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
> - mutex_lock(&ctx->mutex);
> lut_close(ctx);
> - mutex_unlock(&ctx->mutex);
>
> old = __set_ppgtt(ctx, vm);
>
> @@ -976,8 +967,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> }
>
> unlock:
> - mutex_unlock(&ctx->i915->drm.struct_mutex);
> -
> + mutex_unlock(&ctx->mutex);
> out:
> i915_vm_put(vm);
> return err;
> @@ -1833,10 +1823,11 @@ static int clone_vm(struct i915_gem_context *dst,
> struct i915_gem_context *src)
> {
> struct i915_address_space *vm;
> + int err = 0;
>
> rcu_read_lock();
> do {
> - vm = READ_ONCE(src->vm);
> + vm = rcu_dereference(src->vm);
> if (!vm)
> break;
>
> @@ -1858,7 +1849,7 @@ static int clone_vm(struct i915_gem_context *dst,
> * it cannot be reallocated elsewhere.
> */
>
> - if (vm == READ_ONCE(src->vm))
> + if (vm == rcu_access_pointer(src->vm))
> break;
>
> i915_vm_put(vm);
> @@ -1866,11 +1857,16 @@ static int clone_vm(struct i915_gem_context *dst,
> rcu_read_unlock();
>
> if (vm) {
> - __assign_ppgtt(dst, vm);
> + if (!mutex_lock_interruptible(&dst->mutex)) {
> + __assign_ppgtt(dst, vm);
> + mutex_unlock(&dst->mutex);
> + } else {
> + err = -EINTR;
> + }
> i915_vm_put(vm);
> }
>
> - return 0;
> + return err;
> }
>
> static int create_clone(struct i915_user_extension __user *ext, void *data)
> @@ -1960,12 +1956,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
> return -EIO;
> }
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> -
> ext_data.ctx = i915_gem_create_context(i915, args->flags);
> - mutex_unlock(&dev->struct_mutex);
> if (IS_ERR(ext_data.ctx))
> return PTR_ERR(ext_data.ctx);
>
> @@ -2092,10 +2083,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>
> case I915_CONTEXT_PARAM_GTT_SIZE:
> args->size = 0;
> - if (ctx->vm)
> - args->value = ctx->vm->total;
> + rcu_read_lock();
> + if (rcu_access_pointer(ctx->vm))
> + args->value = rcu_dereference(ctx->vm)->total;
> else
> args->value = to_i915(dev)->ggtt.vm.total;
> + rcu_read_unlock();
> break;
>
> case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
> @@ -2161,7 +2154,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
> int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
> void *data, struct drm_file *file)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> + struct drm_i915_private *i915 = to_i915(dev);
> struct drm_i915_reset_stats *args = data;
> struct i915_gem_context *ctx;
> int ret;
> @@ -2183,7 +2176,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
> */
>
> if (capable(CAP_SYS_ADMIN))
> - args->reset_count = i915_reset_count(&dev_priv->gpu_error);
> + args->reset_count = i915_reset_count(&i915->gpu_error);
> else
> args->reset_count = 0;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> index 50bc27d30c03..9234586830d1 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> @@ -11,7 +11,9 @@
>
> #include "gt/intel_context.h"
>
> +#include "i915_drv.h"
> #include "i915_gem.h"
> +#include "i915_gem_gtt.h"
> #include "i915_scheduler.h"
> #include "intel_device_info.h"
>
> @@ -118,8 +120,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
> }
>
> /* i915_gem_context.c */
> -int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
> -void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
> +int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
> +void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
>
> int i915_gem_context_open(struct drm_i915_private *i915,
> struct drm_file *file);
> @@ -158,6 +160,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
> kref_put(&ctx->ref, i915_gem_context_release);
> }
>
> +static inline struct i915_address_space *
> +i915_gem_context_vm(struct i915_gem_context *ctx)
> +{
> + return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
> +}
> +
> +static inline struct i915_address_space *
> +i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
> +{
> + struct i915_address_space *vm;
> +
> + rcu_read_lock();
> + vm = rcu_dereference(ctx->vm);
> + if (!vm)
> + vm = &ctx->i915->ggtt.vm;
> + vm = i915_vm_get(vm);
> + rcu_read_unlock();
> +
> + return vm;
> +}
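For my own notes, the access rules these two helpers encode, as used by the callers later in the patch (sketch only):

	/* Owner/writer: must hold ctx->mutex. */
	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);	/* rcu_dereference_protected */
	...
	mutex_unlock(&ctx->mutex);

	/* Reader: takes its own reference under RCU, never NULL (falls back to ggtt). */
	vm = i915_gem_context_get_vm_rcu(ctx);
	...
	i915_vm_put(vm);

Plus plain rcu_access_pointer(ctx->vm) where only the NULL check matters.
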
> +
> static inline struct i915_gem_engines *
> i915_gem_context_engines(struct i915_gem_context *ctx)
> {
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> index 87be27877e22..ab8e1367dfc8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> @@ -88,7 +88,7 @@ struct i915_gem_context {
> * In other modes, this is a NULL pointer with the expectation that
> * the caller uses the shared global GTT.
> */
> - struct i915_address_space *vm;
> + struct i915_address_space __rcu *vm;
>
> /**
> * @pid: process id of creator
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 88a881be12ec..98816c35ffc3 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -728,7 +728,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
> return -ENOENT;
>
> eb->gem_context = ctx;
> - if (ctx->vm)
> + if (rcu_access_pointer(ctx->vm))
> eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
>
> eb->context_flags = 0;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index 1738a15eb911..4f970474013f 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -758,7 +758,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
> * On almost all of the older hw, we cannot tell the GPU that
> * a page is readonly.
> */
> - vm = dev_priv->kernel_context->vm;
> + vm = rcu_dereference_protected(dev_priv->kernel_context->vm,
> + true); /* static vm */
> if (!vm || !vm->has_read_only)
> return -ENODEV;
> }
> diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> index 98b2a6ccfcc1..3314858f3046 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> @@ -1322,15 +1322,15 @@ static int igt_ppgtt_pin_update(void *arg)
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *dev_priv = ctx->i915;
> unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
> - struct i915_address_space *vm = ctx->vm;
> struct drm_i915_gem_object *obj;
> struct i915_gem_engines_iter it;
> + struct i915_address_space *vm;
> struct intel_context *ce;
> struct i915_vma *vma;
> unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
> unsigned int n;
> int first, last;
> - int err;
> + int err = 0;
>
> /*
> * Make sure there's no funny business when doing a PIN_UPDATE -- in the
> @@ -1340,9 +1340,10 @@ static int igt_ppgtt_pin_update(void *arg)
> * huge-gtt-pages.
> */
>
> - if (!vm || !i915_vm_is_4lvl(vm)) {
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + if (!i915_vm_is_4lvl(vm)) {
> pr_info("48b PPGTT not supported, skipping\n");
> - return 0;
> + goto out_vm;
> }
>
> first = ilog2(I915_GTT_PAGE_SIZE_64K);
> @@ -1451,6 +1452,8 @@ static int igt_ppgtt_pin_update(void *arg)
> i915_vma_close(vma);
> out_put:
> i915_gem_object_put(obj);
> +out_vm:
> + i915_vm_put(vm);
>
> return err;
> }
> @@ -1460,7 +1463,7 @@ static int igt_tmpfs_fallback(void *arg)
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *i915 = ctx->i915;
> struct vfsmount *gemfs = i915->mm.gemfs;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
> struct drm_i915_gem_object *obj;
> struct i915_vma *vma;
> u32 *vaddr;
> @@ -1510,6 +1513,7 @@ static int igt_tmpfs_fallback(void *arg)
> out_restore:
> i915->mm.gemfs = gemfs;
>
> + i915_vm_put(vm);
> return err;
> }
>
> @@ -1517,14 +1521,14 @@ static int igt_shrink_thp(void *arg)
> {
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *i915 = ctx->i915;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
> struct drm_i915_gem_object *obj;
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
> struct i915_vma *vma;
> unsigned int flags = PIN_USER;
> unsigned int n;
> - int err;
> + int err = 0;
>
> /*
> * Sanity check shrinking huge-paged object -- make sure nothing blows
> @@ -1533,12 +1537,14 @@ static int igt_shrink_thp(void *arg)
>
> if (!igt_can_allocate_thp(i915)) {
> pr_info("missing THP support, skipping\n");
> - return 0;
> + goto out_vm;
> }
>
> obj = i915_gem_object_create_shmem(i915, SZ_2M);
> - if (IS_ERR(obj))
> - return PTR_ERR(obj);
> + if (IS_ERR(obj)) {
> + err = PTR_ERR(obj);
> + goto out_vm;
> + }
>
> vma = i915_vma_instance(obj, vm, NULL);
> if (IS_ERR(vma)) {
> @@ -1607,6 +1613,8 @@ static int igt_shrink_thp(void *arg)
> i915_vma_close(vma);
> out_put:
> i915_gem_object_put(obj);
> +out_vm:
> + i915_vm_put(vm);
>
> return err;
> }
> @@ -1675,6 +1683,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
> };
> struct drm_file *file;
> struct i915_gem_context *ctx;
> + struct i915_address_space *vm;
> intel_wakeref_t wakeref;
> int err;
>
> @@ -1699,8 +1708,11 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
> goto out_unlock;
> }
>
> - if (ctx->vm)
> - ctx->vm->scrub_64K = true;
> + mutex_lock(&ctx->mutex);
> + vm = i915_gem_context_vm(ctx);
> + if (vm)
> + WRITE_ONCE(vm->scrub_64K, true);
> + mutex_unlock(&ctx->mutex);
>
> err = i915_subtests(tests, ctx);
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index 2fb31ada2fa7..d44fa9d356f1 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -53,19 +53,17 @@ static int live_nop_switch(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
> if (!ctx) {
> err = -ENOMEM;
> - goto out_unlock;
> + goto out_file;
> }
>
> for (n = 0; n < nctx; n++) {
> ctx[n] = live_context(i915, file);
> if (IS_ERR(ctx[n])) {
> err = PTR_ERR(ctx[n]);
> - goto out_unlock;
> + goto out_file;
> }
> }
>
> @@ -79,7 +77,7 @@ static int live_nop_switch(void *arg)
> rq = igt_request_alloc(ctx[n], engine);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> - goto out_unlock;
> + goto out_file;
> }
> i915_request_add(rq);
> }
> @@ -87,7 +85,7 @@ static int live_nop_switch(void *arg)
> pr_err("Failed to populated %d contexts\n", nctx);
> intel_gt_set_wedged(&i915->gt);
> err = -EIO;
> - goto out_unlock;
> + goto out_file;
> }
>
> times[1] = ktime_get_raw();
> @@ -97,7 +95,7 @@ static int live_nop_switch(void *arg)
>
> err = igt_live_test_begin(&t, i915, __func__, engine->name);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> end_time = jiffies + i915_selftest.timeout_jiffies;
> for_each_prime_number_from(prime, 2, 8192) {
> @@ -107,7 +105,7 @@ static int live_nop_switch(void *arg)
> rq = igt_request_alloc(ctx[n % nctx], engine);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> - goto out_unlock;
> + goto out_file;
> }
>
> /*
> @@ -143,7 +141,7 @@ static int live_nop_switch(void *arg)
>
> err = igt_live_test_end(&t);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
> engine->name,
> @@ -151,8 +149,7 @@ static int live_nop_switch(void *arg)
> prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
> }
>
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> +out_file:
> mock_file_free(i915, file);
> return err;
> }
> @@ -253,12 +250,10 @@ static int live_parallel_switch(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_locked;
> + goto out_file;
> }
>
> engines = i915_gem_context_lock_engines(ctx);
> @@ -268,7 +263,7 @@ static int live_parallel_switch(void *arg)
> if (!data) {
> i915_gem_context_unlock_engines(ctx);
> err = -ENOMEM;
> - goto out_locked;
> + goto out;
> }
>
> m = 0; /* Use the first context as our template for the engines */
> @@ -276,7 +271,7 @@ static int live_parallel_switch(void *arg)
> err = intel_context_pin(ce);
> if (err) {
> i915_gem_context_unlock_engines(ctx);
> - goto out_locked;
> + goto out;
> }
> data[m++].ce[0] = intel_context_get(ce);
> }
> @@ -287,7 +282,7 @@ static int live_parallel_switch(void *arg)
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_locked;
> + goto out;
> }
>
> for (m = 0; m < count; m++) {
> @@ -296,20 +291,18 @@ static int live_parallel_switch(void *arg)
>
> ce = intel_context_create(ctx, data[m].ce[0]->engine);
> if (IS_ERR(ce))
> - goto out_locked;
> + goto out;
>
> err = intel_context_pin(ce);
> if (err) {
> intel_context_put(ce);
> - goto out_locked;
> + goto out;
> }
>
> data[m].ce[n] = ce;
> }
> }
>
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> for (fn = func; !err && *fn; fn++) {
> struct igt_live_test t;
> int n;
> @@ -354,8 +347,7 @@ static int live_parallel_switch(void *arg)
> mutex_unlock(&i915->drm.struct_mutex);
> }
>
> - mutex_lock(&i915->drm.struct_mutex);
> -out_locked:
> +out:
> for (n = 0; n < count; n++) {
> for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
> if (!data[n].ce[m])
> @@ -365,8 +357,8 @@ static int live_parallel_switch(void *arg)
> intel_context_put(data[n].ce[m]);
> }
> }
> - mutex_unlock(&i915->drm.struct_mutex);
> kfree(data);
> +out_file:
> mock_file_free(i915, file);
> return err;
> }
> @@ -626,11 +618,9 @@ static int igt_ctx_exec(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, engine->name);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ncontexts = 0;
> ndwords = 0;
> @@ -642,7 +632,7 @@ static int igt_ctx_exec(void *arg)
> ctx = kernel_context(i915);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
> @@ -654,7 +644,7 @@ static int igt_ctx_exec(void *arg)
> err = PTR_ERR(obj);
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
> }
>
> @@ -663,17 +653,18 @@ static int igt_ctx_exec(void *arg)
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> engine->name,
> - yesno(!!ctx->vm), err);
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> err = throttle(ce, tq, ARRAY_SIZE(tq));
> if (err) {
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (++dw == max_dwords(obj)) {
> @@ -703,11 +694,10 @@ static int igt_ctx_exec(void *arg)
> dw += rem;
> }
>
> -out_unlock:
> +out_file:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
>
> mock_file_free(i915, file);
> if (err)
> @@ -742,22 +732,20 @@ static int igt_shared_ctx_exec(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> parent = live_context(i915, file);
> if (IS_ERR(parent)) {
> err = PTR_ERR(parent);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (!parent->vm) { /* not full-ppgtt; nothing to share */
> err = 0;
> - goto out_unlock;
> + goto out_file;
> }
>
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> for_each_engine(engine, i915, id) {
> unsigned long ncontexts, ndwords, dw;
> @@ -781,7 +769,9 @@ static int igt_shared_ctx_exec(void *arg)
> goto out_test;
> }
>
> + mutex_lock(&ctx->mutex);
> __assign_ppgtt(ctx, parent->vm);
> + mutex_unlock(&ctx->mutex);
>
> ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
> GEM_BUG_ON(IS_ERR(ce));
> @@ -801,7 +791,8 @@ static int igt_shared_ctx_exec(void *arg)
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> engine->name,
> - yesno(!!ctx->vm), err);
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> intel_context_put(ce);
> kernel_context_close(ctx);
> goto out_test;
> @@ -840,17 +831,13 @@ static int igt_shared_ctx_exec(void *arg)
> dw += rem;
> }
>
> - mutex_unlock(&i915->drm.struct_mutex);
> i915_gem_drain_freed_objects(i915);
> - mutex_lock(&i915->drm.struct_mutex);
> }
> out_test:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> +out_file:
> mock_file_free(i915, file);
> return err;
> }
> @@ -1222,8 +1209,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
> if (flags & TEST_RESET)
> igt_global_reset_lock(&i915->gt);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> ret = PTR_ERR(ctx);
> @@ -1278,8 +1263,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
> i915_gem_object_put(obj);
>
> out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> if (flags & TEST_RESET)
> igt_global_reset_unlock(&i915->gt);
>
> @@ -1339,23 +1322,24 @@ static int igt_ctx_readonly(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> - vm = ctx->vm ?: &i915->ggtt.alias->vm;
> + rcu_read_lock();
> + vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
> if (!vm || !vm->has_read_only) {
> + rcu_read_unlock();
> err = 0;
> - goto out_unlock;
> + goto out_file;
> }
> + rcu_read_unlock();
>
> ndwords = 0;
> dw = 0;
> @@ -1373,7 +1357,7 @@ static int igt_ctx_readonly(void *arg)
> if (IS_ERR(obj)) {
> err = PTR_ERR(obj);
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (prandom_u32_state(&prng) & 1)
> @@ -1384,15 +1368,17 @@ static int igt_ctx_readonly(void *arg)
> if (err) {
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> - ce->engine->name, yesno(!!ctx->vm), err);
> + ce->engine->name,
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> err = throttle(ce, tq, ARRAY_SIZE(tq));
> if (err) {
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (++dw == max_dwords(obj)) {
> @@ -1424,20 +1410,19 @@ static int igt_ctx_readonly(void *arg)
> dw += rem;
> }
>
> -out_unlock:
> +out_file:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
>
> mock_file_free(i915, file);
> return err;
> }
>
> -static int check_scratch(struct i915_gem_context *ctx, u64 offset)
> +static int check_scratch(struct i915_address_space *vm, u64 offset)
> {
> struct drm_mm_node *node =
> - __drm_mm_interval_first(&ctx->vm->mm,
> + __drm_mm_interval_first(&vm->mm,
> offset, offset + sizeof(u32) - 1);
> if (!node || node->start > offset)
> return 0;
> @@ -1455,6 +1440,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
> {
> struct drm_i915_private *i915 = ctx->i915;
> struct drm_i915_gem_object *obj;
> + struct i915_address_space *vm;
> struct i915_request *rq;
> struct i915_vma *vma;
> u32 *cmd;
> @@ -1487,17 +1473,18 @@ static int write_to_scratch(struct i915_gem_context *ctx,
>
> intel_gt_chipset_flush(engine->gt);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + vma = i915_vma_instance(obj, vm, NULL);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> - goto err;
> + goto err_vm;
> }
>
> err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
> if (err)
> - goto err;
> + goto err_vm;
>
> - err = check_scratch(ctx, offset);
> + err = check_scratch(vm, offset);
> if (err)
> goto err_unpin;
>
> @@ -1523,6 +1510,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
>
> i915_request_add(rq);
>
> + i915_vm_put(vm);
> return 0;
>
> skip_request:
> @@ -1531,6 +1519,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
> i915_request_add(rq);
> err_unpin:
> i915_vma_unpin(vma);
> +err_vm:
> + i915_vm_put(vm);
> err:
> i915_gem_object_put(obj);
> return err;
> @@ -1542,6 +1532,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
> {
> struct drm_i915_private *i915 = ctx->i915;
> struct drm_i915_gem_object *obj;
> + struct i915_address_space *vm;
> const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
> const u32 result = 0x100;
> struct i915_request *rq;
> @@ -1586,17 +1577,18 @@ static int read_from_scratch(struct i915_gem_context *ctx,
>
> intel_gt_chipset_flush(engine->gt);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + vma = i915_vma_instance(obj, vm, NULL);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> - goto err;
> + goto err_vm;
> }
>
> err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
> if (err)
> - goto err;
> + goto err_vm;
>
> - err = check_scratch(ctx, offset);
> + err = check_scratch(vm, offset);
> if (err)
> goto err_unpin;
>
> @@ -1627,12 +1619,12 @@ static int read_from_scratch(struct i915_gem_context *ctx,
> err = i915_gem_object_set_to_cpu_domain(obj, false);
> i915_gem_object_unlock(obj);
> if (err)
> - goto err;
> + goto err_vm;
>
> cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
> if (IS_ERR(cmd)) {
> err = PTR_ERR(cmd);
> - goto err;
> + goto err_vm;
> }
>
> *value = cmd[result / sizeof(*cmd)];
> @@ -1647,6 +1639,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
> i915_request_add(rq);
> err_unpin:
> i915_vma_unpin(vma);
> +err_vm:
> + i915_vm_put(vm);
> err:
> i915_gem_object_put(obj);
> return err;
> @@ -1677,27 +1671,25 @@ static int igt_vm_isolation(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ctx_a = live_context(i915, file);
> if (IS_ERR(ctx_a)) {
> err = PTR_ERR(ctx_a);
> - goto out_unlock;
> + goto out_file;
> }
>
> ctx_b = live_context(i915, file);
> if (IS_ERR(ctx_b)) {
> err = PTR_ERR(ctx_b);
> - goto out_unlock;
> + goto out_file;
> }
>
> /* We can only test vm isolation, if the vm are distinct */
> if (ctx_a->vm == ctx_b->vm)
> - goto out_unlock;
> + goto out_file;
>
> vm_total = ctx_a->vm->total;
> GEM_BUG_ON(ctx_b->vm->total != vm_total);
> @@ -1726,7 +1718,7 @@ static int igt_vm_isolation(void *arg)
> err = read_from_scratch(ctx_b, engine,
> offset, &value);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> if (value) {
> pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
> @@ -1735,7 +1727,7 @@ static int igt_vm_isolation(void *arg)
> lower_32_bits(offset),
> this);
> err = -EINVAL;
> - goto out_unlock;
> + goto out_file;
> }
>
> this++;
> @@ -1745,11 +1737,9 @@ static int igt_vm_isolation(void *arg)
> pr_info("Checked %lu scratch offsets across %d engines\n",
> count, RUNTIME_INFO(i915)->num_engines);
>
> -out_unlock:
> +out_file:
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> mock_file_free(i915, file);
> return err;
> }
> @@ -1781,13 +1771,9 @@ static int mock_context_barrier(void *arg)
> * a request; useful for retiring old state after loading new.
> */
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = mock_context(i915, "mock");
> - if (!ctx) {
> - err = -ENOMEM;
> - goto unlock;
> - }
> + if (!ctx)
> + return -ENOMEM;
>
> counter = 0;
> err = context_barrier_task(ctx, 0,
> @@ -1860,8 +1846,6 @@ static int mock_context_barrier(void *arg)
>
> out:
> mock_context_close(ctx);
> -unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> return err;
> #undef pr_fmt
> #define pr_fmt(x) x
> diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> index 0104f16b1327..74ddd682c9cd 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> @@ -42,7 +42,10 @@ mock_context(struct drm_i915_private *i915,
> if (!ppgtt)
> goto err_put;
>
> + mutex_lock(&ctx->mutex);
> __set_ppgtt(ctx, &ppgtt->vm);
> + mutex_unlock(&ctx->mutex);
> +
> i915_vm_put(&ppgtt->vm);
> }
>
> @@ -65,7 +68,7 @@ void mock_context_close(struct i915_gem_context *ctx)
>
> void mock_init_contexts(struct drm_i915_private *i915)
> {
> - init_contexts(i915);
> + init_contexts(&i915->gem.contexts);
> }
>
> struct i915_gem_context *
> @@ -74,8 +77,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
> struct i915_gem_context *ctx;
> int err;
>
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> ctx = i915_gem_create_context(i915, 0);
> if (IS_ERR(ctx))
> return ctx;
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 35a40c2820a2..be34d97ac18f 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -221,12 +221,20 @@ intel_context_init(struct intel_context *ce,
> struct i915_gem_context *ctx,
> struct intel_engine_cs *engine)
> {
> + struct i915_address_space *vm;
> +
> GEM_BUG_ON(!engine->cops);
>
> kref_init(&ce->ref);
>
> ce->gem_context = ctx;
> - ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
> + rcu_read_lock();
> + vm = rcu_dereference(ctx->vm);
> + if (vm)
> + ce->vm = i915_vm_get(vm);
> + else
> + ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
> + rcu_read_unlock();
> if (ctx->timeline)
> ce->timeline = intel_timeline_get(ctx->timeline);
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 468438fb47af..471c23d0cffb 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -981,6 +981,11 @@ __execlists_schedule_in(struct i915_request *rq)
> ce->lrc_desc |=
> (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) <<
> GEN11_SW_CTX_ID_SHIFT;
> +
> +#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
> +/* in Gen12 ID 0x7FF is reserved to indicate idle */
> +#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
> +
> BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID);
> }
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> index 86cffbb0a9cb..7c838a57e174 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_context.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -155,13 +155,9 @@ static int live_context_size(void *arg)
> * HW tries to write past the end of one.
> */
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -
> fixme = kernel_context(gt->i915);
> - if (IS_ERR(fixme)) {
> - err = PTR_ERR(fixme);
> - goto unlock;
> - }
> + if (IS_ERR(fixme))
> + return PTR_ERR(fixme);
>
> for_each_engine(engine, gt->i915, id) {
> struct {
> @@ -201,8 +197,6 @@ static int live_context_size(void *arg)
> }
>
> kernel_context_close(fixme);
> -unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> return err;
> }
>
> @@ -305,12 +299,10 @@ static int live_active_context(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -
> fixme = live_context(gt->i915, file);
> if (IS_ERR(fixme)) {
> err = PTR_ERR(fixme);
> - goto unlock;
> + goto out_file;
> }
>
> for_each_engine(engine, gt->i915, id) {
> @@ -323,8 +315,7 @@ static int live_active_context(void *arg)
> break;
> }
>
> -unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> +out_file:
> mock_file_free(gt->i915, file);
> return err;
> }
> @@ -418,12 +409,10 @@ static int live_remote_context(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> -
> fixme = live_context(gt->i915, file);
> if (IS_ERR(fixme)) {
> err = PTR_ERR(fixme);
> - goto unlock;
> + goto out_file;
> }
>
> for_each_engine(engine, gt->i915, id) {
> @@ -436,8 +425,7 @@ static int live_remote_context(void *arg)
> break;
> }
>
> -unlock:
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> +out_file:
> mock_file_free(gt->i915, file);
> return err;
> }
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index ffbb3d23b887..e8a40df79bd0 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -58,9 +58,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
> memset(h, 0, sizeof(*h));
> h->gt = gt;
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> h->ctx = kernel_context(gt->i915);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(h->ctx))
> return PTR_ERR(h->ctx);
>
> @@ -133,7 +131,7 @@ static struct i915_request *
> hang_create_request(struct hang *h, struct intel_engine_cs *engine)
> {
> struct intel_gt *gt = h->gt;
> - struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
> + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
> struct drm_i915_gem_object *obj;
> struct i915_request *rq = NULL;
> struct i915_vma *hws, *vma;
> @@ -143,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
> int err;
>
> obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
> - if (IS_ERR(obj))
> + if (IS_ERR(obj)) {
> + i915_vm_put(vm);
> return ERR_CAST(obj);
> + }
>
> vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915));
> if (IS_ERR(vaddr)) {
> i915_gem_object_put(obj);
> + i915_vm_put(vm);
> return ERR_CAST(vaddr);
> }
>
> @@ -159,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
> h->batch = vaddr;
>
> vma = i915_vma_instance(h->obj, vm, NULL);
> - if (IS_ERR(vma))
> + if (IS_ERR(vma)) {
> + i915_vm_put(vm);
> return ERR_CAST(vma);
> + }
>
> hws = i915_vma_instance(h->hws, vm, NULL);
> - if (IS_ERR(hws))
> + if (IS_ERR(hws)) {
> + i915_vm_put(vm);
> return ERR_CAST(hws);
> + }
>
> err = i915_vma_pin(vma, 0, 0, PIN_USER);
> - if (err)
> + if (err) {
> + i915_vm_put(vm);
> return ERR_PTR(err);
> + }
>
> err = i915_vma_pin(hws, 0, 0, PIN_USER);
> if (err)
> @@ -266,6 +273,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
> i915_vma_unpin(hws);
> unpin_vma:
> i915_vma_unpin(vma);
> + i915_vm_put(vm);
> return err ? ERR_PTR(err) : rq;
> }
>
> @@ -382,9 +390,7 @@ static int igt_reset_nop(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> @@ -458,9 +464,7 @@ static int igt_reset_nop_engine(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> @@ -705,9 +709,7 @@ static int active_engine(void *data)
> return PTR_ERR(file);
>
> for (count = 0; count < ARRAY_SIZE(ctx); count++) {
> - mutex_lock(&engine->i915->drm.struct_mutex);
> ctx[count] = live_context(engine->i915, file);
> - mutex_unlock(&engine->i915->drm.struct_mutex);
> if (IS_ERR(ctx[count])) {
> err = PTR_ERR(ctx[count]);
> while (--count)
> @@ -1291,6 +1293,7 @@ static int igt_reset_evict_ppgtt(void *arg)
> {
> struct intel_gt *gt = arg;
> struct i915_gem_context *ctx;
> + struct i915_address_space *vm;
> struct drm_file *file;
> int err;
>
> @@ -1298,18 +1301,20 @@ static int igt_reset_evict_ppgtt(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> -	mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> -	mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> }
>
> err = 0;
> - if (ctx->vm) /* aliasing == global gtt locking, covered above */
> - err = __igt_reset_evict_vma(gt, ctx->vm,
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + if (!i915_is_ggtt(vm)) {
> + /* aliasing == global gtt locking, covered above */
> + err = __igt_reset_evict_vma(gt, vm,
> evict_vma, EXEC_OBJECT_WRITE);
> + }
> + i915_vm_put(vm);
>
> out:
> mock_file_free(gt->i915, file);
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 04c1cf573642..8dc42c5c7569 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -1631,7 +1631,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
> int err = 0;
>
> if (batch) {
> - vma = i915_vma_instance(batch, ctx->vm, NULL);
> + struct i915_address_space *vm;
> +
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + vma = i915_vma_instance(batch, vm, NULL);
> + i915_vm_put(vm);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> index 4ee2e2babd0d..7c7aceb85a74 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> @@ -260,7 +260,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
> rq = igt_spinner_create_request(spin, ce, MI_NOOP);
>
> intel_context_put(ce);
> - kernel_context_close(ctx);
>
> if (IS_ERR(rq)) {
> spin = NULL;
> @@ -279,6 +278,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
> if (err && spin)
> igt_spinner_end(spin);
>
> + kernel_context_close(ctx);
> return err;
> }
>
> @@ -355,6 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
> static struct i915_vma *create_batch(struct i915_gem_context *ctx)
> {
> struct drm_i915_gem_object *obj;
> + struct i915_address_space *vm;
> struct i915_vma *vma;
> int err;
>
> @@ -362,7 +363,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
> if (IS_ERR(obj))
> return ERR_CAST(obj);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + vma = i915_vma_instance(obj, vm, NULL);
> + i915_vm_put(vm);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> goto err_obj;
> @@ -463,12 +466,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
> 0xffff00ff,
> 0xffffffff,
> };
> + struct i915_address_space *vm;
> struct i915_vma *scratch;
> struct i915_vma *batch;
> int err = 0, i, v;
> u32 *cs, *results;
>
> - scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1);
> + i915_vm_put(vm);
> if (IS_ERR(scratch))
> return PTR_ERR(scratch);
>
> @@ -1010,6 +1016,7 @@ static int live_isolated_whitelist(void *arg)
> return 0;
>
> for (i = 0; i < ARRAY_SIZE(client); i++) {
> + struct i915_address_space *vm;
> struct i915_gem_context *c;
>
> c = kernel_context(i915);
> @@ -1018,22 +1025,27 @@ static int live_isolated_whitelist(void *arg)
> goto err;
> }
>
> - client[i].scratch[0] = create_scratch(c->vm, 1024);
> + vm = i915_gem_context_get_vm_rcu(c);
> +
> + client[i].scratch[0] = create_scratch(vm, 1024);
> if (IS_ERR(client[i].scratch[0])) {
> err = PTR_ERR(client[i].scratch[0]);
> + i915_vm_put(vm);
> kernel_context_close(c);
> goto err;
> }
>
> - client[i].scratch[1] = create_scratch(c->vm, 1024);
> + client[i].scratch[1] = create_scratch(vm, 1024);
> if (IS_ERR(client[i].scratch[1])) {
> err = PTR_ERR(client[i].scratch[1]);
> i915_vma_unpin_and_release(&client[i].scratch[0], 0);
> + i915_vm_put(vm);
> kernel_context_close(c);
> goto err;
> }
>
> client[i].ctx = c;
> + i915_vm_put(vm);
> }
>
> for_each_engine(engine, i915, id) {
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 03f567084548..6850f1f40241 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -365,7 +365,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
> struct i915_gem_context *ctx)
> {
> struct intel_vgpu_mm *mm = workload->shadow_mm;
> - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
> + struct i915_ppgtt *ppgtt =
> + i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
> int i = 0;
>
> if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
> @@ -378,6 +379,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
> px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
> }
> }
> +
> + i915_vm_put(&ppgtt->vm);
> }
>
> static int
> @@ -1230,20 +1233,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> struct intel_vgpu_submission *s = &vgpu->submission;
> struct intel_engine_cs *engine;
> struct i915_gem_context *ctx;
> + struct i915_ppgtt *ppgtt;
> enum intel_engine_id i;
> int ret;
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
> - if (IS_ERR(ctx)) {
> - ret = PTR_ERR(ctx);
> - goto out_unlock;
> - }
> + if (IS_ERR(ctx))
> + return PTR_ERR(ctx);
>
> i915_gem_context_set_force_single_submission(ctx);
>
> - i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
> + ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx));
> + i915_context_ppgtt_root_save(s, ppgtt);
>
> for_each_engine(engine, i915, i) {
> struct intel_context *ce;
> @@ -1288,12 +1289,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> atomic_set(&s->running_workload_num, 0);
> bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
>
> + i915_vm_put(&ppgtt->vm);
> i915_gem_context_put(ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> return 0;
>
> out_shadow_ctx:
> - i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
> + i915_context_ppgtt_root_restore(s, ppgtt);
> for_each_engine(engine, i915, i) {
> if (IS_ERR(s->shadow[i]))
> break;
> @@ -1301,9 +1302,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> intel_context_unpin(s->shadow[i]);
> intel_context_put(s->shadow[i]);
> }
> + i915_vm_put(&ppgtt->vm);
> i915_gem_context_put(ctx);
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 0e90ac608e07..b04cebc26eca 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -316,12 +316,18 @@ static void print_context_stats(struct seq_file *m,
> struct drm_i915_private *i915)
> {
> struct file_stats kstats = {};
> - struct i915_gem_context *ctx;
> + struct i915_gem_context *ctx, *cn;
>
> - list_for_each_entry(ctx, &i915->contexts.list, link) {
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> for_each_gem_engine(ce,
> i915_gem_context_lock_engines(ctx), it) {
> intel_context_lock_pinned(ce);
> @@ -338,7 +344,9 @@ static void print_context_stats(struct seq_file *m,
> i915_gem_context_unlock_engines(ctx);
>
> if (!IS_ERR_OR_NULL(ctx->file_priv)) {
> - struct file_stats stats = { .vm = ctx->vm, };
> + struct file_stats stats = {
> + .vm = rcu_access_pointer(ctx->vm),
> + };
> struct drm_file *file = ctx->file_priv->file;
> struct task_struct *task;
> char name[80];
> @@ -355,7 +363,12 @@ static void print_context_stats(struct seq_file *m,
>
> print_file_stats(m, name, stats);
> }
> +
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> }
> + spin_unlock(&i915->gem.contexts.lock);
>
> print_file_stats(m, "[k]contexts", kstats);
> }
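The reference-juggling walk here (repeated in i915_context_status and, by the look of it, in gen8_configure_all_contexts below) I read as this pattern (sketch only):

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		if (!kref_get_unless_zero(&ctx->ref))
			continue;	/* already heading for the free list */

		spin_unlock(&i915->gem.contexts.lock);

		/* ... use ctx without holding the lock ... */

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);	/* cn may be stale by now */
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

Works because the list_del only happens in i915_gem_context_free, which cannot run while we hold a reference, and the possibly stale ->next is re-read under the lock.
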
> @@ -363,7 +376,6 @@ static void print_context_stats(struct seq_file *m,
> static int i915_gem_object_info(struct seq_file *m, void *data)
> {
> struct drm_i915_private *i915 = node_to_i915(m->private);
> - int ret;
>
> seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
> i915->mm.shrink_count,
> @@ -372,12 +384,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>
> seq_putc(m, '\n');
>
> - ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
> - if (ret)
> - return ret;
> -
> print_context_stats(m, i915);
> - mutex_unlock(&i915->drm.struct_mutex);
>
> return 0;
> }
> @@ -1579,19 +1586,19 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
>
> static int i915_context_status(struct seq_file *m, void *unused)
> {
> - struct drm_i915_private *dev_priv = node_to_i915(m->private);
> - struct drm_device *dev = &dev_priv->drm;
> - struct i915_gem_context *ctx;
> - int ret;
> -
> - ret = mutex_lock_interruptible(&dev->struct_mutex);
> - if (ret)
> - return ret;
> + struct drm_i915_private *i915 = node_to_i915(m->private);
> + struct i915_gem_context *ctx, *cn;
>
> - list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> seq_puts(m, "HW context ");
> if (ctx->pid) {
> struct task_struct *task;
> @@ -1626,9 +1633,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
> i915_gem_context_unlock_engines(ctx);
>
> seq_putc(m, '\n');
> - }
>
> - mutex_unlock(&dev->struct_mutex);
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> + }
> + spin_unlock(&i915->gem.contexts.lock);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index 5323e4fa55d9..024da582ba0f 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1665,10 +1665,8 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
> {
> struct drm_i915_file_private *file_priv = file->driver_priv;
>
> - mutex_lock(&dev->struct_mutex);
> i915_gem_context_close(file);
> i915_gem_release(dev, file);
> - mutex_unlock(&dev->struct_mutex);
>
> kfree_rcu(file_priv, rcu);
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index cb63b2bd0ce8..35b610d52379 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1536,25 +1536,6 @@ struct drm_i915_private {
> int audio_power_refcount;
> u32 audio_freq_cntrl;
>
> - struct {
> - struct mutex mutex;
> - struct list_head list;
> - struct llist_head free_list;
> - struct work_struct free_work;
> -
> - /* The hw wants to have a stable context identifier for the
> - * lifetime of the context (for OA, PASID, faults, etc).
> - * This is limited in execlists to 21 bits.
> - */
> - struct ida hw_ida;
> -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
> -#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
> -#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
> -/* in Gen12 ID 0x7FF is reserved to indicate idle */
> -#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
> - struct list_head hw_id_list;
> - } contexts;
> -
> u32 fdi_rx_config;
>
> /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
> @@ -1710,6 +1691,14 @@ struct drm_i915_private {
>
> struct {
> struct notifier_block pm_notifier;
> +
> + struct i915_gem_contexts {
> + spinlock_t lock; /* locks list */
> + struct list_head list;
> +
> + struct llist_head free_list;
> + struct work_struct free_work;
> + } contexts;
> } gem;
>
> /* For i945gm vblank irq vs. C3 workaround */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 5a664bdead8c..f6db415985d5 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1266,7 +1266,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
> goto err_unlock;
> }
>
> - ret = i915_gem_contexts_init(dev_priv);
> + ret = i915_gem_init_contexts(dev_priv);
> if (ret) {
> GEM_BUG_ON(ret == -EIO);
> goto err_scratch;
> @@ -1348,7 +1348,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
> }
> err_context:
> if (ret != -EIO)
> - i915_gem_contexts_fini(dev_priv);
> + i915_gem_driver_release__contexts(dev_priv);
> err_scratch:
> intel_gt_driver_release(&dev_priv->gt);
> err_unlock:
> @@ -1416,11 +1416,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
>
> void i915_gem_driver_release(struct drm_i915_private *dev_priv)
> {
> - mutex_lock(&dev_priv->drm.struct_mutex);
> intel_engines_cleanup(dev_priv);
> - i915_gem_contexts_fini(dev_priv);
> + i915_gem_driver_release__contexts(dev_priv);
> intel_gt_driver_release(&dev_priv->gt);
> - mutex_unlock(&dev_priv->drm.struct_mutex);
>
> intel_wa_list_free(&dev_priv->gt_wa_list);
>
> @@ -1430,7 +1428,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
>
> i915_gem_drain_freed_objects(dev_priv);
>
> - WARN_ON(!list_empty(&dev_priv->contexts.list));
> + WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
> }
>
> void i915_gem_init_mmio(struct drm_i915_private *i915)
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 1d26634ca597..7b15bb891970 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -1366,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
> if (vm->has_read_only &&
> vm->i915->kernel_context &&
> vm->i915->kernel_context->vm) {
> - struct i915_address_space *clone = vm->i915->kernel_context->vm;
> + struct i915_address_space *clone =
> + rcu_dereference_protected(vm->i915->kernel_context->vm,
> + true); /* static */
>
> GEM_BUG_ON(!clone->has_read_only);
>
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index ecfbc37b738b..231388d06c82 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -1853,8 +1853,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> };
> #undef ctx_flexeuN
> struct intel_engine_cs *engine;
> - struct i915_gem_context *ctx;
> - int i;
> + struct i915_gem_context *ctx, *cn;
> + int i, err;
>
> for (i = 2; i < ARRAY_SIZE(regs); i++)
> regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
> @@ -1877,16 +1877,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> * context. Contexts idle at the time of reconfiguration are not
> * trapped behind the barrier.
> */
> - list_for_each_entry(ctx, &i915->contexts.list, link) {
> - int err;
> -
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> if (ctx == i915->kernel_context)
> continue;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
> - if (err)
> + if (err) {
> + i915_gem_context_put(ctx);
> return err;
> + }
> +
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> }
> + spin_unlock(&i915->gem.contexts.lock);
>
> /*
> * After updating all other contexts, we need to modify ourselves.
> @@ -1895,7 +1906,6 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> */
> for_each_uabi_engine(engine, i915) {
> struct intel_context *ce = engine->kernel_context;
> - int err;
>
> if (engine->class != RENDER_CLASS)
> continue;
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index 034b8abc5062..2a72407a6380 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -144,9 +144,9 @@ static const struct attribute_group media_rc6_attr_group = {
> };
> #endif
>
> -static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
> +static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
> {
> - if (!HAS_L3_DPF(dev_priv))
> + if (!HAS_L3_DPF(i915))
> return -EPERM;
>
> if (offset % 4 != 0)
> @@ -164,31 +164,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
> loff_t offset, size_t count)
> {
> struct device *kdev = kobj_to_dev(kobj);
> - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
> - struct drm_device *dev = &dev_priv->drm;
> + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
> int slice = (int)(uintptr_t)attr->private;
> int ret;
>
> - count = round_down(count, 4);
> -
> - ret = l3_access_valid(dev_priv, offset);
> + ret = l3_access_valid(i915, offset);
> if (ret)
> return ret;
>
> + count = round_down(count, 4);
> count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
> + memset(buf, 0, count);
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> -
> - if (dev_priv->l3_parity.remap_info[slice])
> + spin_lock(&i915->gem.contexts.lock);
> + if (i915->l3_parity.remap_info[slice])
> memcpy(buf,
> - dev_priv->l3_parity.remap_info[slice] + (offset/4),
> + i915->l3_parity.remap_info[slice] + offset / 4,
> count);
> - else
> - memset(buf, 0, count);
> -
> - mutex_unlock(&dev->struct_mutex);
> + spin_unlock(&i915->gem.contexts.lock);
>
> return count;
> }
> @@ -199,46 +192,46 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
> loff_t offset, size_t count)
> {
> struct device *kdev = kobj_to_dev(kobj);
> - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
> - struct drm_device *dev = &dev_priv->drm;
> + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
> struct i915_gem_context *ctx;
> int slice = (int)(uintptr_t)attr->private;
> - u32 **remap_info;
> + u32 *remap_info, *freeme = NULL;
> int ret;
>
> - ret = l3_access_valid(dev_priv, offset);
> + ret = l3_access_valid(i915, offset);
> if (ret)
> return ret;
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> + remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
> + if (!remap_info)
> + return -ENOMEM;
>
> - remap_info = &dev_priv->l3_parity.remap_info[slice];
> - if (!*remap_info) {
> - *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
> - if (!*remap_info) {
> - ret = -ENOMEM;
> - goto out;
> - }
> + spin_lock(&i915->gem.contexts.lock);
> +
> + if (i915->l3_parity.remap_info[slice]) {
> + freeme = remap_info;
> + remap_info = i915->l3_parity.remap_info[slice];
> + } else {
> + i915->l3_parity.remap_info[slice] = remap_info;
> }
>
> - /* TODO: Ideally we really want a GPU reset here to make sure errors
> - * aren't propagated. Since I cannot find a stable way to reset the GPU
> - * at this point it is left as a TODO.
> - */
> - memcpy(*remap_info + (offset/4), buf, count);
> + count = round_down(count, 4);
This round_down is new.
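To spell out the behavioural change (a minimal userspace-style sketch, not kernel code; the standalone framing, array sizes and helper name are mine, purely to illustrate what rounding count before the memcpy into the u32 remap_info[] does to a write that is not a multiple of 4 bytes):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mimic round_down(count, 4): drop any trailing partial dword. */
static size_t round_down4(size_t x)
{
	return x & ~(size_t)3;
}

int main(void)
{
	uint32_t remap_info[4] = {};
	const uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	size_t count = sizeof(buf); /* 6 bytes, not a multiple of 4 */

	/* Previously: all 6 bytes were copied, including a partial dword. */
	memcpy(remap_info, buf, count);
	printf("old: copied %zu bytes, word1 = 0x%08" PRIx32 "\n",
	       count, remap_info[1]);

	/* With the new round_down: the trailing partial dword is dropped. */
	memset(remap_info, 0, sizeof(remap_info));
	memcpy(remap_info, buf, round_down4(count));
	printf("new: copied %zu bytes, word1 = 0x%08" PRIx32 "\n",
	       round_down4(count), remap_info[1]);

	return 0;
}

And since count is rounded before the final "return count;", the short copy now also shows up as a short write to the caller, whereas the old code returned the original count.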
> + memcpy(remap_info + offset / 4, buf, count);
>
> /* NB: We defer the remapping until we switch to the context */
> - list_for_each_entry(ctx, &dev_priv->contexts.list, link)
> - ctx->remap_slice |= (1<<slice);
> + list_for_each_entry(ctx, &i915->gem.contexts.list, link)
> + ctx->remap_slice |= BIT(slice);
>
> - ret = count;
> + spin_unlock(&i915->gem.contexts.lock);
> + kfree(freeme);
>
> -out:
> - mutex_unlock(&dev->struct_mutex);
> + /*
> + * TODO: Ideally we really want a GPU reset here to make sure errors
> + * aren't propagated. Since I cannot find a stable way to reset the GPU
> + * at this point it is left as a TODO.
> + */
>
> - return ret;
> + return count;
> }
>
> static const struct bin_attribute dpf_attrs = {
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 1f2cf6cfafb5..7ef7a1e1664c 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -952,7 +952,7 @@ DECLARE_EVENT_CLASS(i915_context,
> TP_fast_assign(
> __entry->dev = ctx->i915->drm.primary->index;
> __entry->ctx = ctx;
> - __entry->vm = ctx->vm;
> + __entry->vm = rcu_access_pointer(ctx->vm);
> ),
>
> TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
> index 0346c3e5b6b6..bfa40a5b6d98 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
> @@ -138,11 +138,9 @@ static int igt_gem_suspend(void *arg)
> return PTR_ERR(file);
>
> err = -ENOMEM;
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = live_context(i915, file);
> if (!IS_ERR(ctx))
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (err)
> goto out;
>
> @@ -157,9 +155,7 @@ static int igt_gem_suspend(void *arg)
>
> pm_resume(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> out:
> mock_file_free(i915, file);
> return err;
> @@ -177,11 +173,9 @@ static int igt_gem_hibernate(void *arg)
> return PTR_ERR(file);
>
> err = -ENOMEM;
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = live_context(i915, file);
> if (!IS_ERR(ctx))
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (err)
> goto out;
>
> @@ -196,9 +190,7 @@ static int igt_gem_hibernate(void *arg)
>
> pm_resume(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> out:
> mock_file_free(i915, file);
> return err;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index f39f0282e78c..0af9a58d011d 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -473,7 +473,6 @@ static int igt_evict_contexts(void *arg)
> }
>
> count = 0;
> - mutex_lock(&i915->drm.struct_mutex);
> onstack_fence_init(&fence);
> do {
> struct i915_request *rq;
> @@ -510,8 +509,6 @@ static int igt_evict_contexts(void *arg)
> count++;
> err = 0;
> } while(1);
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> onstack_fence_fini(&fence);
> pr_info("Submitted %lu contexts/requests on %s\n",
> count, engine->name);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> index e40e6cfa51f1..8d8121c02161 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> @@ -1246,6 +1246,7 @@ static int exercise_mock(struct drm_i915_private *i915,
> unsigned long end_time))
> {
> const u64 limit = totalram_pages() << PAGE_SHIFT;
> + struct i915_address_space *vm;
> struct i915_gem_context *ctx;
> IGT_TIMEOUT(end_time);
> int err;
> @@ -1254,7 +1255,9 @@ static int exercise_mock(struct drm_i915_private *i915,
> if (!ctx)
> return -ENOMEM;
>
> - err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + err = func(i915, vm, 0, min(vm->total, limit), end_time);
> + i915_vm_put(vm);
>
> mock_context_close(ctx);
> return err;
> @@ -1801,15 +1804,15 @@ static int igt_cs_tlb(void *arg)
> goto out_unlock;
> }
>
> - vm = ctx->vm;
> - if (!vm)
> - goto out_unlock;
> + vm = i915_gem_context_get_vm_rcu(ctx);
> + if (i915_is_ggtt(vm))
> + goto out_vm;
>
> /* Create two pages; dummy we prefill the TLB, and intended */
> bbe = i915_gem_object_create_internal(i915, PAGE_SIZE);
> if (IS_ERR(bbe)) {
> err = PTR_ERR(bbe);
> - goto out_unlock;
> + goto out_vm;
> }
>
> batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
> @@ -2014,6 +2017,8 @@ static int igt_cs_tlb(void *arg)
> i915_gem_object_put(act);
> out_put_bbe:
> i915_gem_object_put(bbe);
> +out_vm:
> + i915_vm_put(vm);
> out_unlock:
> mutex_unlock(&i915->drm.struct_mutex);
> mock_file_free(i915, file);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index d7d68c6a6bd5..0897a7b04944 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -181,9 +181,7 @@ static int igt_request_rewind(void *arg)
> struct intel_context *ce;
> int err = -EINVAL;
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx[0] = mock_context(i915, "A");
> - mutex_unlock(&i915->drm.struct_mutex);
>
> ce = i915_gem_context_get_engine(ctx[0], RCS0);
> GEM_BUG_ON(IS_ERR(ce));
> @@ -197,9 +195,7 @@ static int igt_request_rewind(void *arg)
> i915_request_get(request);
> i915_request_add(request);
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx[1] = mock_context(i915, "B");
> - mutex_unlock(&i915->drm.struct_mutex);
>
> ce = i915_gem_context_get_engine(ctx[1], RCS0);
> GEM_BUG_ON(IS_ERR(ce));
> @@ -438,9 +434,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
> }
>
> for (n = 0; n < t.ncontexts; n++) {
> - mutex_lock(&t.engine->i915->drm.struct_mutex);
> t.contexts[n] = mock_context(t.engine->i915, "mock");
> - mutex_unlock(&t.engine->i915->drm.struct_mutex);
> if (!t.contexts[n]) {
> ret = -ENOMEM;
> goto out_contexts;
> @@ -734,9 +728,9 @@ static int live_empty_request(void *arg)
> static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
> {
> struct i915_gem_context *ctx = i915->kernel_context;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> struct drm_i915_gem_object *obj;
> const int gen = INTEL_GEN(i915);
> + struct i915_address_space *vm;
> struct i915_vma *vma;
> u32 *cmd;
> int err;
> @@ -745,7 +739,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
> if (IS_ERR(obj))
> return ERR_CAST(obj);
>
> + vm = i915_gem_context_get_vm_rcu(ctx);
> vma = i915_vma_instance(obj, vm, NULL);
> + i915_vm_put(vm);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> goto err;
> @@ -1220,9 +1216,7 @@ static int live_breadcrumbs_smoketest(void *arg)
> }
>
> for (n = 0; n < t[0].ncontexts; n++) {
> - mutex_lock(&i915->drm.struct_mutex);
> t[0].contexts[n] = live_context(i915, file);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (!t[0].contexts[n]) {
> ret = -ENOMEM;
> goto out_contexts;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
> index ac1ff558eb90..58b5f40a07dd 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_vma.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
> @@ -24,6 +24,7 @@
>
> #include <linux/prime_numbers.h>
>
> +#include "gem/i915_gem_context.h"
> #include "gem/selftests/mock_context.h"
>
> #include "i915_scatterlist.h"
> @@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
> {
> bool ok = true;
>
> - if (vma->vm != ctx->vm) {
> + if (vma->vm != rcu_access_pointer(ctx->vm)) {
> pr_err("VMA created with wrong VM\n");
> ok = false;
> }
> @@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915,
> list_for_each_entry(obj, objects, st_link) {
> for (pinned = 0; pinned <= 1; pinned++) {
> list_for_each_entry(ctx, contexts, link) {
> - struct i915_address_space *vm = ctx->vm;
> + struct i915_address_space *vm;
> struct i915_vma *vma;
> int err;
>
> + vm = i915_gem_context_get_vm_rcu(ctx);
> vma = checked_vma_instance(obj, vm, NULL);
> + i915_vm_put(vm);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index 4e6cde0d4859..335f37ba98de 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -59,11 +59,9 @@ static void mock_device_release(struct drm_device *dev)
>
> i915_gem_drain_workqueue(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> for_each_engine(engine, i915, id)
> mock_engine_free(engine);
> - i915_gem_contexts_fini(i915);
> - mutex_unlock(&i915->drm.struct_mutex);
> + i915_gem_driver_release__contexts(i915);
>
> intel_timelines_fini(i915);
>
> @@ -206,7 +204,7 @@ struct drm_i915_private *mock_gem_device(void)
> return i915;
>
> err_context:
> - i915_gem_contexts_fini(i915);
> + i915_gem_driver_release__contexts(i915);
> err_engine:
> mock_engine_free(i915->engine[RCS0]);
> err_unlock:
>
Regards,
Tvrtko