[Intel-gfx] [PATCH 21/27] drm/i915: Move context management under GEM
Tvrtko Ursulin
tvrtko.ursulin at linux.intel.com
Thu Sep 26 13:57:24 UTC 2019
On 25/09/2019 11:01, Chris Wilson wrote:
> Keep track of the GEM contexts underneath i915->gem.contexts and assign
> them their own lock for the purposes of list management.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin at intel.com>
> ---
> drivers/gpu/drm/i915/gem/i915_gem_context.c | 155 +++++++-----------
> drivers/gpu/drm/i915/gem/i915_gem_context.h | 4 +-
> .../gpu/drm/i915/gem/i915_gem_context_types.h | 2 +-
> .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 2 +-
> drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 2 +-
> .../gpu/drm/i915/gem/selftests/huge_pages.c | 12 +-
> .../drm/i915/gem/selftests/i915_gem_context.c | 122 ++++++--------
> .../gpu/drm/i915/gem/selftests/mock_context.c | 4 +-
> drivers/gpu/drm/i915/gt/intel_context.c | 8 +-
> drivers/gpu/drm/i915/gt/selftest_context.c | 24 +--
> drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 19 +--
> drivers/gpu/drm/i915/gt/selftest_lrc.c | 4 +-
> .../gpu/drm/i915/gt/selftest_workarounds.c | 10 +-
> drivers/gpu/drm/i915/gvt/scheduler.c | 20 +--
> drivers/gpu/drm/i915/i915_debugfs.c | 50 +++---
> drivers/gpu/drm/i915/i915_drv.c | 2 -
> drivers/gpu/drm/i915/i915_drv.h | 27 +--
> drivers/gpu/drm/i915/i915_gem.c | 10 +-
> drivers/gpu/drm/i915/i915_gem_gtt.c | 4 +-
> drivers/gpu/drm/i915/i915_perf.c | 24 ++-
> drivers/gpu/drm/i915/i915_sysfs.c | 77 ++++-----
> drivers/gpu/drm/i915/i915_trace.h | 2 +-
> drivers/gpu/drm/i915/selftests/i915_gem.c | 8 -
> .../gpu/drm/i915/selftests/i915_gem_evict.c | 3 -
> drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 4 +-
> drivers/gpu/drm/i915/selftests/i915_request.c | 11 +-
> drivers/gpu/drm/i915/selftests/i915_vma.c | 5 +-
> .../gpu/drm/i915/selftests/mock_gem_device.c | 6 +-
> 28 files changed, 272 insertions(+), 349 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 4e59b809d901..a77f439358d7 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -219,9 +219,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
>
> static void i915_gem_context_free(struct i915_gem_context *ctx)
> {
> - lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
>
> + spin_lock(&ctx->i915->gem.contexts.lock);
> + list_del(&ctx->link);
> + spin_unlock(&ctx->i915->gem.contexts.lock);
> +
> free_engines(rcu_access_pointer(ctx->engines));
> mutex_destroy(&ctx->engines_mutex);
>
> @@ -231,56 +234,40 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
> kfree(ctx->name);
> put_pid(ctx->pid);
>
> - list_del(&ctx->link);
> mutex_destroy(&ctx->mutex);
>
> kfree_rcu(ctx, rcu);
> }
>
> -static void contexts_free(struct drm_i915_private *i915)
> +static void contexts_free_all(struct llist_node *list)
> {
> - struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
> struct i915_gem_context *ctx, *cn;
>
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> - llist_for_each_entry_safe(ctx, cn, freed, free_link)
> + llist_for_each_entry_safe(ctx, cn, list, free_link)
> i915_gem_context_free(ctx);
> }
>
> -static void contexts_free_first(struct drm_i915_private *i915)
> +static void contexts_flush_free(struct i915_gem_contexts *gc)
> {
> - struct i915_gem_context *ctx;
> - struct llist_node *freed;
> -
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> - freed = llist_del_first(&i915->contexts.free_list);
> - if (!freed)
> - return;
> -
> - ctx = container_of(freed, typeof(*ctx), free_link);
> - i915_gem_context_free(ctx);
> + contexts_free_all(llist_del_all(&gc->free_list));
> }
>
> static void contexts_free_worker(struct work_struct *work)
> {
> - struct drm_i915_private *i915 =
> - container_of(work, typeof(*i915), contexts.free_work);
> + struct i915_gem_contexts *gc =
> + container_of(work, typeof(*gc), free_work);
>
> - mutex_lock(&i915->drm.struct_mutex);
> - contexts_free(i915);
> - mutex_unlock(&i915->drm.struct_mutex);
> + contexts_flush_free(gc);
> }
>
> void i915_gem_context_release(struct kref *ref)
> {
> struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
> - struct drm_i915_private *i915 = ctx->i915;
> + struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
>
> trace_i915_context_free(ctx);
> - if (llist_add(&ctx->free_link, &i915->contexts.free_list))
> - queue_work(i915->wq, &i915->contexts.free_work);
> + if (llist_add(&ctx->free_link, &gc->free_list))
> + queue_work(ctx->i915->wq, &gc->free_work);
At first I thought gc was some sort of garbage collection list. But it
is a GEM contexts list. :) This hunk looks completely avoidable — I don't
see that it brings much benefit on balance, since in turn it references
ctx->i915 twice. But as you wish.
> }
>
> static inline struct i915_gem_engines *
> @@ -359,8 +346,8 @@ static void context_close(struct i915_gem_context *ctx)
> {
> i915_gem_context_set_closed(ctx);
>
> - if (ctx->vm)
> - i915_vm_close(ctx->vm);
> + if (rcu_access_pointer(ctx->vm))
> + i915_vm_close(rcu_dereference_protected(ctx->vm, true));
>
> mutex_lock(&ctx->mutex);
>
> @@ -394,7 +381,6 @@ __create_context(struct drm_i915_private *i915)
> return ERR_PTR(-ENOMEM);
>
> kref_init(&ctx->ref);
> - list_add_tail(&ctx->link, &i915->contexts.list);
> ctx->i915 = i915;
> ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
> mutex_init(&ctx->mutex);
> @@ -435,6 +421,10 @@ __create_context(struct drm_i915_private *i915)
> for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
> ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
>
> + spin_lock(&i915->gem.contexts.lock);
> + list_add_tail(&ctx->link, &i915->gem.contexts.list);
> + spin_unlock(&i915->gem.contexts.lock);
> +
> return ctx;
>
> err_free:
> @@ -464,11 +454,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm)
> static struct i915_address_space *
> __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
> {
> - struct i915_address_space *old = ctx->vm;
> + struct i915_address_space *old = rcu_dereference_protected(ctx->vm, 1);
>
> GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
>
> - ctx->vm = i915_vm_open(vm);
> + rcu_assign_pointer(ctx->vm, i915_vm_open(vm));
Are the updaters here serialized? I have lost track of whether
struct_mutex is still held in set_ppgtt at this point in the series.
I am thinking of the "old = rcu_dereference_protected..." above and
this assignment.
> context_apply_all(ctx, __apply_ppgtt, vm);
>
> return old;
> @@ -477,7 +467,7 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
> static void __assign_ppgtt(struct i915_gem_context *ctx,
> struct i915_address_space *vm)
> {
> - if (vm == ctx->vm)
> + if (vm == rcu_access_pointer(ctx->vm))
> return;
>
> vm = __set_ppgtt(ctx, vm);
> @@ -509,27 +499,25 @@ static void __assign_timeline(struct i915_gem_context *ctx,
> }
>
> static struct i915_gem_context *
> -i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
> +i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
> {
> struct i915_gem_context *ctx;
>
> - lockdep_assert_held(&dev_priv->drm.struct_mutex);
> -
> if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
> - !HAS_EXECLISTS(dev_priv))
> + !HAS_EXECLISTS(i915))
> return ERR_PTR(-EINVAL);
>
> - /* Reap the most stale context */
> - contexts_free_first(dev_priv);
> + /* Reap the stale contexts */
> + contexts_flush_free(&i915->gem.contexts);
>
> - ctx = __create_context(dev_priv);
> + ctx = __create_context(i915);
> if (IS_ERR(ctx))
> return ctx;
>
> - if (HAS_FULL_PPGTT(dev_priv)) {
> + if (HAS_FULL_PPGTT(i915)) {
> struct i915_ppgtt *ppgtt;
>
> - ppgtt = i915_ppgtt_create(dev_priv);
> + ppgtt = i915_ppgtt_create(i915);
> if (IS_ERR(ppgtt)) {
> DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
> PTR_ERR(ppgtt));
> @@ -544,7 +532,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
> if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
> struct intel_timeline *timeline;
>
> - timeline = intel_timeline_create(&dev_priv->gt, NULL);
> + timeline = intel_timeline_create(&i915->gt, NULL);
> if (IS_ERR(timeline)) {
> context_close(ctx);
> return ERR_CAST(timeline);
> @@ -590,48 +578,40 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
> return ctx;
> }
>
> -static void init_contexts(struct drm_i915_private *i915)
> +static void init_contexts(struct i915_gem_contexts *gc)
> {
> - mutex_init(&i915->contexts.mutex);
> - INIT_LIST_HEAD(&i915->contexts.list);
> -
> - /* Using the simple ida interface, the max is limited by sizeof(int) */
> - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
> - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
> - ida_init(&i915->contexts.hw_ida);
> - INIT_LIST_HEAD(&i915->contexts.hw_id_list);
This (and some more bits) belong to the hw_id removal patch.
> + spin_lock_init(&gc->lock);
> + INIT_LIST_HEAD(&gc->list);
>
> - INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
> - init_llist_head(&i915->contexts.free_list);
> + INIT_WORK(&gc->free_work, contexts_free_worker);
> + init_llist_head(&gc->free_list);
> }
>
> -int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
> +int i915_gem_init_contexts(struct drm_i915_private *i915)
> {
> struct i915_gem_context *ctx;
>
> /* Reassure ourselves we are only called once */
> - GEM_BUG_ON(dev_priv->kernel_context);
> + GEM_BUG_ON(i915->kernel_context);
>
> - init_contexts(dev_priv);
> + init_contexts(&i915->gem.contexts);
>
> /* lowest priority; idle task */
> - ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
> + ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
> if (IS_ERR(ctx)) {
> DRM_ERROR("Failed to create default global context\n");
> return PTR_ERR(ctx);
> }
> - dev_priv->kernel_context = ctx;
> + i915->kernel_context = ctx;
>
> DRM_DEBUG_DRIVER("%s context support initialized\n",
> - DRIVER_CAPS(dev_priv)->has_logical_contexts ?
> + DRIVER_CAPS(i915)->has_logical_contexts ?
> "logical" : "fake");
> return 0;
> }
>
> -void i915_gem_contexts_fini(struct drm_i915_private *i915)
> +void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
> {
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> destroy_kernel_context(&i915->kernel_context);
> }
>
> @@ -653,8 +633,8 @@ static int gem_context_register(struct i915_gem_context *ctx,
> int ret;
>
> ctx->file_priv = fpriv;
> - if (ctx->vm)
> - ctx->vm->file = fpriv;
> + if (rcu_access_pointer(ctx->vm))
> + rcu_dereference_protected(ctx->vm, true)->file = fpriv;
Here rcu accesses are just to satisfy sparse?
>
> ctx->pid = get_task_pid(current, PIDTYPE_PID);
> ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
> @@ -691,9 +671,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
> idr_init(&file_priv->context_idr);
> idr_init_base(&file_priv->vm_idr, 1);
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = i915_gem_create_context(i915, 0);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto err;
> @@ -721,6 +699,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
> void i915_gem_context_close(struct drm_file *file)
> {
> struct drm_i915_file_private *file_priv = file->driver_priv;
> + struct drm_i915_private *i915 = file_priv->dev_priv;
>
> idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
> idr_destroy(&file_priv->context_idr);
> @@ -729,6 +708,8 @@ void i915_gem_context_close(struct drm_file *file)
> idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
> idr_destroy(&file_priv->vm_idr);
> mutex_destroy(&file_priv->vm_idr_lock);
> +
> + contexts_flush_free(&i915->gem.contexts);
> }
>
> int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
> @@ -907,16 +888,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
> struct i915_address_space *vm;
> int ret;
>
> - if (!ctx->vm)
> + if (!rcu_access_pointer(ctx->vm))
> return -ENODEV;
>
> - /* XXX rcu acquire? */
> - ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
> - if (ret)
> - return ret;
> -
> + rcu_read_lock();
> vm = i915_vm_get(ctx->vm);
> - mutex_unlock(&ctx->i915->drm.struct_mutex);
> + rcu_read_unlock();
>
> ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
> if (ret)
> @@ -1025,7 +1002,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> if (args->size)
> return -EINVAL;
>
> - if (!ctx->vm)
> + if (!rcu_access_pointer(ctx->vm))
> return -ENODEV;
>
> if (upper_32_bits(args->value))
> @@ -1039,17 +1016,15 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> if (!vm)
> return -ENOENT;
>
> - err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
> + err = mutex_lock_interruptible(&ctx->mutex);
> if (err)
> goto out;
>
> - if (vm == ctx->vm)
> + if (vm == rcu_access_pointer(ctx->vm))
> goto unlock;
>
> /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
> - mutex_lock(&ctx->mutex);
> lut_close(ctx);
> - mutex_unlock(&ctx->mutex);
>
> old = __set_ppgtt(ctx, vm);
>
> @@ -1069,8 +1044,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
> }
>
> unlock:
> - mutex_unlock(&ctx->i915->drm.struct_mutex);
> -
> + mutex_unlock(&ctx->mutex);
> out:
> i915_vm_put(vm);
> return err;
> @@ -1953,7 +1927,7 @@ static int clone_vm(struct i915_gem_context *dst,
>
> rcu_read_lock();
> do {
> - vm = READ_ONCE(src->vm);
> + vm = rcu_dereference(src->vm);
> if (!vm)
> break;
>
> @@ -1975,7 +1949,7 @@ static int clone_vm(struct i915_gem_context *dst,
> * it cannot be reallocated elsewhere.
> */
>
> - if (vm == READ_ONCE(src->vm))
> + if (vm == rcu_access_pointer(src->vm))
> break;
>
> i915_vm_put(vm);
> @@ -2077,12 +2051,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
> return -EIO;
> }
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> -
> ext_data.ctx = i915_gem_create_context(i915, args->flags);
> - mutex_unlock(&dev->struct_mutex);
> if (IS_ERR(ext_data.ctx))
> return PTR_ERR(ext_data.ctx);
>
> @@ -2209,10 +2178,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>
> case I915_CONTEXT_PARAM_GTT_SIZE:
> args->size = 0;
> - if (ctx->vm)
> - args->value = ctx->vm->total;
> + rcu_read_lock();
> + if (rcu_access_pointer(ctx->vm))
> + args->value = rcu_dereference(ctx->vm)->total;
> else
> args->value = to_i915(dev)->ggtt.vm.total;
> + rcu_read_unlock();
> break;
>
> case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
> @@ -2283,7 +2254,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
> int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
> void *data, struct drm_file *file)
> {
> - struct drm_i915_private *dev_priv = to_i915(dev);
> + struct drm_i915_private *i915 = to_i915(dev);
> struct drm_i915_reset_stats *args = data;
> struct i915_gem_context *ctx;
> int ret;
> @@ -2305,7 +2276,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
> */
>
> if (capable(CAP_SYS_ADMIN))
> - args->reset_count = i915_reset_count(&dev_priv->gpu_error);
> + args->reset_count = i915_reset_count(&i915->gpu_error);
> else
> args->reset_count = 0;
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> index 1b0df53436cf..4ee5dfc5794e 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> @@ -133,8 +133,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
> }
>
> /* i915_gem_context.c */
> -int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
> -void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
> +int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
> +void i915_gem_driver_release__contexts(struct drm_i915_private *i915);
>
> int i915_gem_context_open(struct drm_i915_private *i915,
> struct drm_file *file);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> index 6419da7c9f90..a3ecd19f2303 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> @@ -88,7 +88,7 @@ struct i915_gem_context {
> * In other modes, this is a NULL pointer with the expectation that
> * the caller uses the shared global GTT.
> */
> - struct i915_address_space *vm;
> + struct i915_address_space __rcu *vm;
>
> /**
> * @pid: process id of creator
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> index 88a881be12ec..98816c35ffc3 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> @@ -728,7 +728,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
> return -ENOENT;
>
> eb->gem_context = ctx;
> - if (ctx->vm)
> + if (rcu_access_pointer(ctx->vm))
> eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
>
> eb->context_flags = 0;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> index 27573ef903b4..7c225e1d2a11 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> @@ -757,7 +757,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
> * On almost all of the older hw, we cannot tell the GPU that
> * a page is readonly.
> */
> - vm = dev_priv->kernel_context->vm;
> + vm = rcu_dereference_protected(dev_priv->kernel_context->vm, true);
Hm, should it not be rcu_dereference here?
> if (!vm || !vm->has_read_only)
> return -ENODEV;
> }
> diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> index 98b2a6ccfcc1..e204e653b459 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> @@ -1322,7 +1322,7 @@ static int igt_ppgtt_pin_update(void *arg)
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *dev_priv = ctx->i915;
> unsigned long supported = INTEL_INFO(dev_priv)->page_sizes;
> - struct i915_address_space *vm = ctx->vm;
> + struct i915_address_space *vm = rcu_dereference_protected(ctx->vm, 1);
> struct drm_i915_gem_object *obj;
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
> @@ -1460,7 +1460,8 @@ static int igt_tmpfs_fallback(void *arg)
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *i915 = ctx->i915;
> struct vfsmount *gemfs = i915->mm.gemfs;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> + struct i915_address_space *vm =
> + rcu_dereference_protected(ctx->vm, true) ?: &i915->ggtt.vm;
> struct drm_i915_gem_object *obj;
> struct i915_vma *vma;
> u32 *vaddr;
> @@ -1517,7 +1518,8 @@ static int igt_shrink_thp(void *arg)
> {
> struct i915_gem_context *ctx = arg;
> struct drm_i915_private *i915 = ctx->i915;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> + struct i915_address_space *vm =
> + rcu_dereference_protected(ctx->vm, true) ?: &i915->ggtt.vm;
> struct drm_i915_gem_object *obj;
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
> @@ -1699,8 +1701,8 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
> goto out_unlock;
> }
>
> - if (ctx->vm)
> - ctx->vm->scrub_64K = true;
> + if (rcu_access_pointer(ctx->vm))
> + rcu_dereference_protected(ctx->vm, true)->scrub_64K = true;
>
> err = i915_subtests(tests, ctx);
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index a8a9293f4ac6..7de1c0078d6f 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -53,19 +53,17 @@ static int live_nop_switch(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
> if (!ctx) {
> err = -ENOMEM;
> - goto out_unlock;
> + goto out_file;
> }
>
> for (n = 0; n < nctx; n++) {
> ctx[n] = live_context(i915, file);
> if (IS_ERR(ctx[n])) {
> err = PTR_ERR(ctx[n]);
> - goto out_unlock;
> + goto out_file;
> }
> }
>
> @@ -79,7 +77,7 @@ static int live_nop_switch(void *arg)
> rq = igt_request_alloc(ctx[n], engine);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> - goto out_unlock;
> + goto out_file;
> }
> i915_request_add(rq);
> }
> @@ -87,7 +85,7 @@ static int live_nop_switch(void *arg)
> pr_err("Failed to populated %d contexts\n", nctx);
> intel_gt_set_wedged(&i915->gt);
> err = -EIO;
> - goto out_unlock;
> + goto out_file;
> }
>
> times[1] = ktime_get_raw();
> @@ -97,7 +95,7 @@ static int live_nop_switch(void *arg)
>
> err = igt_live_test_begin(&t, i915, __func__, engine->name);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> end_time = jiffies + i915_selftest.timeout_jiffies;
> for_each_prime_number_from(prime, 2, 8192) {
> @@ -107,7 +105,7 @@ static int live_nop_switch(void *arg)
> rq = igt_request_alloc(ctx[n % nctx], engine);
> if (IS_ERR(rq)) {
> err = PTR_ERR(rq);
> - goto out_unlock;
> + goto out_file;
> }
>
> /*
> @@ -143,7 +141,7 @@ static int live_nop_switch(void *arg)
>
> err = igt_live_test_end(&t);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
> engine->name,
> @@ -151,8 +149,7 @@ static int live_nop_switch(void *arg)
> prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
> }
>
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> +out_file:
> mock_file_free(i915, file);
> return err;
> }
> @@ -412,11 +409,9 @@ static int igt_ctx_exec(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, engine->name);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ncontexts = 0;
> ndwords = 0;
> @@ -428,7 +423,7 @@ static int igt_ctx_exec(void *arg)
> ctx = kernel_context(i915);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
> @@ -440,7 +435,7 @@ static int igt_ctx_exec(void *arg)
> err = PTR_ERR(obj);
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
> }
>
> @@ -449,17 +444,18 @@ static int igt_ctx_exec(void *arg)
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> engine->name,
> - yesno(!!ctx->vm), err);
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> err = throttle(ce, tq, ARRAY_SIZE(tq));
> if (err) {
> intel_context_put(ce);
> kernel_context_close(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (++dw == max_dwords(obj)) {
> @@ -489,11 +485,10 @@ static int igt_ctx_exec(void *arg)
> dw += rem;
> }
>
> -out_unlock:
> +out_file:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
>
> mock_file_free(i915, file);
> if (err)
> @@ -528,22 +523,20 @@ static int igt_shared_ctx_exec(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> parent = live_context(i915, file);
> if (IS_ERR(parent)) {
> err = PTR_ERR(parent);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (!parent->vm) { /* not full-ppgtt; nothing to share */
> err = 0;
> - goto out_unlock;
> + goto out_file;
> }
>
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> for_each_engine(engine, i915, id) {
> unsigned long ncontexts, ndwords, dw;
> @@ -587,7 +580,8 @@ static int igt_shared_ctx_exec(void *arg)
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> engine->name,
> - yesno(!!ctx->vm), err);
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> intel_context_put(ce);
> kernel_context_close(ctx);
> goto out_test;
> @@ -626,17 +620,13 @@ static int igt_shared_ctx_exec(void *arg)
> dw += rem;
> }
>
> - mutex_unlock(&i915->drm.struct_mutex);
> i915_gem_drain_freed_objects(i915);
> - mutex_lock(&i915->drm.struct_mutex);
> }
> out_test:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> +out_file:
> mock_file_free(i915, file);
> return err;
> }
> @@ -1008,8 +998,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
> if (flags & TEST_RESET)
> igt_global_reset_lock(&i915->gt);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> ret = PTR_ERR(ctx);
> @@ -1064,8 +1052,6 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
> i915_gem_object_put(obj);
>
> out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> if (flags & TEST_RESET)
> igt_global_reset_unlock(&i915->gt);
>
> @@ -1125,23 +1111,24 @@ static int igt_ctx_readonly(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ctx = live_context(i915, file);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> - vm = ctx->vm ?: &i915->ggtt.alias->vm;
> + rcu_read_lock();
> + vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm;
> if (!vm || !vm->has_read_only) {
> + rcu_read_unlock();
> err = 0;
> - goto out_unlock;
> + goto out_file;
> }
> + rcu_read_unlock();
>
> ndwords = 0;
> dw = 0;
> @@ -1159,7 +1146,7 @@ static int igt_ctx_readonly(void *arg)
> if (IS_ERR(obj)) {
> err = PTR_ERR(obj);
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (prandom_u32_state(&prng) & 1)
> @@ -1170,15 +1157,17 @@ static int igt_ctx_readonly(void *arg)
> if (err) {
> pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
> ndwords, dw, max_dwords(obj),
> - ce->engine->name, yesno(!!ctx->vm), err);
> + ce->engine->name,
> + yesno(!!rcu_access_pointer(ctx->vm)),
> + err);
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> err = throttle(ce, tq, ARRAY_SIZE(tq));
> if (err) {
> i915_gem_context_unlock_engines(ctx);
> - goto out_unlock;
> + goto out_file;
> }
>
> if (++dw == max_dwords(obj)) {
> @@ -1210,11 +1199,10 @@ static int igt_ctx_readonly(void *arg)
> dw += rem;
> }
>
> -out_unlock:
> +out_file:
> throttle_release(tq, ARRAY_SIZE(tq));
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
>
> mock_file_free(i915, file);
> return err;
> @@ -1223,7 +1211,7 @@ static int igt_ctx_readonly(void *arg)
> static int check_scratch(struct i915_gem_context *ctx, u64 offset)
> {
> struct drm_mm_node *node =
> - __drm_mm_interval_first(&ctx->vm->mm,
> + __drm_mm_interval_first(&rcu_dereference_protected(ctx->vm, true)->mm,
> offset, offset + sizeof(u32) - 1);
> if (!node || node->start > offset)
> return 0;
> @@ -1273,7 +1261,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
>
> intel_gt_chipset_flush(engine->gt);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vma = i915_vma_instance(obj,
> + rcu_dereference_protected(ctx->vm, true),
> + NULL);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> goto err;
> @@ -1372,7 +1362,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
>
> intel_gt_chipset_flush(engine->gt);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vma = i915_vma_instance(obj,
> + rcu_dereference_protected(ctx->vm, true),
> + NULL);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> goto err;
> @@ -1463,27 +1455,25 @@ static int igt_vm_isolation(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> err = igt_live_test_begin(&t, i915, __func__, "");
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> ctx_a = live_context(i915, file);
> if (IS_ERR(ctx_a)) {
> err = PTR_ERR(ctx_a);
> - goto out_unlock;
> + goto out_file;
> }
>
> ctx_b = live_context(i915, file);
> if (IS_ERR(ctx_b)) {
> err = PTR_ERR(ctx_b);
> - goto out_unlock;
> + goto out_file;
> }
>
> /* We can only test vm isolation, if the vm are distinct */
> if (ctx_a->vm == ctx_b->vm)
> - goto out_unlock;
> + goto out_file;
>
> vm_total = ctx_a->vm->total;
> GEM_BUG_ON(ctx_b->vm->total != vm_total);
> @@ -1512,7 +1502,7 @@ static int igt_vm_isolation(void *arg)
> err = read_from_scratch(ctx_b, engine,
> offset, &value);
> if (err)
> - goto out_unlock;
> + goto out_file;
>
> if (value) {
> pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
> @@ -1521,7 +1511,7 @@ static int igt_vm_isolation(void *arg)
> lower_32_bits(offset),
> this);
> err = -EINVAL;
> - goto out_unlock;
> + goto out_file;
> }
>
> this++;
> @@ -1531,11 +1521,9 @@ static int igt_vm_isolation(void *arg)
> pr_info("Checked %lu scratch offsets across %d engines\n",
> count, RUNTIME_INFO(i915)->num_engines);
>
> -out_unlock:
> +out_file:
> if (igt_live_test_end(&t))
> err = -EIO;
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> mock_file_free(i915, file);
> return err;
> }
> @@ -1567,13 +1555,9 @@ static int mock_context_barrier(void *arg)
> * a request; useful for retiring old state after loading new.
> */
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = mock_context(i915, "mock");
> - if (!ctx) {
> - err = -ENOMEM;
> - goto unlock;
> - }
> + if (!ctx)
> + return -ENOMEM;
>
> counter = 0;
> err = context_barrier_task(ctx, 0,
> @@ -1646,8 +1630,6 @@ static int mock_context_barrier(void *arg)
>
> out:
> mock_context_close(ctx);
> -unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> return err;
> #undef pr_fmt
> #define pr_fmt(x) x
> diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> index ebc46f098561..6ef86e7923a7 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> @@ -67,7 +67,7 @@ void mock_context_close(struct i915_gem_context *ctx)
>
> void mock_init_contexts(struct drm_i915_private *i915)
> {
> - init_contexts(i915);
> + init_contexts(&i915->gem.contexts);
> }
>
> struct i915_gem_context *
> @@ -76,8 +76,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file)
> struct i915_gem_context *ctx;
> int err;
>
> - lockdep_assert_held(&i915->drm.struct_mutex);
> -
> ctx = i915_gem_create_context(i915, 0);
> if (IS_ERR(ctx))
> return ctx;
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
> index 35a40c2820a2..107eb03d6dd4 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.c
> +++ b/drivers/gpu/drm/i915/gt/intel_context.c
> @@ -226,7 +226,13 @@ intel_context_init(struct intel_context *ce,
> kref_init(&ce->ref);
>
> ce->gem_context = ctx;
> - ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
> + rcu_read_lock();
> + ce->vm = rcu_dereference(ctx->vm);
> + if (ce->vm && !kref_get_unless_zero(&ce->vm->ref))
> + ce->vm = NULL;
> + if (!ce->vm)
> + ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
> + rcu_read_unlock();
> if (ctx->timeline)
> ce->timeline = intel_timeline_get(ctx->timeline);
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
> index 86cffbb0a9cb..7c838a57e174 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_context.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_context.c
> @@ -155,13 +155,9 @@ static int live_context_size(void *arg)
> * HW tries to write past the end of one.
> */
>
> - mutex_lock(>->i915->drm.struct_mutex);
> -
> fixme = kernel_context(gt->i915);
> - if (IS_ERR(fixme)) {
> - err = PTR_ERR(fixme);
> - goto unlock;
> - }
> + if (IS_ERR(fixme))
> + return PTR_ERR(fixme);
>
> for_each_engine(engine, gt->i915, id) {
> struct {
> @@ -201,8 +197,6 @@ static int live_context_size(void *arg)
> }
>
> kernel_context_close(fixme);
> -unlock:
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> return err;
> }
>
> @@ -305,12 +299,10 @@ static int live_active_context(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> -
> fixme = live_context(gt->i915, file);
> if (IS_ERR(fixme)) {
> err = PTR_ERR(fixme);
> - goto unlock;
> + goto out_file;
> }
>
> for_each_engine(engine, gt->i915, id) {
> @@ -323,8 +315,7 @@ static int live_active_context(void *arg)
> break;
> }
>
> -unlock:
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> +out_file:
> mock_file_free(gt->i915, file);
> return err;
> }
> @@ -418,12 +409,10 @@ static int live_remote_context(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> -
> fixme = live_context(gt->i915, file);
> if (IS_ERR(fixme)) {
> err = PTR_ERR(fixme);
> - goto unlock;
> + goto out_file;
> }
>
> for_each_engine(engine, gt->i915, id) {
> @@ -436,8 +425,7 @@ static int live_remote_context(void *arg)
> break;
> }
>
> -unlock:
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> +out_file:
> mock_file_free(gt->i915, file);
> return err;
> }
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index f2b5ab1e5a2e..1b836ff0a756 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -58,9 +58,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
> memset(h, 0, sizeof(*h));
> h->gt = gt;
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> h->ctx = kernel_context(gt->i915);
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(h->ctx))
> return PTR_ERR(h->ctx);
>
> @@ -133,7 +131,8 @@ static struct i915_request *
> hang_create_request(struct hang *h, struct intel_engine_cs *engine)
> {
> struct intel_gt *gt = h->gt;
> - struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
> + struct i915_address_space *vm =
> + rcu_dereference_protected(h->ctx->vm, true) ?: &engine->gt->ggtt->vm;
> struct drm_i915_gem_object *obj;
> struct i915_request *rq = NULL;
> struct i915_vma *hws, *vma;
> @@ -382,9 +381,7 @@ static int igt_reset_nop(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> @@ -458,9 +455,7 @@ static int igt_reset_nop_engine(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> @@ -705,9 +700,7 @@ static int active_engine(void *data)
> return PTR_ERR(file);
>
> for (count = 0; count < ARRAY_SIZE(ctx); count++) {
> - mutex_lock(&engine->i915->drm.struct_mutex);
> ctx[count] = live_context(engine->i915, file);
> - mutex_unlock(&engine->i915->drm.struct_mutex);
> if (IS_ERR(ctx[count])) {
> err = PTR_ERR(ctx[count]);
> while (--count)
> @@ -1298,18 +1291,18 @@ static int igt_reset_evict_ppgtt(void *arg)
> if (IS_ERR(file))
> return PTR_ERR(file);
>
> - mutex_lock(&gt->i915->drm.struct_mutex);
> ctx = live_context(gt->i915, file);
> - mutex_unlock(&gt->i915->drm.struct_mutex);
> if (IS_ERR(ctx)) {
> err = PTR_ERR(ctx);
> goto out;
> }
>
> err = 0;
> - if (ctx->vm) /* aliasing == global gtt locking, covered above */
> - err = __igt_reset_evict_vma(gt, ctx->vm,
> + if (rcu_access_pointer(ctx->vm)) {
> + /* aliasing == global gtt locking, covered above */
> + err = __igt_reset_evict_vma(gt, rcu_dereference_protected(ctx->vm, true),
> evict_vma, EXEC_OBJECT_WRITE);
> + }
>
> out:
> mock_file_free(gt->i915, file);
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 3300acd9199d..e33c2f66c683 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -1453,7 +1453,9 @@ static int smoke_submit(struct preempt_smoke *smoke,
> int err = 0;
>
> if (batch) {
> - vma = i915_vma_instance(batch, ctx->vm, NULL);
> + vma = i915_vma_instance(batch,
> + rcu_dereference_protected(ctx->vm, true),
> + NULL);
> if (IS_ERR(vma))
> return PTR_ERR(vma);
>
> diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> index 06351fefbbf3..ed44333d9e20 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
> @@ -33,6 +33,8 @@ struct wa_lists {
> } engine[I915_NUM_ENGINES];
> };
>
> +#define ctx_vm(ctx) rcu_dereference_protected((ctx)->vm, true)
> +
> static void
> reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
> {
> @@ -362,7 +364,7 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx)
> if (IS_ERR(obj))
> return ERR_CAST(obj);
>
> - vma = i915_vma_instance(obj, ctx->vm, NULL);
> + vma = i915_vma_instance(obj, ctx_vm(ctx), NULL);
> if (IS_ERR(vma)) {
> err = PTR_ERR(vma);
> goto err_obj;
> @@ -468,7 +470,7 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx,
> int err = 0, i, v;
> u32 *cs, *results;
>
> - scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
> + scratch = create_scratch(ctx_vm(ctx), 2 * ARRAY_SIZE(values) + 1);
> if (IS_ERR(scratch))
> return PTR_ERR(scratch);
>
> @@ -1018,14 +1020,14 @@ static int live_isolated_whitelist(void *arg)
> goto err;
> }
>
> - client[i].scratch[0] = create_scratch(c->vm, 1024);
> + client[i].scratch[0] = create_scratch(ctx_vm(c), 1024);
> if (IS_ERR(client[i].scratch[0])) {
> err = PTR_ERR(client[i].scratch[0]);
> kernel_context_close(c);
> goto err;
> }
>
> - client[i].scratch[1] = create_scratch(c->vm, 1024);
> + client[i].scratch[1] = create_scratch(ctx_vm(c), 1024);
> if (IS_ERR(client[i].scratch[1])) {
> err = PTR_ERR(client[i].scratch[1]);
> i915_vma_unpin_and_release(&client[i].scratch[0], 0);
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 03f567084548..ce5cff01f44c 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -365,7 +365,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
> struct i915_gem_context *ctx)
> {
> struct intel_vgpu_mm *mm = workload->shadow_mm;
> - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
> + struct i915_ppgtt *ppgtt =
> + i915_vm_to_ppgtt(rcu_dereference_protected(ctx->vm, true));
> int i = 0;
>
> if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
> @@ -1230,20 +1231,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> struct intel_vgpu_submission *s = &vgpu->submission;
> struct intel_engine_cs *engine;
> struct i915_gem_context *ctx;
> + struct i915_ppgtt *ppgtt;
> enum intel_engine_id i;
> int ret;
>
> - mutex_lock(&i915->drm.struct_mutex);
> -
> ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX);
> - if (IS_ERR(ctx)) {
> - ret = PTR_ERR(ctx);
> - goto out_unlock;
> - }
> + if (IS_ERR(ctx))
> + return PTR_ERR(ctx);
>
> i915_gem_context_set_force_single_submission(ctx);
>
> - i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm));
> + ppgtt = i915_vm_to_ppgtt(rcu_dereference_protected(ctx->vm, true));
> + i915_context_ppgtt_root_save(s, ppgtt);
>
> for_each_engine(engine, i915, i) {
> struct intel_context *ce;
> @@ -1289,11 +1288,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
>
> i915_gem_context_put(ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> return 0;
>
> out_shadow_ctx:
> - i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm));
> + i915_context_ppgtt_root_restore(s, ppgtt);
> for_each_engine(engine, i915, i) {
> if (IS_ERR(s->shadow[i]))
> break;
> @@ -1302,8 +1300,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
> intel_context_put(s->shadow[i]);
> }
> i915_gem_context_put(ctx);
> -out_unlock:
> - mutex_unlock(&i915->drm.struct_mutex);
> return ret;
> }
>
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 8caaa446490f..d4a7cda60679 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -315,12 +315,18 @@ static void print_context_stats(struct seq_file *m,
> struct drm_i915_private *i915)
> {
> struct file_stats kstats = {};
> - struct i915_gem_context *ctx;
> + struct i915_gem_context *ctx, *cn;
>
> - list_for_each_entry(ctx, &i915->contexts.list, link) {
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> for_each_gem_engine(ce,
> i915_gem_context_lock_engines(ctx), it) {
> intel_context_lock_pinned(ce);
> @@ -337,7 +343,9 @@ static void print_context_stats(struct seq_file *m,
> i915_gem_context_unlock_engines(ctx);
>
> if (!IS_ERR_OR_NULL(ctx->file_priv)) {
> - struct file_stats stats = { .vm = ctx->vm, };
> + struct file_stats stats = {
> + .vm = rcu_access_pointer(ctx->vm),
> + };
> struct drm_file *file = ctx->file_priv->file;
> struct task_struct *task;
> char name[80];
> @@ -354,7 +362,12 @@ static void print_context_stats(struct seq_file *m,
>
> print_file_stats(m, name, stats);
> }
> +
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> }
> + spin_unlock(&i915->gem.contexts.lock);
>
> print_file_stats(m, "[k]contexts", kstats);
> }
> @@ -362,7 +375,6 @@ static void print_context_stats(struct seq_file *m,
> static int i915_gem_object_info(struct seq_file *m, void *data)
> {
> struct drm_i915_private *i915 = node_to_i915(m->private);
> - int ret;
>
> seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
> i915->mm.shrink_count,
> @@ -371,12 +383,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>
> seq_putc(m, '\n');
>
> - ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
> - if (ret)
> - return ret;
> -
> print_context_stats(m, i915);
> - mutex_unlock(&i915->drm.struct_mutex);
>
> return 0;
> }
> @@ -1490,19 +1497,19 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
>
> static int i915_context_status(struct seq_file *m, void *unused)
> {
> - struct drm_i915_private *dev_priv = node_to_i915(m->private);
> - struct drm_device *dev = &dev_priv->drm;
> - struct i915_gem_context *ctx;
> - int ret;
> -
> - ret = mutex_lock_interruptible(&dev->struct_mutex);
> - if (ret)
> - return ret;
> + struct drm_i915_private *i915 = node_to_i915(m->private);
> + struct i915_gem_context *ctx, *cn;
>
> - list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> struct i915_gem_engines_iter it;
> struct intel_context *ce;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> seq_puts(m, "HW context ");
> if (ctx->pid) {
> struct task_struct *task;
> @@ -1537,9 +1544,12 @@ static int i915_context_status(struct seq_file *m, void *unused)
> i915_gem_context_unlock_engines(ctx);
>
> seq_putc(m, '\n');
> - }
>
> - mutex_unlock(&dev->struct_mutex);
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> + }
> + spin_unlock(&i915->gem.contexts.lock);
>
> return 0;
> }
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index 8cac6124d16a..a8ddac22d633 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1685,10 +1685,8 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
> {
> struct drm_i915_file_private *file_priv = file->driver_priv;
>
> - mutex_lock(&dev->struct_mutex);
> i915_gem_context_close(file);
> i915_gem_release(dev, file);
> - mutex_unlock(&dev->struct_mutex);
>
> kfree_rcu(file_priv, rcu);
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index ad7ed7f72da8..23f311172e75 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1543,25 +1543,6 @@ struct drm_i915_private {
> int audio_power_refcount;
> u32 audio_freq_cntrl;
>
> - struct {
> - struct mutex mutex;
> - struct list_head list;
> - struct llist_head free_list;
> - struct work_struct free_work;
> -
> - /* The hw wants to have a stable context identifier for the
> - * lifetime of the context (for OA, PASID, faults, etc).
> - * This is limited in execlists to 21 bits.
> - */
> - struct ida hw_ida;
> -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
> -#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
> -#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
> -/* in Gen12 ID 0x7FF is reserved to indicate idle */
> -#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1)
> - struct list_head hw_id_list;
These bits.
> - } contexts;
> -
> u32 fdi_rx_config;
>
> /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
> @@ -1717,6 +1698,14 @@ struct drm_i915_private {
>
> struct {
> struct notifier_block pm_notifier;
> +
> + struct i915_gem_contexts {
> + spinlock_t lock; /* locks list */
> + struct list_head list;
> +
> + struct llist_head free_list;
> + struct work_struct free_work;
> + } contexts;
> } gem;
>
> /* For i945gm vblank irq vs. C3 workaround */
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 9c2877441540..8814db7023db 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1263,7 +1263,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
> goto err_unlock;
> }
>
> - ret = i915_gem_contexts_init(dev_priv);
> + ret = i915_gem_init_contexts(dev_priv);
> if (ret) {
> GEM_BUG_ON(ret == -EIO);
> goto err_scratch;
> @@ -1345,7 +1345,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
> }
> err_context:
> if (ret != -EIO)
> - i915_gem_contexts_fini(dev_priv);
> + i915_gem_driver_release__contexts(dev_priv);
> err_scratch:
> intel_gt_driver_release(&dev_priv->gt);
> err_unlock:
> @@ -1413,11 +1413,9 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
>
> void i915_gem_driver_release(struct drm_i915_private *dev_priv)
> {
> - mutex_lock(&dev_priv->drm.struct_mutex);
> intel_engines_cleanup(dev_priv);
> - i915_gem_contexts_fini(dev_priv);
> + i915_gem_driver_release__contexts(dev_priv);
> intel_gt_driver_release(&dev_priv->gt);
> - mutex_unlock(&dev_priv->drm.struct_mutex);
>
> intel_wa_list_free(&dev_priv->gt_wa_list);
>
> @@ -1427,7 +1425,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
>
> i915_gem_drain_freed_objects(dev_priv);
>
> - WARN_ON(!list_empty(&dev_priv->contexts.list));
> + WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
> }
>
> void i915_gem_init_mmio(struct drm_i915_private *i915)
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 0d040517787b..db32e0fe78b1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -1366,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm)
> if (vm->has_read_only &&
> vm->i915->kernel_context &&
> vm->i915->kernel_context->vm) {
> - struct i915_address_space *clone = vm->i915->kernel_context->vm;
> + struct i915_address_space *clone =
> + rcu_dereference_protected(vm->i915->kernel_context->vm,
> + true);
What is the protection here?
>
> GEM_BUG_ON(!clone->has_read_only);
>
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index d36ba248d438..bfe6e9b6d71d 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -1853,8 +1853,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> };
> #undef ctx_flexeuN
> struct intel_engine_cs *engine;
> - struct i915_gem_context *ctx;
> - int i;
> + struct i915_gem_context *ctx, *cn;
> + int i, err;
>
> for (i = 2; i < ARRAY_SIZE(regs); i++)
> regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg);
> @@ -1877,16 +1877,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> * context. Contexts idle at the time of reconfiguration are not
> * trapped behind the barrier.
> */
> - list_for_each_entry(ctx, &i915->contexts.list, link) {
> - int err;
> -
> + spin_lock(&i915->gem.contexts.lock);
> + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
> if (ctx == i915->kernel_context)
> continue;
>
> + if (!kref_get_unless_zero(&ctx->ref))
> + continue;
> +
> + spin_unlock(&i915->gem.contexts.lock);
> +
> err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs));
> - if (err)
> + if (err) {
> + i915_gem_context_put(ctx);
> return err;
> + }
> +
> + spin_lock(&i915->gem.contexts.lock);
> + list_safe_reset_next(ctx, cn, link);
> + i915_gem_context_put(ctx);
> }
> + spin_unlock(&i915->gem.contexts.lock);
>
> /*
> * After updating all other contexts, we need to modify ourselves.
> @@ -1895,7 +1906,6 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream,
> */
> for_each_uabi_engine(engine, i915) {
> struct intel_context *ce = engine->kernel_context;
> - int err;
>
> if (engine->class != RENDER_CLASS)
> continue;
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index 6b88d934927a..585808c2ee84 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -144,9 +144,9 @@ static const struct attribute_group media_rc6_attr_group = {
> };
> #endif
>
> -static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
> +static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
> {
> - if (!HAS_L3_DPF(dev_priv))
> + if (!HAS_L3_DPF(i915))
> return -EPERM;
>
> if (offset % 4 != 0)
> @@ -164,31 +164,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
> loff_t offset, size_t count)
> {
> struct device *kdev = kobj_to_dev(kobj);
> - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
> - struct drm_device *dev = &dev_priv->drm;
> + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
> int slice = (int)(uintptr_t)attr->private;
> int ret;
>
> - count = round_down(count, 4);
> -
> - ret = l3_access_valid(dev_priv, offset);
> + ret = l3_access_valid(i915, offset);
> if (ret)
> return ret;
>
> + count = round_down(count, 4);
> count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
> + memset(buf, 0, count);
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> -
> - if (dev_priv->l3_parity.remap_info[slice])
> + spin_lock(&i915->gem.contexts.lock);
> + if (i915->l3_parity.remap_info[slice])
> memcpy(buf,
> - dev_priv->l3_parity.remap_info[slice] + (offset/4),
> + i915->l3_parity.remap_info[slice] + offset / 4,
> count);
> - else
> - memset(buf, 0, count);
> -
> - mutex_unlock(&dev->struct_mutex);
> + spin_unlock(&i915->gem.contexts.lock);
>
> return count;
> }
> @@ -199,46 +192,46 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
> loff_t offset, size_t count)
> {
> struct device *kdev = kobj_to_dev(kobj);
> - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
> - struct drm_device *dev = &dev_priv->drm;
> + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
> struct i915_gem_context *ctx;
> int slice = (int)(uintptr_t)attr->private;
> - u32 **remap_info;
> + u32 *remap_info, *freeme = NULL;
> int ret;
>
> - ret = l3_access_valid(dev_priv, offset);
> + ret = l3_access_valid(i915, offset);
> if (ret)
> return ret;
>
> - ret = i915_mutex_lock_interruptible(dev);
> - if (ret)
> - return ret;
> + remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
> + if (!remap_info)
> + return -ENOMEM;
>
> - remap_info = &dev_priv->l3_parity.remap_info[slice];
> - if (!*remap_info) {
> - *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
> - if (!*remap_info) {
> - ret = -ENOMEM;
> - goto out;
> - }
> + spin_lock(&i915->gem.contexts.lock);
> +
> + if (i915->l3_parity.remap_info[slice]) {
> + freeme = remap_info;
> + remap_info = i915->l3_parity.remap_info[slice];
> + } else {
> + i915->l3_parity.remap_info[slice] = remap_info;
> }
>
> - /* TODO: Ideally we really want a GPU reset here to make sure errors
> - * aren't propagated. Since I cannot find a stable way to reset the GPU
> - * at this point it is left as a TODO.
> - */
> - memcpy(*remap_info + (offset/4), buf, count);
> + count = round_down(count, 4);
Where was this round_down before, can't see it!
> + memcpy(remap_info + offset / 4, buf, count);
>
> /* NB: We defer the remapping until we switch to the context */
> - list_for_each_entry(ctx, &dev_priv->contexts.list, link)
> - ctx->remap_slice |= (1<<slice);
> + list_for_each_entry(ctx, &i915->gem.contexts.list, link)
> + ctx->remap_slice |= BIT(slice);
>
> - ret = count;
> + spin_unlock(&i915->gem.contexts.lock);
> + kfree(freeme);
>
> -out:
> - mutex_unlock(&dev->struct_mutex);
> + /*
> + * TODO: Ideally we really want a GPU reset here to make sure errors
> + * aren't propagated. Since I cannot find a stable way to reset the GPU
> + * at this point it is left as a TODO.
> + */
>
> - return ret;
> + return count;
> }
>
> static const struct bin_attribute dpf_attrs = {
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 1f2cf6cfafb5..7ef7a1e1664c 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -952,7 +952,7 @@ DECLARE_EVENT_CLASS(i915_context,
> TP_fast_assign(
> __entry->dev = ctx->i915->drm.primary->index;
> __entry->ctx = ctx;
> - __entry->vm = ctx->vm;
> + __entry->vm = rcu_access_pointer(ctx->vm);
> ),
>
> TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
> index 0346c3e5b6b6..bfa40a5b6d98 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
> @@ -138,11 +138,9 @@ static int igt_gem_suspend(void *arg)
> return PTR_ERR(file);
>
> err = -ENOMEM;
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = live_context(i915, file);
> if (!IS_ERR(ctx))
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (err)
> goto out;
>
> @@ -157,9 +155,7 @@ static int igt_gem_suspend(void *arg)
>
> pm_resume(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> out:
> mock_file_free(i915, file);
> return err;
> @@ -177,11 +173,9 @@ static int igt_gem_hibernate(void *arg)
> return PTR_ERR(file);
>
> err = -ENOMEM;
> - mutex_lock(&i915->drm.struct_mutex);
> ctx = live_context(i915, file);
> if (!IS_ERR(ctx))
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (err)
> goto out;
>
> @@ -196,9 +190,7 @@ static int igt_gem_hibernate(void *arg)
>
> pm_resume(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> err = switch_to_context(i915, ctx);
> - mutex_unlock(&i915->drm.struct_mutex);
> out:
> mock_file_free(i915, file);
> return err;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> index f39f0282e78c..0af9a58d011d 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
> @@ -473,7 +473,6 @@ static int igt_evict_contexts(void *arg)
> }
>
> count = 0;
> - mutex_lock(&i915->drm.struct_mutex);
> onstack_fence_init(&fence);
> do {
> struct i915_request *rq;
> @@ -510,8 +509,6 @@ static int igt_evict_contexts(void *arg)
> count++;
> err = 0;
> } while(1);
> - mutex_unlock(&i915->drm.struct_mutex);
> -
> onstack_fence_fini(&fence);
> pr_info("Submitted %lu contexts/requests on %s\n",
> count, engine->name);
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> index 9939dd40c6a8..6777a335bd44 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> @@ -1246,6 +1246,7 @@ static int exercise_mock(struct drm_i915_private *i915,
> unsigned long end_time))
> {
> const u64 limit = totalram_pages() << PAGE_SHIFT;
> + struct i915_address_space *vm;
> struct i915_gem_context *ctx;
> IGT_TIMEOUT(end_time);
> int err;
> @@ -1254,7 +1255,8 @@ static int exercise_mock(struct drm_i915_private *i915,
> if (!ctx)
> return -ENOMEM;
>
> - err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time);
> + vm = rcu_dereference_protected(ctx->vm, true);
> + err = func(i915, vm, 0, min(vm->total, limit), end_time);
>
> mock_context_close(ctx);
> return err;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index d1ee384a3260..4abf52733520 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -181,9 +181,7 @@ static int igt_request_rewind(void *arg)
> struct intel_context *ce;
> int err = -EINVAL;
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx[0] = mock_context(i915, "A");
> - mutex_unlock(&i915->drm.struct_mutex);
>
> ce = i915_gem_context_get_engine(ctx[0], RCS0);
> GEM_BUG_ON(IS_ERR(ce));
> @@ -197,9 +195,7 @@ static int igt_request_rewind(void *arg)
> i915_request_get(request);
> i915_request_add(request);
>
> - mutex_lock(&i915->drm.struct_mutex);
> ctx[1] = mock_context(i915, "B");
> - mutex_unlock(&i915->drm.struct_mutex);
>
> ce = i915_gem_context_get_engine(ctx[1], RCS0);
> GEM_BUG_ON(IS_ERR(ce));
> @@ -438,9 +434,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
> }
>
> for (n = 0; n < t.ncontexts; n++) {
> - mutex_lock(&t.engine->i915->drm.struct_mutex);
> t.contexts[n] = mock_context(t.engine->i915, "mock");
> - mutex_unlock(&t.engine->i915->drm.struct_mutex);
> if (!t.contexts[n]) {
> ret = -ENOMEM;
> goto out_contexts;
> @@ -734,7 +728,8 @@ static int live_empty_request(void *arg)
> static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
> {
> struct i915_gem_context *ctx = i915->kernel_context;
> - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
> + struct i915_address_space *vm =
> + rcu_dereference_protected(ctx->vm, true) ?: &i915->ggtt.vm;
> struct drm_i915_gem_object *obj;
> const int gen = INTEL_GEN(i915);
> struct i915_vma *vma;
> @@ -1109,9 +1104,7 @@ static int live_breadcrumbs_smoketest(void *arg)
> }
>
> for (n = 0; n < t[0].ncontexts; n++) {
> - mutex_lock(&i915->drm.struct_mutex);
> t[0].contexts[n] = live_context(i915, file);
> - mutex_unlock(&i915->drm.struct_mutex);
> if (!t[0].contexts[n]) {
> ret = -ENOMEM;
> goto out_contexts;
> diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
> index ac1ff558eb90..f6e6b0aae38c 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_vma.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
> @@ -38,7 +38,7 @@ static bool assert_vma(struct i915_vma *vma,
> {
> bool ok = true;
>
> - if (vma->vm != ctx->vm) {
> + if (vma->vm != rcu_access_pointer(ctx->vm)) {
> pr_err("VMA created with wrong VM\n");
> ok = false;
> }
> @@ -113,7 +113,8 @@ static int create_vmas(struct drm_i915_private *i915,
> list_for_each_entry(obj, objects, st_link) {
> for (pinned = 0; pinned <= 1; pinned++) {
> list_for_each_entry(ctx, contexts, link) {
> - struct i915_address_space *vm = ctx->vm;
> + struct i915_address_space *vm =
> + rcu_dereference_protected(ctx->vm, true);
> struct i915_vma *vma;
> int err;
>
> diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> index efe40c29fcf2..0ed21a7b8682 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
> @@ -59,11 +59,9 @@ static void mock_device_release(struct drm_device *dev)
>
> i915_gem_drain_workqueue(i915);
>
> - mutex_lock(&i915->drm.struct_mutex);
> for_each_engine(engine, i915, id)
> mock_engine_free(engine);
> - i915_gem_contexts_fini(i915);
> - mutex_unlock(&i915->drm.struct_mutex);
> + i915_gem_driver_release__contexts(i915);
>
> intel_timelines_fini(i915);
>
> @@ -207,7 +205,7 @@ struct drm_i915_private *mock_gem_device(void)
> return i915;
>
> err_context:
> - i915_gem_contexts_fini(i915);
> + i915_gem_driver_release__contexts(i915);
> err_engine:
> mock_engine_free(i915->engine[RCS0]);
> err_unlock:
>
Regards,
Tvrtko
More information about the Intel-gfx
mailing list