[Intel-gfx] [PATCH 22/24] drm/i915: Move context management under GEM
Chris Wilson
chris@chris-wilson.co.uk
Mon Jul 15 08:09:44 UTC 2019
Keep track of the GEM contexts underneath i915->gem.contexts and assign
them their own lock (gem.contexts.mutex) for the purposes of list
management, so that walking or updating the context list no longer
depends on struct_mutex.
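
As an illustrative sketch (field names as in the diff below, comments
added here for exposition), the context bookkeeping now lives in its own
small struct and list walkers take the dedicated mutex instead of
struct_mutex:

	struct i915_gem_contexts {
		struct mutex mutex;		/* guards .list */
		struct list_head list;		/* all GEM contexts */
		struct llist_head free_list;	/* closed contexts pending free */
		struct work_struct free_work;	/* worker that reaps free_list */
	};

	mutex_lock(&i915->gem.contexts.mutex);
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		; /* inspect or mark ctx while the list is stable */
	mutex_unlock(&i915->gem.contexts.mutex);

Final frees remain deferred: i915_gem_context_release() pushes the dying
context onto the lock-free free_list and queues free_work only when
llist_add() reports the list was previously empty; the worker then reaps
the whole list without requiring any of the caller's locks.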
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gem/i915_gem_context.c | 116 +++++++-----------
drivers/gpu/drm/i915/gem/i915_gem_context.h | 4 +-
.../gpu/drm/i915/gem/selftests/mock_context.c | 2 +-
drivers/gpu/drm/i915/i915_debugfs.c | 20 +--
drivers/gpu/drm/i915/i915_drv.c | 2 -
drivers/gpu/drm/i915/i915_drv.h | 24 ++--
drivers/gpu/drm/i915/i915_gem.c | 8 +-
drivers/gpu/drm/i915/i915_perf.c | 8 +-
drivers/gpu/drm/i915/i915_sysfs.c | 38 +++---
.../gpu/drm/i915/selftests/mock_gem_device.c | 4 +-
10 files changed, 92 insertions(+), 134 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 2669e038661e..3ee352d389c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -217,9 +217,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ mutex_lock(&ctx->i915->gem.contexts.mutex);
+ list_del(&ctx->link);
+ mutex_unlock(&ctx->i915->gem.contexts.mutex);
+
if (ctx->vm)
i915_vm_put(ctx->vm);
@@ -232,56 +235,40 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
kfree(ctx->name);
put_pid(ctx->pid);
- list_del(&ctx->link);
mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
-static void contexts_free(struct drm_i915_private *i915)
+static void contexts_free_all(struct llist_node *list)
{
- struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
struct i915_gem_context *ctx, *cn;
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- llist_for_each_entry_safe(ctx, cn, freed, free_link)
+ llist_for_each_entry_safe(ctx, cn, list, free_link)
i915_gem_context_free(ctx);
}
-static void contexts_free_first(struct drm_i915_private *i915)
+static void contexts_flush_free(struct i915_gem_contexts *gc)
{
- struct i915_gem_context *ctx;
- struct llist_node *freed;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- freed = llist_del_first(&i915->contexts.free_list);
- if (!freed)
- return;
-
- ctx = container_of(freed, typeof(*ctx), free_link);
- i915_gem_context_free(ctx);
+ contexts_free_all(llist_del_all(&gc->free_list));
}
static void contexts_free_worker(struct work_struct *work)
{
- struct drm_i915_private *i915 =
- container_of(work, typeof(*i915), contexts.free_work);
+ struct i915_gem_contexts *gc =
+ container_of(work, typeof(*gc), free_work);
- mutex_lock(&i915->drm.struct_mutex);
- contexts_free(i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ contexts_flush_free(gc);
}
void i915_gem_context_release(struct kref *ref)
{
struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
- struct drm_i915_private *i915 = ctx->i915;
+ struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;
trace_i915_context_free(ctx);
- if (llist_add(&ctx->free_link, &i915->contexts.free_list))
- queue_work(i915->wq, &i915->contexts.free_work);
+ if (llist_add(&ctx->free_link, &gc->free_list))
+ queue_work(ctx->i915->wq, &gc->free_work);
}
static void context_close(struct i915_gem_context *ctx)
@@ -339,7 +326,6 @@ __create_context(struct drm_i915_private *i915)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &i915->contexts.list);
ctx->i915 = i915;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
@@ -369,6 +355,10 @@ __create_context(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+ mutex_lock(&i915->gem.contexts.mutex);
+ list_add_tail(&ctx->link, &i915->gem.contexts.list);
+ mutex_unlock(&i915->gem.contexts.mutex);
+
return ctx;
err_free:
@@ -399,27 +389,25 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
+i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_context *ctx;
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
- !HAS_EXECLISTS(dev_priv))
+ !HAS_EXECLISTS(i915))
return ERR_PTR(-EINVAL);
- /* Reap the most stale context */
- contexts_free_first(dev_priv);
+ /* Reap the stale contexts */
+ contexts_flush_free(&i915->gem.contexts);
- ctx = __create_context(dev_priv);
+ ctx = __create_context(i915);
if (IS_ERR(ctx))
return ctx;
- if (HAS_FULL_PPGTT(dev_priv)) {
+ if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv);
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -434,7 +422,7 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
struct intel_timeline *timeline;
- timeline = intel_timeline_create(&dev_priv->gt, NULL);
+ timeline = intel_timeline_create(&i915->gt, NULL);
if (IS_ERR(timeline)) {
context_close(ctx);
return ERR_CAST(timeline);
@@ -462,18 +450,13 @@ struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
struct i915_gem_context *ctx;
- int ret;
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
return ERR_PTR(-ENODEV);
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
-
ctx = i915_gem_create_context(to_i915(dev), 0);
if (IS_ERR(ctx))
- goto out;
+ return ctx;
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
@@ -483,8 +466,6 @@ i915_gem_context_create_gvt(struct drm_device *dev)
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
-out:
- mutex_unlock(&dev->struct_mutex);
return ctx;
}
@@ -519,48 +500,40 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
return ctx;
}
-static void init_contexts(struct drm_i915_private *i915)
+static void init_contexts(struct i915_gem_contexts *gc)
{
- mutex_init(&i915->contexts.mutex);
- INIT_LIST_HEAD(&i915->contexts.list);
-
- /* Using the simple ida interface, the max is limited by sizeof(int) */
- BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&i915->contexts.hw_ida);
- INIT_LIST_HEAD(&i915->contexts.hw_id_list);
+ mutex_init(&gc->mutex);
+ INIT_LIST_HEAD(&gc->list);
- INIT_WORK(&i915->contexts.free_work, contexts_free_worker);
- init_llist_head(&i915->contexts.free_list);
+ INIT_WORK(&gc->free_work, contexts_free_worker);
+ init_llist_head(&gc->free_list);
}
-int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
+int i915_gem_init_contexts(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx;
/* Reassure ourselves we are only called once */
- GEM_BUG_ON(dev_priv->kernel_context);
+ GEM_BUG_ON(i915->kernel_context);
- init_contexts(dev_priv);
+ init_contexts(&i915->gem.contexts);
/* lowest priority; idle task */
- ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
+ ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN);
if (IS_ERR(ctx)) {
DRM_ERROR("Failed to create default global context\n");
return PTR_ERR(ctx);
}
- dev_priv->kernel_context = ctx;
+ i915->kernel_context = ctx;
DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(dev_priv)->has_logical_contexts ?
+ DRIVER_CAPS(i915)->has_logical_contexts ?
"logical" : "fake");
return 0;
}
-void i915_gem_contexts_fini(struct drm_i915_private *i915)
+void i915_gem_fini_contexts(struct drm_i915_private *i915)
{
- lockdep_assert_held(&i915->drm.struct_mutex);
-
destroy_kernel_context(&i915->kernel_context);
}
@@ -620,9 +593,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
idr_init(&file_priv->context_idr);
idr_init_base(&file_priv->vm_idr, 1);
- mutex_lock(&i915->drm.struct_mutex);
ctx = i915_gem_create_context(i915, 0);
- mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -2004,12 +1975,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
-
ext_data.ctx = i915_gem_create_context(i915, args->flags);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(ext_data.ctx))
return PTR_ERR(ext_data.ctx);
@@ -2207,7 +2173,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
void *data, struct drm_file *file)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_reset_stats *args = data;
struct i915_gem_context *ctx;
int ret;
@@ -2229,7 +2195,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
*/
if (capable(CAP_SYS_ADMIN))
- args->reset_count = i915_reset_count(&dev_priv->gpu_error);
+ args->reset_count = i915_reset_count(&i915->gpu_error);
else
args->reset_count = 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 6fb3ad7e03fc..738ca18f085b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -118,8 +118,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
}
/* i915_gem_context.c */
-int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
-void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);
+int __must_check i915_gem_init_contexts(struct drm_i915_private *i915);
+void i915_gem_fini_contexts(struct drm_i915_private *i915);
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file);
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 0104f16b1327..e36af5a5ce42 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -65,7 +65,7 @@ void mock_context_close(struct i915_gem_context *ctx)
void mock_init_contexts(struct drm_i915_private *i915)
{
- init_contexts(i915);
+ init_contexts(&i915->gem.contexts);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b7ec9f3cdc2c..bcf617ac2bc0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -242,8 +242,6 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
- lockdep_assert_held(&obj->base.dev->struct_mutex);
-
stats->count++;
stats->total += obj->base.size;
if (!atomic_read(&obj->bind_count))
@@ -251,6 +249,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (obj->base.name || obj->base.dma_buf)
stats->shared += obj->base.size;
+ spin_lock(&obj->vma.lock);
list_for_each_entry(vma, &obj->vma.list, obj_link) {
if (!drm_mm_node_allocated(&vma->node))
continue;
@@ -270,6 +269,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (i915_vma_is_closed(vma))
stats->closed += vma->node.size;
}
+ spin_unlock(&obj->vma.lock);
return 0;
}
@@ -294,7 +294,8 @@ static void print_context_stats(struct seq_file *m,
struct file_stats kstats = {};
struct i915_gem_context *ctx;
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ lockdep_assert_held(&i915->gem.contexts.mutex);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -342,12 +343,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
seq_putc(m, '\n');
- ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
if (ret)
return ret;
print_context_stats(m, i915);
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&i915->gem.contexts.mutex);
return 0;
}
@@ -1567,16 +1568,15 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
static int i915_context_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct i915_gem_context *ctx;
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -1613,7 +1613,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, '\n');
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&i915->gem.contexts.mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7c209743e478..0244421241fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2056,10 +2056,8 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
i915_gem_context_close(file);
i915_gem_release(dev, file);
- mutex_unlock(&dev->struct_mutex);
kfree(file_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8fbedbbb39dc..fbaaaa7c12ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1565,23 +1565,6 @@ struct drm_i915_private {
struct mutex av_mutex;
int audio_power_refcount;
- struct {
- struct mutex mutex;
- struct list_head list;
- struct llist_head free_list;
- struct work_struct free_work;
-
- /* The hw wants to have a stable context identifier for the
- * lifetime of the context (for OA, PASID, faults, etc).
- * This is limited in execlists to 21 bits.
- */
- struct ida hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
-#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
- struct list_head hw_id_list;
- } contexts;
-
u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -1840,6 +1823,13 @@ struct drm_i915_private {
* off the idle_work.
*/
struct work_struct idle_work;
+
+ struct i915_gem_contexts {
+ struct mutex mutex;
+ struct list_head list;
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } contexts;
} gem;
/* For i945gm vblank irq vs. C3 workaround */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 46ed90e59a8d..6ca700189a86 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1469,7 +1469,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_unlock;
}
- ret = i915_gem_contexts_init(dev_priv);
+ ret = i915_gem_init_contexts(dev_priv);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_scratch;
@@ -1557,7 +1557,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
err_context:
if (ret != -EIO)
- i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_contexts(dev_priv);
err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
@@ -1624,7 +1624,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
intel_engines_cleanup(dev_priv);
- i915_gem_contexts_fini(dev_priv);
+ i915_gem_fini_contexts(dev_priv);
i915_gem_fini_scratch(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1638,7 +1638,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->contexts.list));
+ WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
}
void i915_gem_init_mmio(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index df02e0fe0701..60c10dce1cb0 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1871,7 +1871,8 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
* context. Contexts idle at the time of reconfiguration are not
* trapped behind the barrier.
*/
- list_for_each_entry(ctx, &i915->contexts.list, link) {
+ mutex_lock(&i915->gem.contexts.mutex);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link) {
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -1902,8 +1903,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *i915,
}
i915_gem_context_unlock_engines(ctx);
if (err)
- return err;
+ break;
}
+ mutex_unlock(&i915->gem.contexts.mutex);
+ if (err)
+ return err;
/*
* After updating all other contexts, we need to modify ourselves.
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index ecac1c386109..6836e689f025 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -142,9 +142,9 @@ static const struct attribute_group media_rc6_attr_group = {
};
#endif
-static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset)
+static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
- if (!HAS_L3_DPF(dev_priv))
+ if (!HAS_L3_DPF(i915))
return -EPERM;
if (offset % 4 != 0)
@@ -162,31 +162,30 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
int slice = (int)(uintptr_t)attr->private;
int ret;
count = round_down(count, 4);
- ret = l3_access_valid(dev_priv, offset);
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
- ret = i915_mutex_lock_interruptible(dev);
+ ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
if (ret)
return ret;
- if (dev_priv->l3_parity.remap_info[slice])
+ if (i915->l3_parity.remap_info[slice])
memcpy(buf,
- dev_priv->l3_parity.remap_info[slice] + (offset/4),
+ i915->l3_parity.remap_info[slice] + offset / 4,
count);
else
memset(buf, 0, count);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&i915->gem.contexts.mutex);
return count;
}
@@ -197,22 +196,23 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
loff_t offset, size_t count)
{
struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
struct i915_gem_context *ctx;
int slice = (int)(uintptr_t)attr->private;
u32 **remap_info;
int ret;
- ret = l3_access_valid(dev_priv, offset);
+ count = round_down(count, 4);
+
+ ret = l3_access_valid(i915, offset);
if (ret)
return ret;
- ret = i915_mutex_lock_interruptible(dev);
+ ret = mutex_lock_interruptible(&i915->gem.contexts.mutex);
if (ret)
return ret;
- remap_info = &dev_priv->l3_parity.remap_info[slice];
+ remap_info = &i915->l3_parity.remap_info[slice];
if (!*remap_info) {
*remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
if (!*remap_info) {
@@ -221,20 +221,20 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
}
}
- /* TODO: Ideally we really want a GPU reset here to make sure errors
+ /*
+ * TODO: Ideally we really want a GPU reset here to make sure errors
* aren't propagated. Since I cannot find a stable way to reset the GPU
* at this point it is left as a TODO.
*/
memcpy(*remap_info + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->contexts.list, link)
- ctx->remap_slice |= (1<<slice);
+ list_for_each_entry(ctx, &i915->gem.contexts.list, link)
+ ctx->remap_slice |= BIT(slice);
ret = count;
-
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&i915->gem.contexts.mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fd4cc4809eb8..107049afe201 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -65,7 +65,7 @@ static void mock_device_release(struct drm_device *dev)
mutex_lock(&i915->drm.struct_mutex);
for_each_engine(engine, i915, id)
mock_engine_free(engine);
- i915_gem_contexts_fini(i915);
+ i915_gem_fini_contexts(i915);
mutex_unlock(&i915->drm.struct_mutex);
intel_timelines_fini(i915);
@@ -220,7 +220,7 @@ struct drm_i915_private *mock_gem_device(void)
return i915;
err_context:
- i915_gem_contexts_fini(i915);
+ i915_gem_fini_contexts(i915);
err_engine:
mock_engine_free(i915->engine[RCS0]);
err_unlock:
--
2.22.0