[Intel-gfx] [PATCH 1/3] drm/i915: Group all the global context information together
Chris Wilson
chris@chris-wilson.co.uk
Wed Mar 29 18:28:19 UTC 2017
Create a substruct to hold all the global context state under
drm_i915_private.
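
In effect, the scattered context fields collapse into a single substruct; a
condensed sketch of the result (field names as in the diff below, other
members of drm_i915_private elided):

	struct drm_i915_private {
		...
		struct {
			struct list_head list;	/* was dev_priv->context_list */
			u32 hw_size;		/* was dev_priv->hw_context_size */

			/* The hw wants a stable context identifier for the
			 * lifetime of the context (for OA, PASID, faults, etc).
			 * This is limited in execlists to 21 bits.
			 */
			struct ida hw_ida;	/* was dev_priv->context_hw_ida */
	#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */
		} contexts;
		...
	};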
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_debugfs.c | 4 +--
drivers/gpu/drm/i915/i915_drv.c | 2 +-
drivers/gpu/drm/i915/i915_drv.h | 20 +++++++-------
drivers/gpu/drm/i915/i915_gem.c | 1 -
drivers/gpu/drm/i915/i915_gem_context.c | 34 +++++++++++++-----------
drivers/gpu/drm/i915/i915_sysfs.c | 2 +-
drivers/gpu/drm/i915/intel_lrc.c | 2 +-
drivers/gpu/drm/i915/selftests/mock_context.c | 2 +-
drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2 +-
9 files changed, 36 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8a7f57318a87..a75d848901d4 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1956,7 +1956,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
seq_printf(m, "HW context %u ", ctx->hw_id);
if (ctx->pid) {
struct task_struct *task;
@@ -2062,7 +2062,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
if (ret)
return ret;
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
for_each_engine(engine, dev_priv, id)
i915_dump_lrc_obj(m, ctx, engine);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e98f6c90efe0..111874c3a140 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -560,7 +560,7 @@ static void i915_gem_fini(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->context_list));
+ WARN_ON(!list_empty(&dev_priv->contexts.list));
}
static int i915_load_modeset_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f263715f65c9..45bc9a65ec53 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2287,13 +2287,6 @@ struct drm_i915_private {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
- /* The hw wants to have a stable context identifier for the lifetime
- * of the context (for OA, PASID, faults, etc). This is limited
- * in execlists to 21 bits.
- */
- struct ida context_hw_ida;
-#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
-
/* Kernel Modesetting */
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -2372,8 +2365,17 @@ struct drm_i915_private {
*/
struct mutex av_mutex;
- uint32_t hw_context_size;
- struct list_head context_list;
+ struct {
+ struct list_head list;
+ u32 hw_size;
+
+ /* The hw wants to have a stable context identifier for the
+ * lifetime of the context (for OA, PASID, faults, etc).
+ * This is limited in execlists to 21 bits.
+ */
+ struct ida hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+ } contexts;
u32 fdi_rx_config;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 10f2d26cb2a9..fef212821994 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4784,7 +4784,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
if (err)
goto err_dependencies;
- INIT_LIST_HEAD(&dev_priv->context_list);
INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
init_llist_head(&dev_priv->mm.free_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 5aab9f97385c..c0f3acece66a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -214,7 +214,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
list_del(&ctx->link);
- ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
+ ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
kfree(ctx);
}
@@ -270,7 +270,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
int ret;
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0) {
/* Contexts are only released when no longer active.
@@ -278,7 +278,7 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
* stale contexts and try again.
*/
i915_gem_retire_requests(dev_priv);
- ret = ida_simple_get(&dev_priv->context_hw_ida,
+ ret = ida_simple_get(&dev_priv->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -330,7 +330,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
}
kref_init(&ctx->ref);
- list_add_tail(&ctx->link, &dev_priv->context_list);
+ list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->vma_lut.ht_bits = VMA_HT_BITS;
@@ -344,11 +344,11 @@ __create_hw_context(struct drm_i915_private *dev_priv,
INIT_WORK(&ctx->vma_lut.resize, resize_vma_ht);
- if (dev_priv->hw_context_size) {
+ if (dev_priv->contexts.hw_size) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = alloc_context_obj(dev_priv, dev_priv->hw_context_size);
+ obj = alloc_context_obj(dev_priv, dev_priv->contexts.hw_size);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
goto err_lut;
@@ -511,6 +511,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
if (WARN_ON(dev_priv->kernel_context))
return 0;
+ INIT_LIST_HEAD(&dev_priv->contexts.list);
+
if (intel_vgpu_active(dev_priv) &&
HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
if (!i915.enable_execlists) {
@@ -521,20 +523,20 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
/* Using the simple ida interface, the max is limited by sizeof(int) */
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
- ida_init(&dev_priv->context_hw_ida);
+ ida_init(&dev_priv->contexts.hw_ida);
if (i915.enable_execlists) {
/* NB: intentionally left blank. We will allocate our own
* backing objects as we need them, thank you very much */
- dev_priv->hw_context_size = 0;
+ dev_priv->contexts.hw_size = 0;
} else if (HAS_HW_CONTEXTS(dev_priv)) {
- dev_priv->hw_context_size =
+ dev_priv->contexts.hw_size =
round_up(get_context_size(dev_priv),
I915_GTT_PAGE_SIZE);
- if (dev_priv->hw_context_size > (1<<20)) {
+ if (dev_priv->contexts.hw_size > (1<<20)) {
DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
- dev_priv->hw_context_size);
- dev_priv->hw_context_size = 0;
+ dev_priv->contexts.hw_size);
+ dev_priv->contexts.hw_size = 0;
}
}
@@ -558,7 +560,7 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("%s context support initialized\n",
i915.enable_execlists ? "LR" :
- dev_priv->hw_context_size ? "HW" : "fake");
+ dev_priv->contexts.hw_size ? "HW" : "fake");
return 0;
}
@@ -583,7 +585,7 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv)
if (!i915.enable_execlists) {
struct i915_gem_context *ctx;
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
if (!i915_gem_context_is_default(ctx))
continue;
@@ -613,7 +615,7 @@ void i915_gem_context_fini(struct drm_i915_private *dev_priv)
context_close(dctx);
dev_priv->kernel_context = NULL;
- ida_destroy(&dev_priv->context_hw_ida);
+ ida_destroy(&dev_priv->contexts.hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
@@ -1023,7 +1025,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
static bool contexts_enabled(struct drm_device *dev)
{
- return i915.enable_execlists || to_i915(dev)->hw_context_size;
+ return i915.enable_execlists || to_i915(dev)->contexts.hw_size;
}
static bool client_is_banned(struct drm_i915_file_private *file_priv)
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index f3fdfda5e558..94929f7fb998 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -214,7 +214,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);
/* NB: We defer the remapping until we switch to the context */
- list_for_each_entry(ctx, &dev_priv->context_list, link)
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link)
ctx->remap_slice |= (1<<slice);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c631fc1f..0596ad517273 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2016,7 +2016,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
* So to avoid that we reset the context images upon resume. For
* simplicity, we just zero everything out.
*/
- list_for_each_entry(ctx, &dev_priv->context_list, link) {
+ list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
for_each_engine(engine, dev_priv, id) {
struct intel_context *ce = &ctx->engine[engine->id];
u32 *reg;
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index f8b9cc212b02..243325b97d4c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -48,7 +48,7 @@ mock_context(struct drm_i915_private *i915,
if (!ctx->vma_lut.ht)
goto err_free;
- ret = ida_simple_get(&i915->context_hw_ida,
+ ret = ida_simple_get(&i915->contexts.hw_ida,
0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
if (ret < 0)
goto err_vma_ht;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 6a8258eacdcb..a356db346ac7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -156,7 +156,7 @@ struct drm_i915_private *mock_gem_device(void)
INIT_LIST_HEAD(&i915->mm.unbound_list);
INIT_LIST_HEAD(&i915->mm.bound_list);
- ida_init(&i915->context_hw_ida);
+ ida_init(&i915->contexts.hw_ida);
INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
--
2.11.0