[Intel-gfx] [PATCH 35/66] drm/i915: Create VMAs (part 4) - Error capture
Ben Widawsky
ben at bwidawsk.net
Fri Jun 28 01:30:36 CEST 2013
Since the active/inactive lists are per-VM, we need to make the error
capture code aware of this, and also extend it to capture the buffers
from all the VMs. For now, all of the code assumes only one VM, but it
will become more generic over the next few patches.
NOTE: If the number of VMs in a real-world system grows significantly,
we'll have to focus on capturing only the guilty VM, or else it's likely
there won't be enough space for error capture.
Signed-off-by: Ben Widawsky <ben at bwidawsk.net>
---
drivers/gpu/drm/i915/i915_debugfs.c | 8 +--
drivers/gpu/drm/i915/i915_drv.h | 4 +-
drivers/gpu/drm/i915/i915_irq.c | 104 ++++++++++++++++++++++++------------
3 files changed, 77 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index cf50389..7d01fb6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -842,13 +842,13 @@ static int i915_error_state(struct i915_error_state_file_priv *error_priv,
if (error->active_bo)
print_error_buffers(m, "Active",
- error->active_bo,
- error->active_bo_count);
+ error->active_bo[0],
+ error->active_bo_count[0]);
if (error->pinned_bo)
print_error_buffers(m, "Pinned",
- error->pinned_bo,
- error->pinned_bo_count);
+ error->pinned_bo[0],
+ error->pinned_bo_count[0]);
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
struct drm_i915_error_object *obj;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9042376..b0d1008 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -320,8 +320,8 @@ struct drm_i915_error_state {
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
- } *active_bo, *pinned_bo;
- u32 active_bo_count, pinned_bo_count;
+ } **active_bo, **pinned_bo;
+ u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 050eea3..b786fcd 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1607,7 +1607,11 @@ i915_error_state_free(struct kref *error_ref)
kfree(error->ring[i].requests);
}
+ /* FIXME: Assume always 1 VM for now */
+ kfree(error->active_bo[0]);
kfree(error->active_bo);
+ kfree(error->active_bo_count);
+ kfree(error->pinned_bo_count);
kfree(error->overlay);
kfree(error->display);
kfree(error);
@@ -1705,6 +1709,7 @@ static struct drm_i915_error_object *
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
struct intel_ring_buffer *ring)
{
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
u32 seqno;
@@ -1727,20 +1732,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
}
seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, ggtt_list(active_list), mm_list) {
- if (obj->ring != ring)
- continue;
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
+ if (obj->ring != ring)
+ continue;
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
+ if (!(obj->base.read_domains & I915_GEM_DOMAIN_COMMAND))
+ continue;
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj);
+ /* We need to copy these to an anonymous buffer as the
+ * simplest method to avoid being overwritten by
+ * userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
}
return NULL;
@@ -1855,40 +1863,70 @@ static void i915_gem_record_rings(struct drm_device *dev,
}
}
-static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct i915_address_space *vm,
+ const int ndx)
{
+ struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
struct drm_i915_gem_object *obj;
int i;
i = 0;
- list_for_each_entry(obj, ggtt_list(active_list), mm_list)
+ list_for_each_entry(obj, &vm->active_list, mm_list)
i++;
- error->active_bo_count = i;
+ error->active_bo_count[ndx] = i;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
i++;
- error->pinned_bo_count = i - error->active_bo_count;
+ error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
if (i) {
- error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
- GFP_ATOMIC);
- if (error->active_bo)
- error->pinned_bo =
- error->active_bo + error->active_bo_count;
- }
-
- if (error->active_bo)
- error->active_bo_count =
- capture_active_bo(error->active_bo,
- error->active_bo_count,
- ggtt_list(active_list));
-
- if (error->pinned_bo)
- error->pinned_bo_count =
- capture_pinned_bo(error->pinned_bo,
- error->pinned_bo_count,
+ active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+ if (active_bo)
+ pinned_bo = active_bo + error->active_bo_count[ndx];
+ }
+
+ if (active_bo)
+ error->active_bo_count[ndx] =
+ capture_active_bo(active_bo,
+ error->active_bo_count[ndx],
+ &vm->active_list);
+
+ if (pinned_bo)
+ error->pinned_bo_count[ndx] =
+ capture_pinned_bo(pinned_bo,
+ error->pinned_bo_count[ndx],
&dev_priv->mm.bound_list);
+ error->active_bo[ndx] = active_bo;
+ error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm;
+ int cnt = 0;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ cnt++;
+
+ if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
+ cnt = 1;
+
+ vm = &dev_priv->gtt.base;
+
+ error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+ error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+ error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+ GFP_ATOMIC);
+ error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+ GFP_ATOMIC);
+
+ i915_gem_capture_vm(dev_priv, error, vm, 0);
}
/**
--
1.8.3.1
More information about the Intel-gfx
mailing list