[Intel-gfx] [PATCH 154/190] drm/i915: Move per-request pid from request to ctx
Chris Wilson
chris at chris-wilson.co.uk
Mon Jan 11 03:00:55 PST 2016
Since contexts are not currently shared between userspace processes, we
have an exact correspondence between context creator and guilty batch
submitter. Therefore we can save some per-batch work by inspecting the
context->pid upon error instead. Note that we take the context's
creator's pid rather than the file's pid in order to better track fds
passed over sockets.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_debugfs.c | 21 ++++++++++++---------
drivers/gpu/drm/i915/i915_drv.h | 2 ++
drivers/gpu/drm/i915/i915_gem_context.c | 5 +++++
drivers/gpu/drm/i915/i915_gem_request.c | 5 -----
drivers/gpu/drm/i915/i915_gem_request.h | 3 ---
drivers/gpu/drm/i915/i915_gpu_error.c | 13 ++++++++++---
6 files changed, 29 insertions(+), 20 deletions(-)
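For reference, the pid handling pattern applied by this patch is roughly
the following (a minimal sketch, not part of the patch itself; the helper
names below are made up for illustration): take a reference on the context
creator's pid at creation time, resolve it to a task under RCU whenever it
is reported, and drop the reference when the context is freed.

	/* Illustrative sketch only; context_record_owner(),
	 * show_context_owner() and context_release_owner() are
	 * made-up helpers, not functions added by this patch.
	 */
	#include <linux/pid.h>
	#include <linux/rcupdate.h>
	#include <linux/sched.h>
	#include <linux/seq_file.h>

	/* At context creation: pin the creator's pid, not the drm file's,
	 * so the owner is still identifiable after the fd is passed over
	 * a socket. */
	static void context_record_owner(struct intel_context *ctx)
	{
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
	}

	/* When reporting (debugfs, error state): resolve pid -> task under
	 * RCU, since the creator may already have exited. */
	static void show_context_owner(struct seq_file *m,
				       struct intel_context *ctx)
	{
		struct task_struct *task;

		rcu_read_lock();
		task = ctx->pid ? pid_task(ctx->pid, PIDTYPE_PID) : NULL;
		seq_printf(m, "(%s [%d]) ",
			   task ? task->comm : "<unknown>",
			   ctx->pid ? pid_nr(ctx->pid) : 0);
		rcu_read_unlock();
	}

	/* At context destruction: drop the reference.
	 * put_pid(NULL) is a no-op. */
	static void context_release_owner(struct intel_context *ctx)
	{
		put_pid(ctx->pid);
		ctx->pid = NULL;
	}

This replaces the previous per-request get_pid()/put_pid() pair with a
single reference held for the lifetime of the context.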
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f15ed7793969..4cd05b730b4c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -480,6 +480,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
print_batch_pool_stats(m, dev_priv);
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_request *request;
struct task_struct *task;
memset(&stats, 0, sizeof(stats));
@@ -493,8 +495,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
* still alive (e.g. get_pid(current) => fork() => exit()).
* Therefore, we need to protect this ->comm access using RCU.
*/
+ request = list_first_entry_or_null(&file_priv->mm.request_list,
+ struct drm_i915_gem_request,
+ client_list);
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
+ task = pid_task(request && request->ctx->pid ?
+ request->ctx->pid : file->pid,
+ PIDTYPE_PID);
print_file_stats(m, task ? task->comm : "<unknown>", stats);
rcu_read_unlock();
}
@@ -681,12 +688,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
seq_printf(m, "%s requests: %d\n", ring->name, count);
list_for_each_entry(req, &ring->request_list, link) {
+ struct pid *pid = req->ctx->pid;
struct task_struct *task;
rcu_read_lock();
- task = NULL;
- if (req->pid)
- task = pid_task(req->pid, PIDTYPE_PID);
+ task = pid ? pid_task(pid, PIDTYPE_PID) : NULL;
seq_printf(m, " %x @ %d: %s [%d]\n",
req->fence.seqno,
(int) (jiffies - req->emitted_jiffies),
@@ -1953,13 +1959,10 @@ static int i915_context_status(struct seq_file *m, void *unused)
continue;
seq_puts(m, "HW context ");
- if (IS_ERR(ctx->file_priv)) {
- seq_puts(m, "(deleted) ");
- } else if (ctx->file_priv) {
- struct pid *pid = ctx->file_priv->file->pid;
+ if (ctx->pid) {
struct task_struct *task;
- task = get_pid_task(pid, PIDTYPE_PID);
+ task = get_pid_task(ctx->pid, PIDTYPE_PID);
if (task) {
seq_printf(m, "(%s [%d]) ",
task->comm, task->pid);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 84693d4c4e52..dcff2f2066d0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -565,6 +565,7 @@ struct drm_i915_error_state {
struct drm_i915_error_request {
long jiffies;
+ pid_t pid;
u32 seqno;
u32 head;
u32 tail;
@@ -878,6 +879,7 @@ struct intel_context {
struct drm_i915_file_private *file_priv;
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
+ struct pid *pid;
unsigned flags;
#define CONTEXT_NO_ZEROMAP (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 0a5f1d5fa788..b57112db1c3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -147,6 +147,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (ctx->legacy_hw_ctx.rcs_state)
drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base);
+
+ put_pid(ctx->pid);
list_del(&ctx->link);
kfree(ctx);
}
@@ -256,6 +258,9 @@ __create_hw_context(struct drm_device *dev,
ret = DEFAULT_CONTEXT_HANDLE;
ctx->file_priv = file_priv;
+ if (file_priv)
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+
ctx->user_handle = ret;
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 74be71e7d113..d922b78614bd 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -298,8 +298,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
- req->pid = get_pid(task_pid(current));
-
return 0;
}
@@ -315,9 +313,6 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
-
- put_pid(request->pid);
- request->pid = NULL;
}
static void __i915_gem_request_release(struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index d87136edf117..67bc1f919af0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -86,9 +86,6 @@ struct drm_i915_gem_request {
/** file_priv list entry for this request */
struct list_head client_list;
- /** process identifier submitting this request */
- struct pid *pid;
-
/** Execlist link in the submission queue.*/
struct list_head execlist_link; /* guarded by engine->execlist_lock */
};
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 494dee1f724d..f3c428d5627b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -457,7 +457,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
dev_priv->ring[i].name,
error->ring[i].num_requests);
for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, head 0x%08x tail 0x%08x\n",
+ err_printf(m, " pid %d, seqno 0x%08x, emitted %ld, head 0x%08x tail 0x%08x\n",
+ error->ring[i].requests[j].pid,
error->ring[i].requests[j].seqno,
error->ring[i].requests[j].jiffies,
error->ring[i].requests[j].head,
@@ -983,6 +984,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
if (request) {
struct i915_address_space *vm;
struct intel_ring *ring;
+ struct pid *pid;
vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
@@ -1002,11 +1004,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
i915_error_object_create(dev_priv,
engine->scratch.vma);
- if (request->pid) {
+ pid = request->ctx->pid;
+ if (pid) {
struct task_struct *task;
rcu_read_lock();
- task = pid_task(request->pid, PIDTYPE_PID);
+ task = pid_task(pid, PIDTYPE_PID);
if (task) {
strcpy(error->ring[i].comm, task->comm);
error->ring[i].pid = task->pid;
@@ -1070,6 +1073,10 @@ static void i915_gem_record_rings(struct drm_device *dev,
erq->jiffies = request->emitted_jiffies;
erq->head = request->head;
erq->tail = request->tail;
+
+ rcu_read_lock();
+ erq->pid = request->ctx ? pid_nr(request->ctx->pid) : 0;
+ rcu_read_unlock();
}
}
}
--
2.7.0.rc3