[Intel-gfx] [RFC 4/5] drm/i915: Output the user virtual addresses for GFX buffers
Praveen Paneri
praveen.paneri at intel.com
Thu Mar 22 14:14:57 UTC 2018
From: Sourab Gupta <sourab.gupta at intel.com>
This patch adds the functionality to output the user virtual addresses
at which each GFX buffer is mapped into a process address space (via
interfaces such as mmap and mmap_gtt). The addresses are tracked per
process (tgid) alongside the existing per-pid object info and included
in the per-process object statistics output.
Signed-off-by: Sourab Gupta <sourab.gupta at intel.com>
Signed-off-by: Akash Goel <akash.goel at intel.com>
Signed-off-by: Nidhi Gupta <nidhi1.gupta at intel.com>
Signed-off-by: Praveen Paneri <praveen.paneri at intel.com>
---
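For reference, a minimal userspace sketch (not part of this patch) of the
two mapping paths whose addresses get recorded below. It uses the standard
i915 uAPI from the installed kernel headers; the fd is assumed to be an
open i915 DRM device node, "handle" a valid GEM handle, and the function
names are illustrative only:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Path 1: CPU mmap. i915_gem_mmap_ioctl() records the returned VA
 * untagged at ioctl time.
 */
static void *gem_mmap_cpu(int fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.size = size;
        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg))
                return MAP_FAILED;
        return (void *)(uintptr_t)arg.addr_ptr;
}

/* Path 2: GTT mmap. The ioctl only hands back a fake offset to mmap();
 * the VA is recorded (tagged with bit 0) when i915_gem_fault() services
 * the first fault on the mapping.
 */
static void *gem_mmap_gtt(int fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap_gtt arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                return MAP_FAILED;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, arg.offset);
}

The CPU path records the address directly from the ioctl handler, while
the GTT path records it lazily from the fault handler; that is why the
insert there passes is_map_gtt = true (and is_mutex_locked = true, as
struct_mutex is held at that point).
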
drivers/gpu/drm/i915/i915_gem.c | 112 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 107 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2cb6c98..25845652 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -85,10 +85,16 @@ struct pid_stat_entry {
 	int pid_num;
 };
 
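+/*
+ * One entry per distinct user virtual address at which a given process
+ * (tgid) has the object mapped; bit 0 of user_virt_addr tags mappings
+ * created through mmap_gtt (see i915_obj_insert_virt_addr()).
+ */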
+struct drm_i915_obj_virt_addr {
+	struct list_head head;
+	unsigned long user_virt_addr;
+};
+
 struct drm_i915_obj_pid_info {
 	struct list_head head;
 	pid_t tgid;
 	int open_handle_count;
+	struct list_head virt_addr_head;
 };
 
 struct drm_i915_gem_client_pid {
@@ -145,6 +151,9 @@ static void async_mmput(struct mm_struct *mm)
 static void i915_gem_obj_remove_all_pids(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj);
 static void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj);
+static int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj,
+				     unsigned long addr, bool is_map_gtt,
+				     bool is_mutex_locked);
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
@@ -1839,6 +1848,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_i915_gem_object *obj;
 	unsigned long addr;
+	int ret;
 
 	if (args->flags & ~(I915_MMAP_WC))
 		return -EINVAL;
@@ -1884,6 +1894,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	if (IS_ERR((void *)addr))
 		return addr;
 
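+	/*
+	 * Record the VA just returned to userspace; this is a plain CPU
+	 * mmap, so the address is stored untagged (is_map_gtt == false).
+	 */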
+	ret = i915_obj_insert_virt_addr(obj, addr, false, false);
+	if (ret)
+		return ret;
+
 	args->addr_ptr = (uint64_t) addr;
 
 	return 0;
@@ -2085,6 +2099,10 @@ int i915_gem_fault(struct vm_fault *vmf)
 		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
 	GEM_BUG_ON(!obj->userfault_count);
 
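+	/*
+	 * Record the VA of the faulting GTT mmap; struct_mutex is already
+	 * held in the fault path, hence is_mutex_locked == true.
+	 */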
+	ret = i915_obj_insert_virt_addr(obj,
+					(unsigned long)area->vm_start,
+					true, true);
+
 	i915_vma_set_ggtt_write(vma);
 
 err_fence:
@@ -6298,6 +6316,7 @@ static int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj)
 		}
 		entry->tgid = current_tgid;
 		entry->open_handle_count = 1;
+		INIT_LIST_HEAD(&entry->virt_addr_head);
 		list_add_tail(&entry->head, &obj->pid_info);
 	}
@@ -6309,6 +6328,7 @@ static void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj)
 {
 	pid_t current_tgid = task_tgid_nr(current);
 	struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+	struct drm_i915_obj_virt_addr *virt_entry, *virt_next;
 	int found = 0;
 
 	if (!i915_modparams.memtrack_debug)
@@ -6321,6 +6341,13 @@ static void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj)
 			pid_entry->open_handle_count--;
 			found = 1;
 			if (pid_entry->open_handle_count == 0) {
+				list_for_each_entry_safe(virt_entry,
+							 virt_next,
+							 &pid_entry->virt_addr_head,
+							 head) {
+					list_del(&virt_entry->head);
+					kfree(virt_entry);
+				}
 				list_del(&pid_entry->head);
 				kfree(pid_entry);
 			}
@@ -6338,9 +6365,18 @@ static void i915_gem_obj_remove_all_pids(struct drm_i915_private *i915,
					  struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+	struct drm_i915_obj_virt_addr *virt_entry, *virt_next;
 
 	mutex_lock(&i915->drm.struct_mutex);
 	list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+		list_for_each_entry_safe(virt_entry,
+					 virt_next,
+					 &pid_entry->virt_addr_head,
+					 head) {
+			list_del(&virt_entry->head);
+			kfree(virt_entry);
+		}
+
 		/* Remove object from that pid's client list */
 		while (pid_entry->open_handle_count--)
 			i915_gem_client_remove(i915, pid_entry->tgid, true);
@@ -6351,6 +6387,61 @@ static void i915_gem_obj_remove_all_pids(struct drm_i915_private *i915,
 	mutex_unlock(&i915->drm.struct_mutex);
 }
 
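+/*
+ * Record the user virtual address at which the current process (tgid) has
+ * @obj mapped. mmap_gtt mappings are tagged by setting bit 0 of the stored
+ * address; duplicate addresses for the same tgid are skipped.
+ */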
+static int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj,
+				     unsigned long addr,
+				     bool is_map_gtt,
+				     bool is_mutex_locked)
+{
+	struct drm_i915_obj_pid_info *pid_entry;
+	pid_t current_tgid = task_tgid_nr(current);
+	int ret = 0, found = 0;
+
+	if (!i915_modparams.memtrack_debug)
+		return 0;
+
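+	/* mmap addresses are page aligned, so bit 0 is free to tag GTT mmaps */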
+	if (is_map_gtt)
+		addr |= 1;
+
+	if (!is_mutex_locked) {
+		ret = i915_mutex_lock_interruptible(obj->base.dev);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(pid_entry, &obj->pid_info, head) {
+		if (pid_entry->tgid == current_tgid) {
+			struct drm_i915_obj_virt_addr *virt_entry, *new_entry;
+
+			list_for_each_entry(virt_entry,
+					    &pid_entry->virt_addr_head,
+					    head) {
+				if (virt_entry->user_virt_addr == addr) {
+					found = 1;
+					break;
+				}
+			}
+			if (found)
+				break;
+			new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+			if (new_entry == NULL) {
+				DRM_ERROR("alloc failed\n");
+				ret = -ENOMEM;
+				goto out;
+			}
+			new_entry->user_virt_addr = addr;
+			list_add_tail(&new_entry->head,
+				      &pid_entry->virt_addr_head);
+			break;
+		}
+	}
+
+out:
+	if (!is_mutex_locked)
+		mutex_unlock(&obj->base.dev->struct_mutex);
+
+	return ret;
+}
+
 static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj,
					 struct pid_stat_entry *pid_entry,
					 bool *found)
@@ -6421,6 +6512,7 @@ static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma;
 	struct drm_i915_obj_pid_info *pid_info_entry;
+	struct drm_i915_obj_virt_addr *virt_entry;
 	struct drm_i915_error_state_buf *m = obj_stat_buf->m;
 	struct pid_stat_entry *pid_entry = obj_stat_buf->entry;
 	struct per_file_obj_mem_info *stats = &pid_entry->stats;
@@ -6475,12 +6567,22 @@ static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
		i915_error_printf(m, " %08llx ", vma->node.start);
 	}
 	if (list_empty(&obj->vma_list))
-		err_puts(m, " ");
-
-	list_for_each_entry(pid_info_entry, &obj->pid_info, head)
-		i915_error_printf(m, " (%d: %d)",
+		err_puts(m, " ");
+	list_for_each_entry(pid_info_entry, &obj->pid_info, head) {
+		err_printf(m, " (%d: %d:",
				pid_info_entry->tgid,
				pid_info_entry->open_handle_count);
+		list_for_each_entry(virt_entry,
+				    &pid_info_entry->virt_addr_head, head) {
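+			/* bit 0 set: VA came from mmap_gtt (printed with the
+			 * tag stripped); plain mmap VAs get a '*' suffix.
+			 */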
+			if (virt_entry->user_virt_addr & 1)
+				err_printf(m, " %p",
+					   (void *)(virt_entry->user_virt_addr & ~1));
+			else
+				err_printf(m, " %p*",
+					   (void *)virt_entry->user_virt_addr);
+		}
+		err_puts(m, ") ");
+	}
 
 	err_puts(m, "\n");
@@ -6792,7 +6894,7 @@ static int i915_gem_object_pid_order(int id, void *ptr, void *data)
 	i915_error_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " ");
 	err_puts(m,
-		"\n Obj Identifier Obj-Size Resident-Size Pin Tiling Dirty Shared Vmap Stolen Mappable AllocState Global/PP GttOffset (PID: handle count)\n");
+		"\n Obj Identifier Obj-Size Resident-Size Pin Tiling Dirty Shared Vmap Stolen Mappable AllocState Global/PP GttOffset (PID: handle count: user virt addrs)\n");
 
 	list_for_each_entry(file, &dev->filelist, lhead) {
 		struct get_obj_stats_buf obj_stat_buf;
--
1.9.1