[Intel-gfx] [RFC 1/5] drm/i915: Sysfs interface to get GFX shmem usage stats per process
Praveen Paneri
praveen.paneri at intel.com
Thu Mar 22 14:14:54 UTC 2018
From: Sourab Gupta <sourab.gupta at intel.com>
There is a need for an interface that provides information about the GFX
memory usage of each process at any given point in time. Such information
is useful for analyzing the GFX memory usage of processes, along the lines
of other system memory usage tools (e.g. procrank). This is especially
useful for GFX-intensive processes, since tools such as procrank cannot
provide a precise view of GFX memory consumption and its specific
intricacies (such as the distinction between private, shared and
purgeable buffers).

This patch adds a sysfs interface that lists the GFX memory usage of each
client process. The interface also breaks the GFX buffer/memory usage down
into buckets according to buffer type, such as purgeable, private and
shared.
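For reference, here is a minimal userspace sketch that reads the new
interface (the card0 path is an assumption for the first DRM minor, and
is not something this patch itself guarantees):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/drm/card0/gfx_memtrack/i915_gem_meminfo", "r");
            char line[512];

            if (!f) {
                    perror("fopen");
                    return EXIT_FAILURE;
            }
            /* One header line, then one row of usage stats per client pid */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return EXIT_SUCCESS;
    }

Reading the file with a plain "cat" works just as well.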
v2:
- Move from per-drm-file accounting to per-process accounting to
  address shared drm fd cases (Daniel)
- Use div64_u64 for 64-bit divisions (Sagar)
Signed-off-by: Sourab Gupta <sourab.gupta at intel.com>
Signed-off-by: Akash Goel <akash.goel at intel.com>
Signed-off-by: Nidhi Gupta <nidhi1.gupta at intel.com>
Signed-off-by: Praveen Paneri <praveen.paneri at intel.com>
---
drivers/gpu/drm/i915/i915_drv.c | 1 +
drivers/gpu/drm/i915/i915_drv.h | 4 +
drivers/gpu/drm/i915/i915_gem.c | 548 +++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/i915_gem_object.h | 1 +
drivers/gpu/drm/i915/i915_gpu_error.c | 9 +-
drivers/gpu/drm/i915/i915_gpu_error.h | 4 +
drivers/gpu/drm/i915/i915_params.c | 2 +
drivers/gpu/drm/i915/i915_params.h | 3 +-
drivers/gpu/drm/i915/i915_sysfs.c | 78 +++++
9 files changed, 647 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a7d3275..06c0901 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2847,6 +2847,7 @@ static int intel_runtime_resume(struct device *kdev)
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
+ .gem_open_object = i915_gem_open_object,
.gem_close_object = i915_gem_close_object,
.gem_free_object_unlocked = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c9c3b2b..c23ba28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1660,6 +1660,7 @@ struct drm_i915_private {
bool preserve_bios_swizzle;
+ struct kobject memtrack_kobj;
/* overlay */
struct intel_overlay *overlay;
@@ -2884,6 +2885,7 @@ struct drm_i915_gem_object *
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size);
+int i915_gem_open_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);
@@ -3352,6 +3354,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);
+int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev);
/* intel_lpe_audio.c */
int intel_lpe_audio_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 802df8e..49555ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -45,8 +45,83 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/sched/mm.h>
+
+/* Size of the key hash table. Must be a power of 2. */
+#define DRM_DEBUG_MAGIC_HASH_ORDER 4
+
+struct per_file_obj_mem_info {
+ int num_obj;
+ int num_obj_shared;
+ int num_obj_private;
+ int num_obj_gtt_bound;
+ int num_obj_purged;
+ int num_obj_purgeable;
+ int num_obj_allocated;
+ int num_obj_fault_mappable;
+ int num_obj_stolen;
+ size_t gtt_space_allocated_shared;
+ size_t gtt_space_allocated_priv;
+ size_t phys_space_allocated_shared;
+ size_t phys_space_allocated_priv;
+ size_t phys_space_purgeable;
+ size_t phys_space_shared_proportion;
+ size_t fault_mappable_size;
+ size_t stolen_space_allocated;
+ char *process_name;
+};
+
+struct name_entry {
+ struct list_head head;
+ struct drm_hash_item hash_item;
+};
+
+struct pid_stat_entry {
+ struct list_head head;
+ struct list_head namefree;
+ struct drm_open_hash namelist;
+ struct per_file_obj_mem_info stats;
+ struct pid *tgid;
+ int pid_num;
+};
+
+struct drm_i915_obj_pid_info {
+ struct list_head head;
+ pid_t tgid;
+ int open_handle_count;
+};
+
+struct get_obj_stats_buf {
+ struct pid_stat_entry *entry;
+ struct drm_i915_error_state_buf *m;
+};
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+/*
+ * If this mmput() call is the last one, it will tear down the mmaps of the
+ * process and call drm_gem_vm_close(), which leads to a deadlock on the
+ * i915 mutex. Instead, schedule the mmput() asynchronously here, to avoid
+ * a recursive attempt to acquire the i915 mutex.
+ */
+static void async_mmput_func(void *data, async_cookie_t cookie)
+{
+ struct mm_struct *mm = data;
+
+ mmput(mm);
+}
+
+static void async_mmput(struct mm_struct *mm)
+{
+ async_schedule(async_mmput_func, mm);
+}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
+static int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj);
+static int i915_get_pid_cmdline(struct task_struct *task, char *buffer);
+static void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj);
+static void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
@@ -3553,6 +3628,13 @@ static void __sleep_rcu(struct rcu_head *rcu)
}
}
+int i915_gem_open_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(gem);
+
+ return i915_gem_obj_insert_pid(obj);
+}
+
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_private *i915 = to_i915(gem->dev);
@@ -3588,6 +3670,9 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
}
mutex_unlock(&i915->drm.struct_mutex);
+
+	i915_gem_obj_remove_pid(obj);
}
static unsigned long to_wait_timeout(s64 timeout_ns)
@@ -4582,6 +4667,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
obj->mm.madv = I915_MADV_WILLNEED;
+ INIT_LIST_HEAD(&obj->pid_info);
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->mm.get_page.lock);
@@ -4760,6 +4846,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
GEM_BUG_ON(!list_empty(&obj->lut_list));
+ i915_gem_obj_remove_all_pids(obj);
+
if (obj->ops->release)
obj->ops->release(obj);
@@ -6004,3 +6092,463 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif
+
+static int i915_get_pid_cmdline(struct task_struct *task, char *buffer)
+{
+ int res = 0;
+ unsigned int len;
+ struct mm_struct *mm = get_task_mm(task);
+
+ if (!mm)
+ goto out;
+ if (!mm->arg_end)
+ goto out_mm;
+
+ len = mm->arg_end - mm->arg_start;
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+ if (res < 0) {
+ async_mmput(mm);
+ return res;
+ }
+
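+	/* Make sure the cmdline we copy back is NUL-terminated */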
+ if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE)
+ buffer[res-1] = '\0';
+out_mm:
+ async_mmput(mm);
+out:
+ return 0;
+}
+
+static unsigned long
+i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj)
+{
+ unsigned long ret;
+
+ if (obj->base.filp) {
+		struct inode *inode = file_inode(obj->base.filp);
+		struct shmem_inode_info *info;
+
+		if (!inode)
+			return 0;
+
+		info = SHMEM_I(inode);
+		spin_lock(&info->lock);
+ ret = inode->i_mapping->nrpages;
+ spin_unlock(&info->lock);
+ return ret;
+ }
+ return 0;
+}
+
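+/*
+ * Track which processes hold an open handle to this object: bump the
+ * handle count for the caller's tgid, or add a new entry on first open.
+ */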
+static int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj)
+{
+ int found = 0;
+ struct drm_i915_obj_pid_info *entry;
+ pid_t current_tgid = task_tgid_nr(current);
+
+ if (!i915_modparams.memtrack_debug)
+ return 0;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ list_for_each_entry(entry, &obj->pid_info, head) {
+ if (entry->tgid == current_tgid) {
+ entry->open_handle_count++;
+ found = 1;
+ break;
+ }
+ }
+ if (found == 0) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return -ENOMEM;
+ }
+ entry->tgid = current_tgid;
+ entry->open_handle_count = 1;
+ list_add_tail(&entry->head, &obj->pid_info);
+ }
+
+ mutex_unlock(&obj->base.dev->struct_mutex);
+ return 0;
+}
+
+static void i915_gem_obj_remove_pid(struct drm_i915_gem_object *obj)
+{
+ pid_t current_tgid = task_tgid_nr(current);
+ struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+ int found = 0;
+
+ if (!i915_modparams.memtrack_debug)
+ return;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
+ list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+ if (pid_entry->tgid == current_tgid) {
+ pid_entry->open_handle_count--;
+ found = 1;
+ if (pid_entry->open_handle_count == 0) {
+ list_del(&pid_entry->head);
+ kfree(pid_entry);
+ }
+ break;
+ }
+ }
+ mutex_unlock(&obj->base.dev->struct_mutex);
+
+ if (found == 0)
+ DRM_DEBUG("Couldn't find matching tgid %d for obj %p\n",
+ current_tgid, obj);
+}
+
+static void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_obj_pid_info *pid_entry, *pid_next;
+
+ list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) {
+ list_del(&pid_entry->head);
+ kfree(pid_entry);
+ }
+}
+
+static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *found)
+{
+ struct drm_hash_item *hash_item;
+ int ret;
+
+ ret = drm_ht_find_item(&pid_entry->namelist,
+ (unsigned long)&obj->base, &hash_item);
+ /* Not found, insert in hash */
+ if (ret) {
+ struct name_entry *entry =
+ kzalloc(sizeof(*entry), GFP_NOWAIT);
+ if (entry == NULL) {
+ DRM_ERROR("alloc failed\n");
+ return -ENOMEM;
+ }
+ entry->hash_item.key = (unsigned long)&obj->base;
+ drm_ht_insert_item(&pid_entry->namelist,
+ &entry->hash_item);
+ list_add_tail(&entry->head, &pid_entry->namefree);
+ *found = false;
+ } else
+ *found = true;
+
+ return 0;
+}
+
+static int i915_obj_shared_count(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry,
+ bool *discard)
+{
+ struct drm_i915_obj_pid_info *pid_info_entry;
+ int ret, obj_shared_count = 0;
+
+ /*
+	 * The object can be shared among different processes by either the
+	 * flink or the dma-buf mechanism, leading to a shared count greater
+	 * than 1. For objects that are not shared, return a shared count of 1.
+	 * Shared dma-buf objects may be external to i915; detect this
+	 * condition through the 'import_attach' field.
+ */
+ if (!obj->base.name && !obj->base.dma_buf)
+ return 1;
+ else if (obj->base.import_attach) {
+ /* not our GEM obj */
+ *discard = true;
+ return 0;
+ }
+
+ ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(pid_info_entry, &obj->pid_info, head)
+ obj_shared_count++;
+
+ if (WARN_ON(obj_shared_count == 0))
+ return -EINVAL;
+
+ return obj_shared_count;
+}
+
+static int
+i915_drm_gem_obj_per_process_summary(struct drm_i915_gem_object *obj,
+ struct pid_stat_entry *pid_entry)
+{
+ struct per_file_obj_mem_info *stats = &pid_entry->stats;
+ int obj_shared_count = 0;
+ bool discard = false;
+
+ stats->num_obj++;
+
+ obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard);
+ if (obj_shared_count < 0)
+ return obj_shared_count;
+
+ if (discard)
+ return 0;
+
+ if (obj_shared_count > 1)
+ stats->num_obj_shared++;
+ else
+ stats->num_obj_private++;
+
+ if (obj->bind_count) {
+ stats->num_obj_gtt_bound++;
+ if (obj_shared_count > 1)
+ stats->gtt_space_allocated_shared += obj->base.size;
+ else
+ stats->gtt_space_allocated_priv += obj->base.size;
+ }
+
+ if (obj->stolen) {
+ stats->num_obj_stolen++;
+ stats->stolen_space_allocated += obj->base.size;
+ } else if (obj->mm.madv == __I915_MADV_PURGED) {
+ stats->num_obj_purged++;
+ } else {
+ u64 nr_bytes =
+			i915_obj_get_shmem_pages_alloced(obj) * PAGE_SIZE;
+
+ if (obj->mm.madv == I915_MADV_DONTNEED) {
+ stats->num_obj_purgeable++;
+ if (nr_bytes != 0)
+ stats->phys_space_purgeable += nr_bytes;
+ }
+
+ if (nr_bytes != 0) {
+ stats->num_obj_allocated++;
+ if (obj_shared_count > 1) {
+ stats->phys_space_allocated_shared += nr_bytes;
+ stats->phys_space_shared_proportion +=
+ div64_u64(nr_bytes, obj_shared_count);
+			} else {
+				stats->phys_space_allocated_priv += nr_bytes;
+			}
+ }
+ }
+
+ if (!list_empty(&obj->userfault_link)) {
+ stats->num_obj_fault_mappable++;
+ stats->fault_mappable_size += obj->base.size;
+ }
+
+ return 0;
+}
+
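+/*
+ * idr_for_each() callback: fold this object's footprint into the stats
+ * of every process (tgid) that holds a handle to it, creating a new
+ * per-pid entry on first encounter.
+ */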
+static int i915_gem_object_pid_order(int id, void *ptr, void *data)
+{
+ struct drm_i915_gem_object *obj = ptr;
+ struct list_head *per_pid_stats = data;
+ struct pid_stat_entry *pid_entry;
+ struct drm_i915_obj_pid_info *pid_info;
+ int ret = 0;
+
+ list_for_each_entry(pid_info, &obj->pid_info, head) {
+ int pid_num = pid_info->tgid;
+ int found = 0;
+
+ list_for_each_entry(pid_entry, per_pid_stats, head) {
+ if (pid_entry->pid_num == pid_num) {
+ found = 1;
+ break;
+ }
+ }
+
+		if (!found) {
+			char *process_name;
+			struct task_struct *task;
+			struct pid_stat_entry *new_entry =
+				kzalloc(sizeof(*new_entry), GFP_KERNEL);
+			if (new_entry == NULL) {
+				DRM_ERROR("alloc failed\n");
+				ret = -ENOMEM;
+				break;
+			}
+
+			new_entry->pid_num = pid_num;
+			new_entry->tgid = find_get_pid(pid_num);
+			INIT_LIST_HEAD(&new_entry->namefree);
+
+			ret = drm_ht_create(&new_entry->namelist,
+					    DRM_DEBUG_MAGIC_HASH_ORDER);
+			if (ret) {
+				kfree(new_entry);
+				break;
+			}
+
+			process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+			if (!process_name) {
+				drm_ht_remove(&new_entry->namelist);
+				kfree(new_entry);
+				ret = -ENOMEM;
+				break;
+			}
+
+			task = get_pid_task(new_entry->tgid, PIDTYPE_PID);
+			if (task) {
+				ret = i915_get_pid_cmdline(task, process_name);
+				put_task_struct(task);
+			}
+			if (ret) {
+				drm_ht_remove(&new_entry->namelist);
+				kfree(process_name);
+				kfree(new_entry);
+				break;
+			}
+			new_entry->stats.process_name = process_name;
+
+			/* Publish the entry only once it is fully set up */
+			list_add_tail(&new_entry->head, per_pid_stats);
+			pid_entry = new_entry;
+		}
+
+		/* Fold this object into the per-process accounting now */
+		ret = i915_drm_gem_obj_per_process_summary(obj, pid_entry);
+		if (ret)
+			break;
+	}
+
+ return ret;
+}
+
+static int
+__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev)
+{
+ struct drm_file *file;
+
+ struct name_entry *entry, *next;
+ struct pid_stat_entry *pid_entry, *temp_entry;
+ struct pid_stat_entry *new_pid_entry, *new_temp_entry;
+ struct list_head per_pid_stats, sorted_pid_stats;
+ int ret = 0;
+ size_t total_shared_prop_space = 0, total_priv_space = 0;
+
+ INIT_LIST_HEAD(&per_pid_stats);
+ INIT_LIST_HEAD(&sorted_pid_stats);
+
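+	/* Sizes in the PHY columns below are reported in KiB */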
+ err_puts(m,
+ "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n");
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ spin_lock(&file->table_lock);
+ ret = idr_for_each(&file->object_idr,
+ &i915_gem_object_pid_order,
+ &per_pid_stats);
+ spin_unlock(&file->table_lock);
+ if (ret)
+ break;
+ }
+
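+	/*
+	 * Insertion sort: list the clients with the largest physical
+	 * footprint (shared proportion + private) first.
+	 */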
+ list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) {
+ if (list_empty(&sorted_pid_stats)) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head, &sorted_pid_stats);
+ continue;
+ }
+
+ list_for_each_entry_safe(new_pid_entry, new_temp_entry,
+ &sorted_pid_stats, head) {
+			size_t prev_space =
+				pid_entry->stats.phys_space_shared_proportion +
+				pid_entry->stats.phys_space_allocated_priv;
+			size_t new_space =
+				new_pid_entry->stats.phys_space_shared_proportion +
+				new_pid_entry->stats.phys_space_allocated_priv;
+ if (prev_space > new_space) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head,
+ &new_pid_entry->head);
+ break;
+ }
+ if (list_is_last(&new_pid_entry->head,
+ &sorted_pid_stats)) {
+ list_del(&pid_entry->head);
+ list_add_tail(&pid_entry->head,
+ &sorted_pid_stats);
+ }
+ }
+ }
+
+ list_for_each_entry_safe(pid_entry, temp_entry,
+ &sorted_pid_stats, head) {
+ struct task_struct *task = get_pid_task(pid_entry->tgid,
+ PIDTYPE_PID);
+ err_printf(m,
+ "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s",
+ pid_entry->pid_num,
+ pid_entry->stats.num_obj,
+ pid_entry->stats.num_obj_shared,
+ pid_entry->stats.num_obj_private,
+ pid_entry->stats.num_obj_purgeable,
+ pid_entry->stats.num_obj_allocated,
+ pid_entry->stats.phys_space_allocated_shared/1024,
+ pid_entry->stats.phys_space_shared_proportion/1024,
+ pid_entry->stats.phys_space_allocated_priv/1024,
+ pid_entry->stats.phys_space_purgeable/1024,
+ pid_entry->stats.process_name);
+
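+		/* A trailing '*' marks clients whose task has already exited */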
+ if (task == NULL)
+ err_puts(m, "*\n");
+ else
+ err_puts(m, "\n");
+
+ total_shared_prop_space +=
+ pid_entry->stats.phys_space_shared_proportion/1024;
+ total_priv_space +=
+ pid_entry->stats.phys_space_allocated_priv/1024;
+ list_del(&pid_entry->head);
+
+ list_for_each_entry_safe(entry, next,
+ &pid_entry->namefree, head) {
+ list_del(&entry->head);
+ drm_ht_remove_item(&pid_entry->namelist,
+ &entry->hash_item);
+ kfree(entry);
+ }
+ drm_ht_remove(&pid_entry->namelist);
+ kfree(pid_entry->stats.process_name);
+ kfree(pid_entry);
+ if (task)
+ put_task_struct(task);
+ }
+
+ err_puts(m,
+ "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n");
+ err_printf(m,
+ "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n",
+ total_shared_prop_space, total_priv_space);
+
+ if (ret)
+ return ret;
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
+
+int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev)
+{
+ int ret = 0;
+
+ /*
+	 * Protect access to global drm resources such as the filelist, and
+	 * guard against their removal from under us while in use.
+ * XXX: drm_global_mutex is undefined currently
+ */
+ /* mutex_lock(&drm_global_mutex); */
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ /* mutex_unlock(&drm_global_mutex); */
+ return ret;
+ }
+
+ ret = __i915_get_drm_clients_info(m, dev);
+
+ mutex_unlock(&dev->struct_mutex);
+ /* mutex_unlock(&drm_global_mutex); */
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index 54f00b3..8176d7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -279,6 +279,7 @@ struct drm_i915_gem_object {
void *gvt_info;
};
+ struct list_head pid_info;
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index effaf98..3e8fc79 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -90,6 +90,11 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
return true;
}
+bool i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+ return __i915_error_ok(e);
+}
+
static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
unsigned len)
{
@@ -161,8 +166,8 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
__i915_error_advance(e, len);
}
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
+void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str)
{
unsigned len;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ac57606..dac2a35 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -308,6 +308,10 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
struct drm_i915_private *i915,
size_t count, loff_t pos);
+void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str);
+bool i915_error_ok(struct drm_i915_error_state_buf *e);
+
static inline void
i915_error_state_buf_release(struct drm_i915_error_state_buf *eb)
{
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 08108ce..746d45c 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -178,6 +178,8 @@ struct i915_params i915_modparams __read_mostly = {
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
+i915_param_named(memtrack_debug, bool, 0600,
+	"Enable the memtrack capability to report per-process GFX memory usage (default: true)");
+
static __always_inline void _print_param(struct drm_printer *p,
const char *name,
const char *type,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index c963603..40cd3ab 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -69,7 +69,8 @@
param(bool, nuclear_pageflip, false) \
param(bool, enable_dp_mst, true) \
param(bool, enable_dpcd_backlight, false) \
- param(bool, enable_gvt, false)
+ param(bool, enable_gvt, false) \
+ param(bool, memtrack_debug, true)
#define MEMBER(T, member, ...) T member;
struct i915_params {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e5e6f6b..d35c789 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -552,6 +552,36 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
return count;
}
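+
+/*
+ * sysfs bin attribute read handler: format the per-client memory stats
+ * into a temporary error_state buffer and copy the requested chunk back
+ * to userspace.
+ */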
+static ssize_t i915_gem_clients_state_read(struct file *filp,
+ struct kobject *memtrack_kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct kobject *kobj = memtrack_kobj->parent;
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = dev_get_drvdata(kdev);
+ struct drm_device *dev = minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
+ if (ret)
+ return ret;
+
+ ret = i915_get_drm_clients_info(&error_str, dev);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
static const struct bin_attribute error_state_attr = {
.attr.name = "error",
@@ -560,11 +590,51 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
.read = error_state_read,
.write = error_state_write,
};
+static struct bin_attribute i915_gem_client_state_attr = {
+ .attr.name = "i915_gem_meminfo",
+	/* Read-only: there is no .write handler for this attribute */
+	.attr.mode = S_IRUSR | S_IRGRP | S_IROTH,
+ .size = 0,
+ .read = i915_gem_clients_state_read,
+};
+
+static struct attribute *memtrack_kobj_attrs[] = {NULL};
+
+static struct kobj_type memtrack_kobj_type = {
+ .release = NULL,
+ .sysfs_ops = NULL,
+ .default_attrs = memtrack_kobj_attrs,
+};
static void i915_setup_error_capture(struct device *kdev)
{
+ int ret;
+
if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
DRM_ERROR("error_state sysfs setup failed\n");
+
+ if (i915_modparams.memtrack_debug) {
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+		/* Create the gfx_memtrack directory for the memtrack sysfs files */
+		ret = kobject_init_and_add(&dev_priv->memtrack_kobj,
+					   &memtrack_kobj_type,
+					   &kdev->kobj, "gfx_memtrack");
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("i915 memtrack sysfs directory setup failed\n");
+			kobject_put(&dev_priv->memtrack_kobj);
+		} else {
+			ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj,
+						    &i915_gem_client_state_attr);
+			if (ret)
+				DRM_ERROR("i915_gem_client_state sysfs setup failed\n");
+		}
+ }
}
static void i915_teardown_error_capture(struct device *kdev)
@@ -641,4 +711,12 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
+ if (i915_modparams.memtrack_debug) {
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ sysfs_remove_bin_file(&dev_priv->memtrack_kobj,
+ &i915_gem_client_state_attr);
+ kobject_del(&dev_priv->memtrack_kobj);
+ kobject_put(&dev_priv->memtrack_kobj);
+ }
}
--
1.9.1