[Intel-gfx] [PATCH] drm/i915: Sysfs interface to get GFX shmem usage stats per process

Daniel Vetter daniel at ffwll.ch
Thu Aug 14 16:25:30 CEST 2014


On Thu, Aug 14, 2014 at 04:50:19PM +0300, Jani Nikula wrote:
> On Thu, 14 Aug 2014, sourab.gupta at intel.com wrote:
> > From: Sourab Gupta <sourab.gupta at intel.com>
> >
> > Currently the Graphics driver provides an interface through which
> > one can get a snapshot of the overall Graphics memory consumption.
> > There is also an interface which provides information about the
> > various memory-related attributes of every Graphics buffer created
> > by the individual clients.
> >
> > A new interface is required to provide the following functionality:
> > 1) Detailed, per-client information about the distribution of
> > Graphics memory
> > 2) Information about the sharing of Graphics buffers between the
> > clients.
> >
> > The client-based interface would also aid in debugging the memory
> > consumption of each client and in tracking down memory leaks.
> >
> > With this new interface,
> > 1) In case of a memory leak, we can easily zero in on the culprit
> > client which is unexpectedly holding on to Graphics buffers for an
> > inordinate amount of time.
> > 2) We can get an estimate of the instantaneous memory footprint of
> > every Graphics client.
> > 3) We can now trace all the processes sharing a particular Graphics buffer.
> >
> > This patch provides a sysfs interface to achieve the above
> > functionality.
> >
> > Two files are created in sysfs:
> > 'i915_gem_meminfo' provides a summary of the graphics resources used by
> > each graphics client.
> > 'i915_gem_objinfo' provides a detailed view of each object created by
> > individual clients.
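> >
> > For illustration only (not part of this patch), the files can be read
> > like any other sysfs binary attribute. A minimal userspace sketch,
> > assuming the attributes appear under the primary minor at
> > /sys/class/drm/card0 (an example path, not a guaranteed location):
> >
> > #include <stdio.h>
> >
> > static void dump_attr(const char *path)
> > {
> > 	FILE *f = fopen(path, "r");
> > 	char line[512];
> >
> > 	if (!f) {
> > 		perror(path);
> > 		return;
> > 	}
> > 	while (fgets(line, sizeof(line), f))
> > 		fputs(line, stdout);
> > 	fclose(f);
> > }
> >
> > int main(void)
> > {
> > 	/* per-client summary and per-object details, respectively */
> > 	dump_attr("/sys/class/drm/card0/i915_gem_meminfo");
> > 	dump_attr("/sys/class/drm/card0/i915_gem_objinfo");
> > 	return 0;
> > }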
> 
> Why sysfs instead of debugfs?
> 
> Please run your patch through checkpatch and fix the issues before
> sending v2.

There's a more general issue here: currently gem memory isn't accounted
correctly at all. So if we want this for production usage (i.e. a real
interface as you propose here for sysfs) and not just something in
debugfs, then I think we need to look at the entire picture: not just a
way to see how much gem/shmem memory is used, but also a way to limit it,
preferably integrated into the core mm tracking.
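
Just to illustrate what I mean by "limit it" - purely a sketch with
made-up names, not a design proposal and certainly not the core mm
integration mentioned above - per-client accounting could be as simple
as a charge/uncharge pair:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical per-client accounting; struct and function names are
 * invented here for illustration only.
 */
struct i915_client_mem {
	atomic64_t shmem_bytes;	/* bytes of shmem backing held by this client */
	u64 shmem_limit;	/* 0 means unlimited */
};

static int i915_client_mem_charge(struct i915_client_mem *mem, u64 bytes)
{
	u64 now = atomic64_add_return(bytes, &mem->shmem_bytes);

	if (mem->shmem_limit && now > mem->shmem_limit) {
		atomic64_sub(bytes, &mem->shmem_bytes);
		return -ENOSPC;
	}
	return 0;
}

static void i915_client_mem_uncharge(struct i915_client_mem *mem, u64 bytes)
{
	atomic64_sub(bytes, &mem->shmem_bytes);
}

Charging would happen where the backing pages actually get allocated,
uncharging where they get dropped. But whether something like this
should live in i915 at all, or rather in core mm, is exactly the open
question.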

No, I haven't thought through the details yet - we might need to sit
together with some core mm hackers for that.
-Daniel

> 
> BR,
> Jani.
> 
> 
> >
> > Signed-off-by: Sourab Gupta <sourab.gupta at intel.com>
> > Signed-off-by: Akash Goel <akash.goel at intel.com>
> > ---
> >  drivers/gpu/drm/i915/i915_dma.c        |   1 +
> >  drivers/gpu/drm/i915/i915_drv.c        |   2 +
> >  drivers/gpu/drm/i915/i915_drv.h        |  18 ++
> >  drivers/gpu/drm/i915/i915_gem.c        | 115 +++++++++++
> >  drivers/gpu/drm/i915/i915_gem_debug.c  | 366 +++++++++++++++++++++++++++++++++
> >  drivers/gpu/drm/i915/i915_gem_stolen.c |   2 +
> >  drivers/gpu/drm/i915/i915_gpu_error.c  |   2 +-
> >  drivers/gpu/drm/i915/i915_sysfs.c      | 107 ++++++++++
> >  8 files changed, 612 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
> > index 3f676f9..7d599f1 100644
> > --- a/drivers/gpu/drm/i915/i915_dma.c
> > +++ b/drivers/gpu/drm/i915/i915_dma.c
> > @@ -1984,6 +1984,7 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
> >  {
> >  	struct drm_i915_file_private *file_priv = file->driver_priv;
> >  
> > +	kfree(file_priv->process_name);
> >  	if (file_priv && file_priv->bsd_ring)
> >  		file_priv->bsd_ring = NULL;
> >  	kfree(file_priv);
> > diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> > index 01de977..1c4cd6d7 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.c
> > +++ b/drivers/gpu/drm/i915/i915_drv.c
> > @@ -1527,6 +1527,8 @@ static struct drm_driver driver = {
> >  	.debugfs_init = i915_debugfs_init,
> >  	.debugfs_cleanup = i915_debugfs_cleanup,
> >  #endif
> > +	.gem_open_object = i915_gem_open_object,
> > +	.gem_close_object = i915_gem_close_object,
> >  	.gem_free_object = i915_gem_free_object,
> >  	.gem_vm_ops = &i915_gem_vm_ops,
> >  
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 541fb6f..ccb3db3 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -1846,6 +1846,12 @@ struct drm_i915_gem_object {
> >  			struct work_struct *work;
> >  		} userptr;
> >  	};
> > +
> > +#define MAX_OPEN_HANDLE 20
> > +	struct {
> > +		pid_t pid;
> > +		int open_handle_count;
> > +	} pid_array[MAX_OPEN_HANDLE];
> >  };
> >  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
> >  
> > @@ -1896,6 +1902,7 @@ struct drm_i915_gem_request {
> >  struct drm_i915_file_private {
> >  	struct drm_i915_private *dev_priv;
> >  	struct drm_file *file;
> > +	char *process_name;
> >  
> >  	struct {
> >  		spinlock_t lock;
> > @@ -2325,6 +2332,10 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
> >  		  struct i915_address_space *vm);
> >  void i915_gem_free_object(struct drm_gem_object *obj);
> >  void i915_gem_vma_destroy(struct i915_vma *vma);
> > +int i915_gem_open_object(struct drm_gem_object *gem_obj,
> > +			struct drm_file *file_priv);
> > +int i915_gem_close_object(struct drm_gem_object *gem_obj,
> > +			struct drm_file *file_priv);
> >  
> >  #define PIN_MAPPABLE 0x1
> >  #define PIN_NONBLOCK 0x2
> > @@ -2375,6 +2386,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
> >  			 struct drm_mode_create_dumb *args);
> >  int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
> >  		      uint32_t handle, uint64_t *offset);
> > +int i915_gem_obj_shmem_pages_alloced(struct drm_i915_gem_object *obj);
> >  /**
> >   * Returns true if seq1 is later than seq2.
> >   */
> > @@ -2643,6 +2655,10 @@ int i915_verify_lists(struct drm_device *dev);
> >  #else
> >  #define i915_verify_lists(dev) 0
> >  #endif
> > +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
> > +			struct drm_device *dev);
> > +int i915_gem_get_all_obj_info(struct drm_i915_error_state_buf *m,
> > +			struct drm_device *dev);
> >  
> >  /* i915_debugfs.c */
> >  int i915_debugfs_init(struct drm_minor *minor);
> > @@ -2656,6 +2672,8 @@ static inline void intel_display_crc_init(struct drm_device *dev) {}
> >  /* i915_gpu_error.c */
> >  __printf(2, 3)
> >  void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
> > +void i915_error_puts(struct drm_i915_error_state_buf *e,
> > +			    const char *str);
> >  int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
> >  			    const struct i915_error_state_file_priv *error);
> >  int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 6c2f0b8..c464a75 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -1819,6 +1819,25 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
> >  	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
> >  }
> >  
> > +int i915_gem_obj_shmem_pages_alloced(struct drm_i915_gem_object *obj)
> > +{
> > +	int ret;
> > +
> > +	if (obj->base.filp) {
> > +		struct inode *inode = file_inode(obj->base.filp);
> > +		struct shmem_inode_info *info = SHMEM_I(inode);
> > +
> > +		if (!inode)
> > +			return 0;
> > +		spin_lock(&info->lock);
> > +		ret = inode->i_mapping->nrpages;
> > +		spin_unlock(&info->lock);
> > +		return ret;
> > +	} else
> > +		return 0;
> > +
> > +}
> > +
> >  static inline int
> >  i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
> >  {
> > @@ -4402,6 +4421,57 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
> >  	return atomic_long_read(&obj->base.filp->f_count) == 1;
> >  }
> >  
> > +int
> > +i915_gem_open_object(struct drm_gem_object *gem_obj, struct drm_file *file_priv)
> > +{
> > +	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> > +	pid_t current_pid = task_pid_nr(current);
> > +	int i, free = -1;
> > +
> > +	for (i = 0; i < MAX_OPEN_HANDLE; i++) {
> > +		if (obj->pid_array[i].pid == current_pid) {
> > +			obj->pid_array[i].open_handle_count++;
> > +			break;
> > +		} else if (obj->pid_array[i].pid == 0)
> > +			free = i;
> > +	}
> > +
> > +	if (i == MAX_OPEN_HANDLE) {
> > +		if (free != -1) {
> > +			BUG_ON(obj->pid_array[free].open_handle_count);
> > +			obj->pid_array[free].open_handle_count = 1;
> > +			obj->pid_array[free].pid = current_pid;
> > +		} else
> > +			DRM_DEBUG("Max open handle count limit: obj 0x%x\n",
> > +					(u32) obj);
> > +	}
> > +	return 0;
> > +}
> > +
> > +int
> > +i915_gem_close_object(struct drm_gem_object *gem_obj,
> > +			struct drm_file *file_priv)
> > +{
> > +	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> > +	pid_t current_pid = task_pid_nr(current);
> > +	int i;
> > +
> > +	for (i = 0; i < MAX_OPEN_HANDLE; i++) {
> > +		if (obj->pid_array[i].pid == current_pid) {
> > +			obj->pid_array[i].open_handle_count--;
> > +			if (obj->pid_array[i].open_handle_count == 0)
> > +				obj->pid_array[i].pid = 0;
> > +			break;
> > +		}
> > +	}
> > +	if (i == MAX_OPEN_HANDLE)
> > +		DRM_DEBUG("Couldn't find matching pid %d for obj 0x%x\n",
> > +				current_pid, (u32) obj);
> > +	return 0;
> > +
> > +}
> > +
> > +
> >  void i915_gem_free_object(struct drm_gem_object *gem_obj)
> >  {
> >  	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> > @@ -4992,6 +5062,46 @@ i915_gem_file_idle_work_handler(struct work_struct *work)
> >  	atomic_set(&file_priv->rps_wait_boost, false);
> >  }
> >  
> > +static int i915_gem_get_pid_cmdline(struct task_struct *task, char *buffer)
> > +{
> > +	int res = 0;
> > +	unsigned int len;
> > +	struct mm_struct *mm = get_task_mm(task);
> > +
> > +	if (!mm)
> > +		goto out;
> > +	if (!mm->arg_end)
> > +		goto out_mm;
> > +
> > +	len = mm->arg_end - mm->arg_start;
> > +
> > +	if (len > PAGE_SIZE)
> > +		len = PAGE_SIZE;
> > +
> > +	res = access_process_vm(task, mm->arg_start, buffer, len, 0);
> > +
> > +	/* If the null at the end of args has been overwritten, then
> > +	 * assume application is using setproctitle(3).
> > +	 */
> > +	if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
> > +		len = strnlen(buffer, res);
> > +		if (len < res) {
> > +			res = len;
> > +		} else {
> > +			len = mm->env_end - mm->env_start;
> > +			if (len > PAGE_SIZE - res)
> > +				len = PAGE_SIZE - res;
> > +			res += access_process_vm(task, mm->env_start,
> > +					buffer+res, len, 0);
> > +			res = strnlen(buffer, res);
> > +		}
> > +	}
> > +out_mm:
> > +	mmput(mm);
> > +out:
> > +	return res;
> > +}
> > +
> >  int i915_gem_open(struct drm_device *dev, struct drm_file *file)
> >  {
> >  	struct drm_i915_file_private *file_priv;
> > @@ -5006,6 +5116,11 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
> >  	file->driver_priv = file_priv;
> >  	file_priv->dev_priv = dev->dev_private;
> >  	file_priv->file = file;
> > +	file_priv->process_name =  kzalloc(PAGE_SIZE, GFP_ATOMIC);
> > +	if (!file_priv->process_name)
> > +		return -ENOMEM;
> > +
> > +	i915_gem_get_pid_cmdline(current, file_priv->process_name);
> >  
> >  	spin_lock_init(&file_priv->mm.lock);
> >  	INIT_LIST_HEAD(&file_priv->mm.request_list);
> > diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
> > index f462d1b..36d1980 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_debug.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_debug.c
> > @@ -116,3 +116,369 @@ i915_verify_lists(struct drm_device *dev)
> >  	return warned = err;
> >  }
> >  #endif /* WATCH_LIST */
> > +
> > +struct per_file_obj_mem_info {
> > +	int NumofObjects;
> > +	int NumofShared;
> > +	int NumofPrivate;
> > +	int NumofGttBinded;
> > +	int NumofPurged;
> > +	int NumofPurgeable;
> > +	int NumofAllocated;
> > +	int NumFaultMappable;
> > +	int NumofStolen;
> > +	size_t GttSpaceAllocatedShared;
> > +	size_t GttSpaceAllocatedPriv;
> > +	size_t PhysicalSpaceAllocatedShared;
> > +	size_t PhysicalSpaceAllocatedPriv;
> > +	size_t PhysicalSpacePurgeable;
> > +	size_t PhysicalSpaceSharedProportion;
> > +	size_t FaultMappableSize;
> > +	size_t StolenSpaceAllocated;
> > +	char *process_name;
> > +};
> > +
> > +struct name_entry {
> > +	struct list_head head;
> > +	struct drm_hash_item hash_item;
> > +};
> > +
> > +struct pid_stat_entry {
> > +	struct list_head head;
> > +	struct list_head namefree;
> > +	struct drm_open_hash namelist;
> > +	struct per_file_obj_mem_info stats;
> > +	struct pid *pid;
> > +	int pid_nr;
> > +};
> > +
> > +static struct list_head per_pid_stats;
> > +
> > +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
> > +#define err_puts(e, s) i915_error_puts(e, s)
> > +
> > +static const char *get_pin_flag(struct drm_i915_gem_object *obj)
> > +{
> > +	if (obj->user_pin_count > 0)
> > +		return "P";
> > +	else if (i915_gem_obj_is_pinned(obj))
> > +		return "p";
> > +	else
> > +		return " ";
> > +}
> > +
> > +static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
> > +{
> > +	switch (obj->tiling_mode) {
> > +	default:
> > +	case I915_TILING_NONE: return " ";
> > +	case I915_TILING_X: return "X";
> > +	case I915_TILING_Y: return "Y";
> > +	}
> > +}
> > +
> > +static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj)
> > +{
> > +	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
> > +	struct drm_device *dev = gem_obj->dev;
> > +	struct drm_file *file;
> > +	struct pid *pid;
> > +	int pid_nr, i, present;
> > +
> > +	/* Run a sanity check on pid_array. All entries in pid_array should
> > +	 * be a subset of the drm filelist pid entries.
> > +	 */
> > +	for (i = 0; i < MAX_OPEN_HANDLE; i++) {
> > +		present = 0;
> > +		list_for_each_entry(file, &dev->filelist, lhead) {
> > +			pid = file->pid;
> > +			pid_nr = pid->numbers[pid->level].nr;
> > +			if (pid_nr == obj->pid_array[i].pid) {
> > +				present = 1;
> > +				break;
> > +			}
> > +		}
> > +		if (present == 0) {
> > +			obj->pid_array[i].open_handle_count = 0;
> > +			obj->pid_array[i].pid = 0;
> > +		}
> > +	}
> > +}
> > +
> > +
> > +static int
> > +i915_describe_obj(struct drm_i915_error_state_buf *m,
> > +			struct drm_i915_gem_object *obj)
> > +{
> > +	int i;
> > +	struct i915_vma *vma;
> > +
> > +	err_printf(m, "%p: %8zdK  %s    %s     %s      %s     %s      %s       %s     ",
> > +		   &obj->base,
> > +		   obj->base.size / 1024,
> > +		   get_pin_flag(obj),
> > +		   get_tiling_flag(obj),
> > +		   obj->dirty ? "Y" : "N",
> > +		   obj->base.name ? "Y" : "N",
> > +		   (obj->userptr.mm != 0) ? "Y" : "N",
> > +		   obj->stolen ? "Y" : "N",
> > +		   (obj->pin_mappable || obj->fault_mappable) ? "Y" : "N");
> > +
> > +	if (obj->madv == __I915_MADV_PURGED)
> > +		err_printf(m, " purged    ");
> > +	else if (obj->madv == I915_MADV_DONTNEED)
> > +		err_printf(m, " purgeable   ");
> > +	else if (i915_gem_obj_shmem_pages_alloced(obj) != 0)
> > +		err_printf(m, " allocated   ");
> > +
> > +
> > +	list_for_each_entry(vma, &obj->vma_list, vma_link) {
> > +		if (!i915_is_ggtt(vma->vm))
> > +			err_puts(m, " PP    ");
> > +		else
> > +			err_puts(m, " G     ");
> > +		err_printf(m, "  %08lx ", vma->node.start);
> > +	}
> > +
> > +	for (i = 0; i < MAX_OPEN_HANDLE; i++) {
> > +		if (obj->pid_array[i].pid != 0) {
> > +			err_printf(m, " (%d: %d) ",
> > +			obj->pid_array[i].pid,
> > +			obj->pid_array[i].open_handle_count);
> > +		}
> > +	}
> > +
> > +	err_printf(m, "\n");
> > +
> > +	if (m->bytes == 0 && m->err)
> > +		return m->err;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i915_drm_gem_obj_info(int id, void *ptr, void *data)
> > +{
> > +	struct drm_i915_gem_object *obj = ptr;
> > +	struct drm_i915_error_state_buf *m = data;
> > +	int ret;
> > +
> > +	i915_obj_pidarray_validate(&obj->base);
> > +	ret = i915_describe_obj(m, obj);
> > +
> > +	return ret;
> > +}
> > +
> > +static int
> > +i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data)
> > +{
> > +	struct pid_stat_entry *pid_entry = data;
> > +	struct drm_i915_gem_object *obj = ptr;
> > +	struct per_file_obj_mem_info *stats = &pid_entry->stats;
> > +	struct drm_hash_item *hash_item;
> > +	int i, num_pages, obj_shared_count = 0;
> > +
> > +	i915_obj_pidarray_validate(&obj->base);
> > +
> > +	stats->NumofObjects++;
> > +
> > +	if (obj->base.name) {
> > +
> > +		if (drm_ht_find_item(&pid_entry->namelist,
> > +				(unsigned long)obj->base.name, &hash_item)) {
> > +			struct name_entry *entry =
> > +				kzalloc(sizeof(struct name_entry), GFP_KERNEL);
> > +			if (entry == NULL) {
> > +				DRM_ERROR("alloc failed\n");
> > +				return -ENOMEM;
> > +			}
> > +			entry->hash_item.key = obj->base.name;
> > +			drm_ht_insert_item(&pid_entry->namelist,
> > +						&entry->hash_item);
> > +			list_add_tail(&entry->head, &pid_entry->namefree);
> > +		} else {
> > +			DRM_DEBUG("Duplicate obj with name %d for process %s\n",
> > +				obj->base.name, stats->process_name);
> > +			return 0;
> > +		}
> > +		for (i = 0; i < MAX_OPEN_HANDLE; i++) {
> > +			if (obj->pid_array[i].pid != 0)
> > +				obj_shared_count++;
> > +		}
> > +		BUG_ON(obj_shared_count == 0);
> > +		DRM_DEBUG("Obj: %p, shared count =%d\n",
> > +				&obj->base, obj_shared_count);
> > +
> > +		if (obj_shared_count > 1)
> > +			stats->NumofShared++;
> > +		else
> > +			stats->NumofPrivate++;
> > +	} else {
> > +		obj_shared_count = 1;
> > +		stats->NumofPrivate++;
> > +	}
> > +
> > +	num_pages = i915_gem_obj_shmem_pages_alloced(obj);
> > +	if (obj->stolen) {
> > +		stats->NumofStolen++;
> > +		stats->StolenSpaceAllocated += obj->base.size;
> > +	} else if (obj->madv == __I915_MADV_PURGED) {
> > +		stats->NumofPurged++;
> > +	} else {
> > +		if (obj->madv == I915_MADV_DONTNEED) {
> > +			stats->NumofPurgeable++;
> > +			stats->PhysicalSpacePurgeable += num_pages*PAGE_SIZE;
> > +		}
> > +		if (num_pages > 0)
> > +			stats->NumofAllocated++;
> > +
> > +		if (obj_shared_count > 1) {
> > +			stats->PhysicalSpaceAllocatedShared +=
> > +					num_pages*PAGE_SIZE;
> > +			stats->PhysicalSpaceSharedProportion +=
> > +				(num_pages*PAGE_SIZE)/obj_shared_count;
> > +		} else
> > +			stats->PhysicalSpaceAllocatedPriv +=
> > +					num_pages*PAGE_SIZE;
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m,
> > +			struct drm_device *dev)
> > +{
> > +	struct drm_file *file;
> > +	struct drm_i915_private *dev_priv = dev->dev_private;
> > +
> > +	struct name_entry *entry, *next;
> > +	struct pid_stat_entry *pid_entry, *temp_entry;
> > +	int total_shared_prop_space = 0, total_priv_space = 0;
> > +
> > +	INIT_LIST_HEAD(&per_pid_stats);
> > +
> > +	err_printf(m, "\n\n  pid   Total  Shared  Priv   Purgeable  Alloced  SharedPHYsize   SharedPHYprop    PrivPHYsize   PurgeablePHYsize   process\n");
> > +
> > +	mutex_lock(&dev->struct_mutex);
> > +	list_for_each_entry(file, &dev->filelist, lhead) {
> > +
> > +		struct per_file_obj_mem_info *stats;
> > +		struct pid_stat_entry *pid_entry;
> > +		struct pid *pid = file->pid;
> > +		int pid_nr = pid->numbers[pid->level].nr;
> > +		struct drm_i915_file_private *file_priv = file->driver_priv;
> > +		int found = 0;
> > +
> > +		list_for_each_entry(pid_entry, &per_pid_stats, head) {
> > +			if (pid_entry->pid_nr == pid_nr) {
> > +				found = 1;
> > +				break;
> > +			}
> > +		}
> > +
> > +		if (!found) {
> > +			struct pid_stat_entry *new_entry =
> > +			kzalloc(sizeof(struct pid_stat_entry), GFP_KERNEL);
> > +
> > +			if (new_entry == NULL) {
> > +				DRM_ERROR("alloc failed\n");
> > +				return -ENOMEM;
> > +			}
> > +			new_entry->pid = pid;
> > +			new_entry->pid_nr = pid_nr;
> > +			list_add_tail(&new_entry->head, &per_pid_stats);
> > +			drm_ht_create(&new_entry->namelist,
> > +					DRM_MAGIC_HASH_ORDER);
> > +			INIT_LIST_HEAD(&new_entry->namefree);
> > +			new_entry->stats.process_name = file_priv->process_name;
> > +			pid_entry = new_entry;
> > +		}
> > +
> > +		idr_for_each(&file->object_idr,
> > +			&i915_drm_gem_object_per_file_summary, pid_entry);
> > +	}
> > +
> > +	list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) {
> > +		struct task_struct *task =
> > +			get_pid_task(pid_entry->pid, PIDTYPE_PID);
> > +
> > +		err_printf(m, "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK  %14zdK     %s",
> > +			   pid_entry->pid_nr,
> > +			   pid_entry->stats.NumofObjects,
> > +			   pid_entry->stats.NumofShared,
> > +			   pid_entry->stats.NumofPrivate,
> > +			   pid_entry->stats.NumofPurgeable,
> > +			   pid_entry->stats.NumofAllocated,
> > +			   pid_entry->stats.PhysicalSpaceAllocatedShared/1024,
> > +			   pid_entry->stats.PhysicalSpaceSharedProportion/1024,
> > +			   pid_entry->stats.PhysicalSpaceAllocatedPriv/1024,
> > +			   pid_entry->stats.PhysicalSpacePurgeable/1024,
> > +			   pid_entry->stats.process_name);
> > +
> > +		if (task == NULL)
> > +			err_printf(m, "*\n");
> > +		else
> > +			err_printf(m, "\n");
> > +
> > +		total_shared_prop_space +=
> > +			pid_entry->stats.PhysicalSpaceSharedProportion/1024;
> > +		total_priv_space +=
> > +			pid_entry->stats.PhysicalSpaceAllocatedPriv/1024;
> > +		list_del(&pid_entry->head);
> > +
> > +		list_for_each_entry_safe(entry, next,
> > +				&pid_entry->namefree, head) {
> > +			list_del(&entry->head);
> > +			drm_ht_remove_item(&pid_entry->namelist,
> > +				&entry->hash_item);
> > +			kfree(entry);
> > +		}
> > +		drm_ht_remove(&pid_entry->namelist);
> > +		kfree(pid_entry);
> > +	}
> > +
> > +	err_printf(m, "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n");
> > +	err_printf(m, "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n",
> > +			total_shared_prop_space, total_priv_space);
> > +
> > +	mutex_unlock(&dev->struct_mutex);
> > +
> > +	if (m->bytes == 0 && m->err)
> > +		return m->err;
> > +
> > +	return 0;
> > +}
> > +
> > +int i915_gem_get_all_obj_info(struct drm_i915_error_state_buf *m,
> > +				struct drm_device *dev)
> > +{
> > +	struct drm_file *file;
> > +	int ret;
> > +
> > +	mutex_lock(&dev->struct_mutex);
> > +	list_for_each_entry(file, &dev->filelist, lhead) {
> > +		struct pid *pid = file->pid;
> > +		int pid_nr = pid->numbers[pid->level].nr;
> > +		struct drm_i915_file_private *file_priv = file->driver_priv;
> > +
> > +		err_printf(m, "\n\n  PID  process\n");
> > +
> > +		err_printf(m, "%5d  %s\n",
> > +			   pid_nr, file_priv->process_name);
> > +
> > +		err_printf(m, "\n Obj Identifier        Size Pin Tiling Dirty Shared Vmap Stolen Mappable  AllocState Global/PP  GttOffset PIDs\n");
> > +		ret = idr_for_each(&file->object_idr,
> > +				&i915_drm_gem_obj_info, m);
> > +		if (ret)
> > +			break;
> > +	}
> > +	mutex_unlock(&dev->struct_mutex);
> > +
> > +	if (ret)
> > +		return ret;
> > +	if (m->bytes == 0 && m->err)
> > +		return m->err;
> > +
> > +	return 0;
> > +}
> > +
> > diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > index 21c025a..1650253 100644
> > --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> > @@ -353,6 +353,8 @@ i915_pages_create_for_stolen(struct drm_device *dev,
> >  	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
> >  	sg_dma_len(sg) = size;
> >  
> > +	dev_priv->mm.stolen_phys_mem_total += size;
> > +
> >  	return st;
> >  }
> >  
> > diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> > index fc11ac6..fbecf96 100644
> > --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> > +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> > @@ -161,7 +161,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
> >  	__i915_error_advance(e, len);
> >  }
> >  
> > -static void i915_error_puts(struct drm_i915_error_state_buf *e,
> > +void i915_error_puts(struct drm_i915_error_state_buf *e,
> >  			    const char *str)
> >  {
> >  	unsigned len;
> > diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> > index ae7fd8f..fc17ad7 100644
> > --- a/drivers/gpu/drm/i915/i915_sysfs.c
> > +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> > @@ -582,6 +582,86 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
> >  	return count;
> >  }
> >  
> > +static ssize_t i915_gem_clients_state_read(struct file *filp,
> > +				struct kobject *kobj,
> > +				struct bin_attribute *attr, char *buf,
> > +				loff_t off, size_t count)
> > +{
> > +
> > +	struct device *kdev = container_of(kobj, struct device, kobj);
> > +	struct drm_minor *minor = dev_to_drm_minor(kdev);
> > +	struct drm_device *dev = minor->dev;
> > +	struct drm_i915_error_state_buf error_str;
> > +	ssize_t ret_count = 0;
> > +	int ret;
> > +
> > +	ret = i915_error_state_buf_init(&error_str, count, off);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i915_get_drm_clients_info(&error_str, dev);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret_count = count < error_str.bytes ? count : error_str.bytes;
> > +
> > +	memcpy(buf, error_str.buf, ret_count);
> > +out:
> > +	i915_error_state_buf_release(&error_str);
> > +
> > +	return ret ?: ret_count;
> > +}
> > +
> > +static ssize_t i915_gem_clients_state_write(struct file *file,
> > +				struct kobject *kobj,
> > +				struct bin_attribute *attr, char *buf,
> > +				loff_t off, size_t count)
> > +{
> > +	/* Nothing to do*/
> > +
> > +	return count;
> > +}
> > +
> > +static ssize_t i915_gem_objects_state_read(struct file *filp,
> > +				struct kobject *kobj,
> > +				struct bin_attribute *attr, char *buf,
> > +				loff_t off, size_t count)
> > +{
> > +
> > +	struct device *kdev = container_of(kobj, struct device, kobj);
> > +	struct drm_minor *minor = dev_to_drm_minor(kdev);
> > +	struct drm_device *dev = minor->dev;
> > +	struct drm_i915_error_state_buf error_str;
> > +	ssize_t ret_count = 0;
> > +	int ret;
> > +
> > +	ret = i915_error_state_buf_init(&error_str, count, off);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i915_gem_get_all_obj_info(&error_str, dev);
> > +	if (ret)
> > +		goto out;
> > +
> > +	ret_count = count < error_str.bytes ? count : error_str.bytes;
> > +
> > +	memcpy(buf, error_str.buf, ret_count);
> > +out:
> > +	i915_error_state_buf_release(&error_str);
> > +
> > +	return ret ?: ret_count;
> > +}
> > +
> > +static ssize_t i915_gem_objects_state_write(struct file *file,
> > +				struct kobject *kobj,
> > +				struct bin_attribute *attr, char *buf,
> > +				loff_t off, size_t count)
> > +{
> > +	/* Nothing to do*/
> > +
> > +	return count;
> > +}
> > +
> >  static struct bin_attribute error_state_attr = {
> >  	.attr.name = "error",
> >  	.attr.mode = S_IRUSR | S_IWUSR,
> > @@ -590,6 +670,22 @@ static struct bin_attribute error_state_attr = {
> >  	.write = error_state_write,
> >  };
> >  
> > +static struct bin_attribute i915_gem_client_state_attr = {
> > +	.attr.name = "i915_gem_meminfo",
> > +	.attr.mode = S_IRUSR | S_IWUSR,
> > +	.size = 0,
> > +	.read = i915_gem_clients_state_read,
> > +	.write = i915_gem_clients_state_write,
> > +};
> > +
> > +static struct bin_attribute i915_gem_objects_state_attr = {
> > +	.attr.name = "i915_gem_objinfo",
> > +	.attr.mode = S_IRUSR | S_IWUSR,
> > +	.size = 0,
> > +	.read = i915_gem_objects_state_read,
> > +	.write = i915_gem_objects_state_write,
> > +};
> > +
> >  void i915_setup_sysfs(struct drm_device *dev)
> >  {
> >  	int ret;
> > @@ -627,6 +723,17 @@ void i915_setup_sysfs(struct drm_device *dev)
> >  				    &error_state_attr);
> >  	if (ret)
> >  		DRM_ERROR("error_state sysfs setup failed\n");
> > +
> > +	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
> > +				    &i915_gem_client_state_attr);
> > +	if (ret)
> > +		DRM_ERROR("i915_gem_client_state sysfs setup failed\n");
> > +
> > +	ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
> > +				    &i915_gem_objects_state_attr);
> > +	if (ret)
> > +		DRM_ERROR("i915_gem_objects_state sysfs setup failed\n");
> > +
> >  }
> >  
> >  void i915_teardown_sysfs(struct drm_device *dev)
> > -- 
> > 1.8.5.1
> >
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx at lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> -- 
> Jani Nikula, Intel Open Source Technology Center
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch


