[Intel-gfx] [PATCH 03/28] drm/i915: Combine unbound/bound list tracking for objects
Matthew Auld
matthew.william.auld at gmail.com
Mon Jun 10 11:01:17 UTC 2019
On Mon, 10 Jun 2019 at 08:21, Chris Wilson <chris at chris-wilson.co.uk> wrote:
>
> With async binding, we don't want to manage a bound/unbound list as we
> may end up running before we even acquire the pages. All that is
> required is keeping track of shrinkable objects, so reduce it to the
> minimum list.
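
Makes sense. Just to check my understanding, the end state is roughly
the below (my own sketch, not code from this patch; the
WILLNEED/DONTNEED split across shrink_list/purge_list is how I read the
rest of the series):

/*
 * Before: objects with pages lived on one of two lists depending on
 * whether any VMA was bound (obj->bind_count):
 *
 *         i915->mm.unbound_list: pages allocated, nothing bound
 *         i915->mm.bound_list:   at least one VMA bound
 *
 * After: a single list of shrinkable objects, placed by madvise state
 * rather than by binding.  bind_count becomes an atomic_t, presumably
 * since async binding updates it outside struct_mutex.
 */
struct i915_gem_mm_sketch {
        spinlock_t obj_lock;            /* protects the lists below */
        struct list_head shrink_list;   /* I915_MADV_WILLNEED objects */
        struct list_head purge_list;    /* I915_MADV_DONTNEED objects */
        u32 shrink_count;               /* objects tracked above */
        u64 shrink_memory;              /* bytes tracked above */
};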
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_domain.c    |  12 +-
>  drivers/gpu/drm/i915/gem/i915_gem_object.c    |   5 +-
>  .../gpu/drm/i915/gem/i915_gem_object_types.h  |   2 +-
>  drivers/gpu/drm/i915/gem/i915_gem_pages.c     |  13 +-
>  drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   3 +-
>  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  |  30 ++-
>  drivers/gpu/drm/i915/gem/i915_gem_stolen.c    |   4 +-
>  drivers/gpu/drm/i915/i915_debugfs.c           | 189 +-----------------
>  drivers/gpu/drm/i915/i915_drv.h               |  14 +-
>  drivers/gpu/drm/i915/i915_gem.c               |  30 ++-
>  drivers/gpu/drm/i915/i915_vma.c               |  30 +--
>  .../gpu/drm/i915/selftests/i915_gem_evict.c   |  16 +-
>  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |   2 +-
>  13 files changed, 75 insertions(+), 275 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
> index e5deae62681f..6115109a2810 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
> @@ -219,7 +219,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
>           * rewrite the PTE in the belief that doing so tramples upon less
>           * state and so involves less work.
>           */
> -        if (obj->bind_count) {
> +        if (atomic_read(&obj->bind_count)) {
>                  /* Before we change the PTE, the GPU must not be accessing it.
>                   * If we wait upon the object, we know that all the bound
>                   * VMA are no longer active.
> @@ -475,14 +475,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
>          }
>          mutex_unlock(&i915->ggtt.vm.mutex);
>
> -        if (i915_gem_object_is_shrinkable(obj) &&
> -            obj->mm.madv == I915_MADV_WILLNEED) {
> -                struct list_head *list;
> -
> +        if (i915_gem_object_is_shrinkable(obj)) {
>                  spin_lock(&i915->mm.obj_lock);
> -                list = obj->bind_count ?
> -                       &i915->mm.bound_list : &i915->mm.unbound_list;
> -                list_move_tail(&obj->mm.link, list);
> +                if (obj->mm.madv == I915_MADV_WILLNEED)
> +                        list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
>                  spin_unlock(&i915->mm.obj_lock);
>          }
>  }
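
So the invariant after this hunk: a shrinkable object sits on exactly
one list, chosen by madv alone, and binding no longer plays a part.
Something like this imaginary helper (name invented; caller holds
obj_lock) would express the rule:

static struct list_head *
__object_list(struct drm_i915_private *i915,
              const struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&i915->mm.obj_lock);

        return obj->mm.madv == I915_MADV_WILLNEED ?
               &i915->mm.shrink_list : &i915->mm.purge_list;
}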
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> index a0bc8f7ab780..7a07e726ec83 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
> @@ -214,7 +214,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
>
>                  mutex_unlock(&i915->drm.struct_mutex);
>
> -                GEM_BUG_ON(obj->bind_count);
> +                GEM_BUG_ON(atomic_read(&obj->bind_count));
>                  GEM_BUG_ON(obj->userfault_count);
>                  GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
>                  GEM_BUG_ON(!list_empty(&obj->lut_list));
> @@ -329,7 +329,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>
>          obj->mm.madv = I915_MADV_DONTNEED;
>
> -        if (i915_gem_object_has_pages(obj)) {
> +        if (i915_gem_object_has_pages(obj) &&
> +            i915_gem_object_is_shrinkable(obj)) {
Should be covered by discard_backing_storage() I guess. Meh.
>
>  static int i915_gem_object_info(struct seq_file *m, void *data)
>  {
> -        struct drm_i915_private *dev_priv = node_to_i915(m->private);
> -        struct drm_device *dev = &dev_priv->drm;
> -        struct i915_ggtt *ggtt = &dev_priv->ggtt;
> -        u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
> -        u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
> -        struct drm_i915_gem_object *obj;
> -        unsigned int page_sizes = 0;
> -        char buf[80];
> +        struct drm_i915_private *i915 = node_to_i915(m->private);
>          int ret;
>
>          seq_printf(m, "%u shrinkable objects, %llu bytes\n",
> -                   dev_priv->mm.shrink_count,
> -                   dev_priv->mm.shrink_memory);
> -
> -        size = count = 0;
> -        mapped_size = mapped_count = 0;
> -        purgeable_size = purgeable_count = 0;
> -        huge_size = huge_count = 0;
> -
> -        spin_lock(&dev_priv->mm.obj_lock);
> -        list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
> -                size += obj->base.size;
> -                ++count;
> -
> -                if (obj->mm.madv == I915_MADV_DONTNEED) {
> -                        purgeable_size += obj->base.size;
> -                        ++purgeable_count;
> -                }
> -
> -                if (obj->mm.mapping) {
> -                        mapped_count++;
> -                        mapped_size += obj->base.size;
> -                }
> -
> -                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
> -                        huge_count++;
> -                        huge_size += obj->base.size;
> -                        page_sizes |= obj->mm.page_sizes.sg;
> -                }
> -        }
> -        seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
> -
> -        size = count = dpy_size = dpy_count = 0;
> -        list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
> -                size += obj->base.size;
> -                ++count;
> -
> -                if (obj->pin_global) {
> -                        dpy_size += obj->base.size;
> -                        ++dpy_count;
> -                }
> -
> -                if (obj->mm.madv == I915_MADV_DONTNEED) {
> -                        purgeable_size += obj->base.size;
> -                        ++purgeable_count;
> -                }
> -
> -                if (obj->mm.mapping) {
> -                        mapped_count++;
> -                        mapped_size += obj->base.size;
> -                }
> -
> -                if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
> -                        huge_count++;
> -                        huge_size += obj->base.size;
> -                        page_sizes |= obj->mm.page_sizes.sg;
> -                }
> -        }
> -        spin_unlock(&dev_priv->mm.obj_lock);
> -
> -        seq_printf(m, "%u bound objects, %llu bytes\n",
> -                   count, size);
> -        seq_printf(m, "%u purgeable objects, %llu bytes\n",
> -                   purgeable_count, purgeable_size);
> -        seq_printf(m, "%u mapped objects, %llu bytes\n",
> -                   mapped_count, mapped_size);
> -        seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
> -                   huge_count,
> -                   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
> -                   huge_size);
> -        seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
> -                   dpy_count, dpy_size);
Worth keeping some of this tracking through
mm.shrink_list/mm.purge_list? Or perhaps not much value?
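
If we did keep it, I'd imagine it stays cheap enough under the single
lock; untested sketch, assuming DONTNEED objects end up on
mm.purge_list and the remaining shrinkables on mm.shrink_list:

static void print_shrinkable_stats(struct drm_i915_private *i915,
                                   struct seq_file *m)
{
        u64 mapped_size = 0, purgeable_size = 0;
        unsigned int mapped_count = 0, purgeable_count = 0;
        struct drm_i915_gem_object *obj;

        spin_lock(&i915->mm.obj_lock);

        list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
                if (obj->mm.mapping) {
                        mapped_count++;
                        mapped_size += obj->base.size;
                }
        }

        list_for_each_entry(obj, &i915->mm.purge_list, mm.link) {
                purgeable_count++;
                purgeable_size += obj->base.size;
        }

        spin_unlock(&i915->mm.obj_lock);

        seq_printf(m, "%u purgeable objects, %llu bytes\n",
                   purgeable_count, purgeable_size);
        seq_printf(m, "%u mapped objects, %llu bytes\n",
                   mapped_count, mapped_size);
}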
Totally not a blocker,
Reviewed-by: Matthew Auld <matthew.auld at intel.com>