[Intel-gfx] [PATCH 6/6] drm/i915: Make the kmem slab for i915_buddy_block a global
Daniel Vetter
daniel at ffwll.ch
Wed Jul 21 18:56:50 UTC 2021
On Wed, Jul 21, 2021 at 05:25:41PM +0100, Matthew Auld wrote:
> On 21/07/2021 16:23, Jason Ekstrand wrote:
> > There's no reason I can tell why this should be per-i915_buddy_mm, and
> > doing so causes KMEM_CACHE to throw dmesg warnings because it tries to
> > create a debugfs entry with the name i915_buddy_block multiple times.
> > We could handle this by carefully giving each slab its own name, but
> > that brings its own pain because we would then have to store that
> > string somewhere and manage the lifetimes of the different slabs. The
> > most likely outcome would be a global atomic that we increment to get
> > a new name, or something like that.
> >
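(For context: KMEM_CACHE() derives the cache name from the struct name, so
every i915_buddy_init() call ends up registering a slab literally called
"i915_buddy_block". Rough, untested sketch of the pattern that trips the
warning, not the actual driver code, with the struct elided:)

#include <linux/slab.h>
#include <linux/types.h>

struct i915_buddy_block { u64 header; /* ... */ };

static int buddy_mm_create_slab(struct kmem_cache **slab)
{
	/*
	 * KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN) expands to
	 * roughly this. A second i915_buddy_mm doing the same thing
	 * registers a second cache with the same name, which is what
	 * the slab/debugfs code complains about.
	 */
	*slab = kmem_cache_create("i915_buddy_block",
				  sizeof(struct i915_buddy_block),
				  __alignof__(struct i915_buddy_block),
				  SLAB_HWCACHE_ALIGN, NULL);
	return *slab ? 0 : -ENOMEM;
}
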
> > The much easier solution is to use the i915_globals system like we do
> > for every other slab in i915. This ensures that we have exactly one of
> > them for each i915 driver load and it gets neatly created on module load
> > and destroyed on module unload. Using the globals system also means
> > that it's now tied into the shrink handler so we can properly respond to
> > low-memory situations.
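
(Conceptually the globals layer is just a list of {shrink, exit} hooks that
the module-unload and memory-pressure paths walk. A simplified sketch of
that idea, with made-up names rather than the actual i915_globals.c
internals:)

#include <linux/list.h>

/* Made-up, simplified stand-in for the registry idea. */
struct global_hooks {
	struct list_head link;
	void (*shrink)(void);	/* e.g. kmem_cache_shrink() under pressure */
	void (*exit)(void);	/* e.g. kmem_cache_destroy() on unload */
};

static LIST_HEAD(all_globals);

static void register_global(struct global_hooks *g)
{
	list_add_tail(&g->link, &all_globals);
}

static void shrink_all_globals(void)	/* called on memory pressure */
{
	struct global_hooks *g;

	list_for_each_entry(g, &all_globals, link)
		g->shrink();
}
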
> >
> > Signed-off-by: Jason Ekstrand <jason at jlekstrand.net>
> > Fixes: 88be9a0a06b7 ("drm/i915/ttm: add ttm_buddy_man")
> > Cc: Matthew Auld <matthew.auld at intel.com>
> > Cc: Christian König <christian.koenig at amd.com>
>
> It was intentionally ripped out with the idea that we would be moving the
> buddy stuff into ttm, and so part of that was trying to get rid of some
> of the i915 specifics, like this globals thing.
>
> Reviewed-by: Matthew Auld <matthew.auld at intel.com>
I just sent out a patch to put i915_globals on a diet, so maybe we can
hold this patch here a bit, unless there are other reasons why this one
is special?
Or at least not make this use the i915_globals stuff and instead just link
up the init/exit function calls directly into Jason's new table, so that
we don't have a merge conflict here?
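
Roughly what I mean, completely untested and with made-up names for the
module-level hooks, just to show the shape:

#include <linux/slab.h>

#include "i915_buddy.h"

static struct kmem_cache *slab_blocks;

int i915_buddy_module_init(void)
{
	/* One slab for the whole module, no i915_global_register(). */
	slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

void i915_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

...and then a single { .init = i915_buddy_module_init, .exit =
i915_buddy_module_exit } entry in whatever that table ends up looking
like.
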
-Daniel
>
> > ---
> >  drivers/gpu/drm/i915/i915_buddy.c   | 44 ++++++++++++++++++++++-------
> >  drivers/gpu/drm/i915/i915_buddy.h   |  3 +-
> >  drivers/gpu/drm/i915/i915_globals.c |  2 ++
> >  3 files changed, 38 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
> > index 29dd7d0310c1f..911feedad4513 100644
> > --- a/drivers/gpu/drm/i915/i915_buddy.c
> > +++ b/drivers/gpu/drm/i915/i915_buddy.c
> > @@ -8,8 +8,14 @@
> > #include "i915_buddy.h"
> > #include "i915_gem.h"
> > +#include "i915_globals.h"
> > #include "i915_utils.h"
> > +static struct i915_global_buddy {
> > + struct i915_global base;
> > + struct kmem_cache *slab_blocks;
> > +} global;
> > +
> > static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
> > struct i915_buddy_block *parent,
> > unsigned int order,
> > @@ -19,7 +25,7 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
> > GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
> > - block = kmem_cache_zalloc(mm->slab_blocks, GFP_KERNEL);
> > + block = kmem_cache_zalloc(global.slab_blocks, GFP_KERNEL);
> > if (!block)
> > return NULL;
> > @@ -34,7 +40,7 @@ static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
> > static void i915_block_free(struct i915_buddy_mm *mm,
> > struct i915_buddy_block *block)
> > {
> > - kmem_cache_free(mm->slab_blocks, block);
> > + kmem_cache_free(global.slab_blocks, block);
> > }
> > static void mark_allocated(struct i915_buddy_block *block)
> > @@ -85,15 +91,11 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
> > GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
> > - mm->slab_blocks = KMEM_CACHE(i915_buddy_block, SLAB_HWCACHE_ALIGN);
> > - if (!mm->slab_blocks)
> > - return -ENOMEM;
> > -
> > mm->free_list = kmalloc_array(mm->max_order + 1,
> > sizeof(struct list_head),
> > GFP_KERNEL);
> > if (!mm->free_list)
> > - goto out_destroy_slab;
> > + return -ENOMEM;
> > for (i = 0; i <= mm->max_order; ++i)
> > INIT_LIST_HEAD(&mm->free_list[i]);
> > @@ -145,8 +147,6 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
> > kfree(mm->roots);
> > out_free_list:
> > kfree(mm->free_list);
> > -out_destroy_slab:
> > - kmem_cache_destroy(mm->slab_blocks);
> > return -ENOMEM;
> > }
> > @@ -161,7 +161,6 @@ void i915_buddy_fini(struct i915_buddy_mm *mm)
> > kfree(mm->roots);
> > kfree(mm->free_list);
> > - kmem_cache_destroy(mm->slab_blocks);
> > }
> > static int split_block(struct i915_buddy_mm *mm,
> > @@ -410,3 +409,28 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
> > #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> > #include "selftests/i915_buddy.c"
> > #endif
> > +
> > +static void i915_global_buddy_shrink(void)
> > +{
> > + kmem_cache_shrink(global.slab_blocks);
> > +}
> > +
> > +static void i915_global_buddy_exit(void)
> > +{
> > + kmem_cache_destroy(global.slab_blocks);
> > +}
> > +
> > +static struct i915_global_buddy global = { {
> > + .shrink = i915_global_buddy_shrink,
> > + .exit = i915_global_buddy_exit,
> > +} };
> > +
> > +int __init i915_global_buddy_init(void)
> > +{
> > + global.slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
> > + if (!global.slab_blocks)
> > + return -ENOMEM;
> > +
> > + i915_global_register(&global.base);
> > + return 0;
> > +}
> > diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
> > index 37f8c42071d12..d8f26706de52f 100644
> > --- a/drivers/gpu/drm/i915/i915_buddy.h
> > +++ b/drivers/gpu/drm/i915/i915_buddy.h
> > @@ -47,7 +47,6 @@ struct i915_buddy_block {
> >   * i915_buddy_alloc* and i915_buddy_free* should suffice.
> >   */
> >  struct i915_buddy_mm {
> > -	struct kmem_cache *slab_blocks;
> >  	/* Maintain a free list for each order. */
> >  	struct list_head *free_list;
> > @@ -130,4 +129,6 @@ void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
> >  void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
> > +int i915_global_buddy_init(void);
> > +
> >  #endif
> > diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
> > index 87267e1d2ad92..e57102a4c8d16 100644
> > --- a/drivers/gpu/drm/i915/i915_globals.c
> > +++ b/drivers/gpu/drm/i915/i915_globals.c
> > @@ -8,6 +8,7 @@
> >  #include <linux/workqueue.h>
> >  #include "i915_active.h"
> > +#include "i915_buddy.h"
> >  #include "gem/i915_gem_context.h"
> >  #include "gem/i915_gem_object.h"
> >  #include "i915_globals.h"
> > @@ -87,6 +88,7 @@ static void __i915_globals_cleanup(void)
> >  static __initconst int (* const initfn[])(void) = {
> >  	i915_global_active_init,
> > +	i915_global_buddy_init,
> >  	i915_global_context_init,
> >  	i915_global_gem_context_init,
> >  	i915_global_objects_init,
> >
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch