[PATCH] drm/ttm: make the pool shrinker lock a mutex
Daniel Vetter
daniel at ffwll.ch
Tue Jan 12 14:03:40 UTC 2021
On Mon, Jan 11, 2021 at 2:57 PM Christian König
<ckoenig.leichtzumerken at gmail.com> wrote:
>
> set_pages_wb() might sleep and so we can't do this in an atomic context.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> Reported-by: Mikhail Gavrilov <mikhail.v.gavrilov at gmail.com>
> Fixes: d099fc8f540a ("drm/ttm: new TT backend allocation pool v3")
Hm I guess the long-term proper fix would be to split the shrinker into two
parts: the first part takes the right number of entries off the list
while holding the lock; the second part does the releasing (or at least
does it in batches). lru locks should be cheap, so doing expensive stuff like
flushing or rewriting ptes under them might not be the best idea.
-Daniel
> ---
> drivers/gpu/drm/ttm/ttm_pool.c | 20 ++++++++++----------
> 1 file changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
> index a00b7ab9c14c..6a6eeba423d1 100644
> --- a/drivers/gpu/drm/ttm/ttm_pool.c
> +++ b/drivers/gpu/drm/ttm/ttm_pool.c
> @@ -66,7 +66,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
> static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
> static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
>
> -static spinlock_t shrinker_lock;
> +static struct mutex shrinker_lock;
> static struct list_head shrinker_list;
> static struct shrinker mm_shrinker;
>
> @@ -249,9 +249,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
> spin_lock_init(&pt->lock);
> INIT_LIST_HEAD(&pt->pages);
>
> - spin_lock(&shrinker_lock);
> + mutex_lock(&shrinker_lock);
> list_add_tail(&pt->shrinker_list, &shrinker_list);
> - spin_unlock(&shrinker_lock);
> + mutex_unlock(&shrinker_lock);
> }
>
> /* Remove a pool_type from the global shrinker list and free all pages */
> @@ -259,9 +259,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
> {
> struct page *p, *tmp;
>
> - spin_lock(&shrinker_lock);
> + mutex_lock(&shrinker_lock);
> list_del(&pt->shrinker_list);
> - spin_unlock(&shrinker_lock);
> + mutex_unlock(&shrinker_lock);
>
> list_for_each_entry_safe(p, tmp, &pt->pages, lru)
> ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
> @@ -302,7 +302,7 @@ static unsigned int ttm_pool_shrink(void)
> unsigned int num_freed;
> struct page *p;
>
> - spin_lock(&shrinker_lock);
> + mutex_lock(&shrinker_lock);
> pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
>
> p = ttm_pool_type_take(pt);
> @@ -314,7 +314,7 @@ static unsigned int ttm_pool_shrink(void)
> }
>
> list_move_tail(&pt->shrinker_list, &shrinker_list);
> - spin_unlock(&shrinker_lock);
> + mutex_unlock(&shrinker_lock);
>
> return num_freed;
> }
> @@ -564,7 +564,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> {
> unsigned int i;
>
> - spin_lock(&shrinker_lock);
> + mutex_lock(&shrinker_lock);
>
> seq_puts(m, "\t ");
> for (i = 0; i < MAX_ORDER; ++i)
> @@ -600,7 +600,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
> seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
> atomic_long_read(&allocated_pages), page_pool_size);
>
> - spin_unlock(&shrinker_lock);
> + mutex_unlock(&shrinker_lock);
>
> return 0;
> }
> @@ -644,7 +644,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
> if (!page_pool_size)
> page_pool_size = num_pages;
>
> - spin_lock_init(&shrinker_lock);
> + mutex_init(&shrinker_lock);
> INIT_LIST_HEAD(&shrinker_list);
>
> for (i = 0; i < MAX_ORDER; ++i) {
> --
> 2.25.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
More information about the dri-devel
mailing list