[PATCH v2 1/4] drm/ttm: Create pinned list
Christian König
ckoenig.leichtzumerken at gmail.com
Fri Aug 27 06:19:27 UTC 2021
Am 26.08.21 um 19:27 schrieb Andrey Grodzovsky:
> This list will be used to capture all non VRAM BOs not
> on LRU so when device is hot unplugged we can iterate
> the list and unmap DMA mappings before device is removed.
>
> v2:
> Rename function to ttm_bo_move_to_pinned
> Keep deleting BOs from LRU in the new function
> if they have no resource struct assigned to them.
>
> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky at amd.com>
> Suggested-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/ttm/ttm_bo.c | 30 ++++++++++++++++++++++++++----
> drivers/gpu/drm/ttm/ttm_resource.c | 1 +
> include/drm/ttm/ttm_resource.h | 1 +
> 3 files changed, 28 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index 1b950b45cf4b..64594819e9e7 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -69,7 +69,29 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
> }
> }
>
> -static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
> +static inline void ttm_bo_move_to_pinned_or_del(struct ttm_buffer_object *bo)
> +{
> + struct ttm_device *bdev = bo->bdev;
> + struct ttm_resource_manager *man = NULL;
> +
> + if (bo->resource)
> + man = ttm_manager_type(bdev, bo->resource->mem_type);
> +
> + /*
> + * Some BOs might be in transient state where they don't belong
> + * to any domain at the moment, simply remove them from whatever
> + * LRU list they are still hanging on, to keep previous functionality
> + */
> + if (man && man->use_tt)
> + list_move_tail(&bo->lru, &man->pinned);
> + else
> + list_del_init(&bo->lru);
Mhm, I'm wondering if we shouldn't keep the pinned list per device then.
But either way patch is Reviewed-by: Christian König
<christian.koenig at amd.com>
Thanks,
Christian.
> +
> + if (bdev->funcs->del_from_lru_notify)
> + bdev->funcs->del_from_lru_notify(bo);
> +}
> +
> +static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
> {
> struct ttm_device *bdev = bo->bdev;
>
> @@ -98,7 +120,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
> dma_resv_assert_held(bo->base.resv);
>
> if (bo->pin_count) {
> - ttm_bo_del_from_lru(bo);
> + ttm_bo_move_to_pinned_or_del(bo);
> return;
> }
>
> @@ -339,7 +361,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
> return ret;
> }
>
> - ttm_bo_del_from_lru(bo);
> + ttm_bo_move_to_pinned_or_del(bo);
> list_del_init(&bo->ddestroy);
> spin_unlock(&bo->bdev->lru_lock);
> ttm_bo_cleanup_memtype_use(bo);
> @@ -1154,7 +1176,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
> return 0;
> }
>
> - ttm_bo_del_from_lru(bo);
> + ttm_bo_move_to_pinned_or_del(bo);
> /* TODO: Cleanup the locking */
> spin_unlock(&bo->bdev->lru_lock);
>
> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
> index 2431717376e7..91165f77fe0e 100644
> --- a/drivers/gpu/drm/ttm/ttm_resource.c
> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
> @@ -85,6 +85,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
>
> for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
> INIT_LIST_HEAD(&man->lru[i]);
> + INIT_LIST_HEAD(&man->pinned);
> man->move = NULL;
> }
> EXPORT_SYMBOL(ttm_resource_manager_init);
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 140b6b9a8bbe..1ec0d5ebb59f 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -130,6 +130,7 @@ struct ttm_resource_manager {
> */
>
> struct list_head lru[TTM_MAX_BO_PRIORITY];
> + struct list_head pinned;
>
> /*
> * Protected by @move_lock.
More information about the amd-gfx
mailing list