[PATCH 13/13] drm/ttm: flip the switch for driver allocated resources
Nirmoy
nirmodas at amd.com
Mon May 3 16:15:43 UTC 2021
Hi Christian,
On 4/30/21 11:25 AM, Christian König wrote:
> Instead of both driver and TTM allocating memory, finalize embedding the
> ttm_resource object as base into the driver backends.
>
> Signed-off-by: Christian König <christian.koenig at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 44 ++++++--------
> drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2 +-
> .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 5 +-
> drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 60 +++++++++----------
> drivers/gpu/drm/drm_gem_vram_helper.c | 3 +-
> drivers/gpu/drm/nouveau/nouveau_bo.c | 8 +--
> drivers/gpu/drm/nouveau/nouveau_mem.c | 11 ++--
> drivers/gpu/drm/nouveau/nouveau_mem.h | 14 ++---
> drivers/gpu/drm/nouveau/nouveau_ttm.c | 32 +++++-----
> drivers/gpu/drm/ttm/ttm_range_manager.c | 23 +++----
> drivers/gpu/drm/ttm/ttm_resource.c | 15 +----
> drivers/gpu/drm/ttm/ttm_sys_manager.c | 12 ++--
> drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 24 ++++----
> drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 27 ++++-----
> include/drm/ttm/ttm_range_manager.h | 3 +-
> include/drm/ttm/ttm_resource.h | 43 ++++++-------
> 16 files changed, 140 insertions(+), 186 deletions(-)
<snip>
> static const struct ttm_resource_manager_func ttm_sys_manager_func = {
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> index 82a5e6489810..354219a27f31 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
> @@ -52,16 +52,16 @@ static struct vmwgfx_gmrid_man *to_gmrid_manager(struct ttm_resource_manager *ma
> static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
> struct ttm_buffer_object *bo,
> const struct ttm_place *place,
> - struct ttm_resource *mem)
> + struct ttm_resource **res)
> {
> struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
> int id;
>
> - mem->mm_node = kmalloc(sizeof(*mem), GFP_KERNEL);
> - if (!mem->mm_node)
> + *res = kmalloc(sizeof(*res), GFP_KERNEL);
This should be sizeof(**res) or sizeof(struct ttm_resource); sizeof(*res) only allocates the size of a pointer, not of the resource itself.
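I.e. something like this should do the trick:

    *res = kmalloc(sizeof(**res), GFP_KERNEL);
    if (!*res)
            return -ENOMEM;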
Regards,
Nirmoy
> + if (!*res)
> return -ENOMEM;
>
> - ttm_resource_init(bo, place, mem->mm_node);
> + ttm_resource_init(bo, place, *res);
>
> id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
> if (id < 0)
> @@ -70,34 +70,34 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man,
> spin_lock(&gman->lock);
>
> if (gman->max_gmr_pages > 0) {
> - gman->used_gmr_pages += mem->num_pages;
> + gman->used_gmr_pages += (*res)->num_pages;
> if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
> goto nospace;
> }
>
> - mem->mm_node = gman;
> - mem->start = id;
> + (*res)->start = id;
>
> spin_unlock(&gman->lock);
> return 0;
>
> nospace:
> - gman->used_gmr_pages -= mem->num_pages;
> + gman->used_gmr_pages -= (*res)->num_pages;
> spin_unlock(&gman->lock);
> ida_free(&gman->gmr_ida, id);
> + kfree(*res);
> return -ENOSPC;
> }
>
> static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man,
> - struct ttm_resource *mem)
> + struct ttm_resource *res)
> {
> struct vmwgfx_gmrid_man *gman = to_gmrid_manager(man);
>
> - ida_free(&gman->gmr_ida, mem->start);
> + ida_free(&gman->gmr_ida, res->start);
> spin_lock(&gman->lock);
> - gman->used_gmr_pages -= mem->num_pages;
> + gman->used_gmr_pages -= res->num_pages;
> spin_unlock(&gman->lock);
> - kfree(mem->mm_node);
> + kfree(res);
> }
>
> static const struct ttm_resource_manager_func vmw_gmrid_manager_func;
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> index 8765835696ac..2a3d3468e4e0 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
> @@ -51,7 +51,7 @@ static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
> static int vmw_thp_get_node(struct ttm_resource_manager *man,
> struct ttm_buffer_object *bo,
> const struct ttm_place *place,
> - struct ttm_resource *mem)
> + struct ttm_resource **res)
> {
> struct vmw_thp_manager *rman = to_thp_manager(man);
> struct drm_mm *mm = &rman->mm;
> @@ -78,26 +78,27 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
> spin_lock(&rman->lock);
> if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
> align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
> - if (mem->num_pages >= align_pages) {
> + if (node->base.num_pages >= align_pages) {
> ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
> - align_pages, place, mem,
> - lpfn, mode);
> + align_pages, place,
> + &node->base, lpfn, mode);
> if (!ret)
> goto found_unlock;
> }
> }
>
> align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
> - if (mem->num_pages >= align_pages) {
> + if (node->base.num_pages >= align_pages) {
> ret = vmw_thp_insert_aligned(bo, mm, &node->mm_nodes[0],
> - align_pages, place, mem, lpfn,
> - mode);
> + align_pages, place, &node->base,
> + lpfn, mode);
> if (!ret)
> goto found_unlock;
> }
>
> ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
> - mem->num_pages, bo->page_alignment, 0,
> + node->base.num_pages,
> + bo->page_alignment, 0,
> place->fpfn, lpfn, mode);
> found_unlock:
> spin_unlock(&rman->lock);
> @@ -105,20 +106,18 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
> if (unlikely(ret)) {
> kfree(node);
> } else {
> - mem->mm_node = &node->mm_nodes[0];
> - mem->start = node->mm_nodes[0].start;
> + node->base.start = node->mm_nodes[0].start;
> + *res = &node->base;
> }
>
> return ret;
> }
>
> -
> -
> static void vmw_thp_put_node(struct ttm_resource_manager *man,
> - struct ttm_resource *mem)
> + struct ttm_resource *res)
> {
> + struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
> struct vmw_thp_manager *rman = to_thp_manager(man);
> - struct ttm_range_mgr_node * node = mem->mm_node;
>
> spin_lock(&rman->lock);
> drm_mm_remove_node(&node->mm_nodes[0]);
> diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
> index e02b6c8d355e..22b6fa42ac20 100644
> --- a/include/drm/ttm/ttm_range_manager.h
> +++ b/include/drm/ttm/ttm_range_manager.h
> @@ -30,8 +30,7 @@ struct ttm_range_mgr_node {
> static inline struct ttm_range_mgr_node *
> to_ttm_range_mgr_node(struct ttm_resource *res)
> {
> - return container_of(res->mm_node, struct ttm_range_mgr_node,
> - mm_nodes[1]);
> + return container_of(res, struct ttm_range_mgr_node, base);
> }
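(Side note, in case it helps anyone following along: this works because the ttm_resource is now embedded as the "base" member of the driver node, roughly

    struct ttm_range_mgr_node {
            struct ttm_resource base;
            struct drm_mm_node mm_nodes[];
    };

so container_of() simply walks back from the embedded base to the containing ttm_range_mgr_node.)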
>
> int ttm_range_man_init(struct ttm_device *bdev,
> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
> index 803e4875d779..4abb95b9fd11 100644
> --- a/include/drm/ttm/ttm_resource.h
> +++ b/include/drm/ttm/ttm_resource.h
> @@ -45,46 +45,38 @@ struct ttm_resource_manager_func {
> *
> * @man: Pointer to a memory type manager.
> * @bo: Pointer to the buffer object we're allocating space for.
> - * @placement: Placement details.
> - * @flags: Additional placement flags.
> - * @mem: Pointer to a struct ttm_resource to be filled in.
> + * @place: Placement details.
> + * @res: Resulting pointer to the ttm_resource.
> *
> * This function should allocate space in the memory type managed
> - * by @man. Placement details if
> - * applicable are given by @placement. If successful,
> - * @mem::mm_node should be set to a non-null value, and
> - * @mem::start should be set to a value identifying the beginning
> + * by @man. Placement details if applicable are given by @place. If
> + * successful, a filled in ttm_resource object should be returned in
> + * @res. @res::start should be set to a value identifying the beginning
> * of the range allocated, and the function should return zero.
> - * If the memory region accommodate the buffer object, @mem::mm_node
> - * should be set to NULL, and the function should return 0.
> + * If the manager can't fulfill the request -ENOSPC should be returned.
> * If a system error occurred, preventing the request to be fulfilled,
> * the function should return a negative error code.
> *
> - * Note that @mem::mm_node will only be dereferenced by
> - * struct ttm_resource_manager functions and optionally by the driver,
> - * which has knowledge of the underlying type.
> - *
> - * This function may not be called from within atomic context, so
> - * an implementation can and must use either a mutex or a spinlock to
> - * protect any data structures managing the space.
> + * This function may not be called from within atomic context and needs
> + * to take care of its own locking to protect any data structures
> + * managing the space.
> */
> int (*alloc)(struct ttm_resource_manager *man,
> struct ttm_buffer_object *bo,
> const struct ttm_place *place,
> - struct ttm_resource *mem);
> + struct ttm_resource **res);
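To spell out the new contract for drivers that still need converting, here is a rough and untested sketch using a hypothetical "foo" manager (foo_resource is a made-up driver struct embedding a ttm_resource as "base", foo_alloc_range() a made-up helper that returns a start page or a negative error):

    static int foo_man_alloc(struct ttm_resource_manager *man,
                             struct ttm_buffer_object *bo,
                             const struct ttm_place *place,
                             struct ttm_resource **res)
    {
            struct foo_resource *node;
            long start;

            node = kzalloc(sizeof(*node), GFP_KERNEL);
            if (!node)
                    return -ENOMEM;

            /* initialize the embedded base from the BO and placement */
            ttm_resource_init(bo, place, &node->base);

            /* reserve space in the driver allocator; the helper is assumed
             * to take the manager's own lock internally */
            start = foo_alloc_range(man, node->base.num_pages);
            if (start < 0) {
                    kfree(node);
                    return -ENOSPC; /* manager is full */
            }

            node->base.start = start;
            *res = &node->base; /* hand the embedded base back to TTM */
            return 0;
    }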
>
> /**
> * struct ttm_resource_manager_func member free
> *
> * @man: Pointer to a memory type manager.
> - * @mem: Pointer to a struct ttm_resource to be filled in.
> + * @res: Pointer to a struct ttm_resource to be freed.
> *
> - * This function frees memory type resources previously allocated
> - * and that are identified by @mem::mm_node and @mem::start. May not
> - * be called from within atomic context.
> + * This function frees memory type resources previously allocated.
> + * May not be called from within atomic context.
> */
> void (*free)(struct ttm_resource_manager *man,
> - struct ttm_resource *mem);
> + struct ttm_resource *res);
>
> /**
> * struct ttm_resource_manager_func member debug
> @@ -158,9 +150,9 @@ struct ttm_bus_placement {
> /**
> * struct ttm_resource
> *
> - * @mm_node: Memory manager node.
> - * @size: Requested size of memory region.
> - * @num_pages: Actual size of memory region in pages.
> + * @start: Start of the allocation.
> + * @num_pages: Actual size of resource in pages.
> + * @mem_type: Resource type of the allocation.
> * @placement: Placement flags.
> * @bus: Placement on io bus accessible to the CPU
> *
> @@ -168,7 +160,6 @@ struct ttm_bus_placement {
> * buffer object.
> */
> struct ttm_resource {
> - void *mm_node;
> unsigned long start;
> unsigned long num_pages;
> uint32_t mem_type;