[PATCH 02/11] drm/ttm: add common accounting to the resource mgr v3
Christian König
ckoenig.leichtzumerken at gmail.com
Mon Feb 14 15:36:29 UTC 2022
Am 14.02.22 um 15:29 schrieb Matthew Auld:
> On Mon, 14 Feb 2022 at 13:23, Christian König
> <ckoenig.leichtzumerken at gmail.com> wrote:
>> Am 14.02.22 um 11:34 schrieb Matthew Auld:
>>> On Mon, 14 Feb 2022 at 09:34, Christian König
>>> <ckoenig.leichtzumerken at gmail.com> wrote:
>>>> It makes sense to have this in the common manager for debugging and
>>>> accounting of how much of the resources are used.
>>>>
>>>> v2: cleanup kerneldoc a bit
>>>> v3: drop the atomic, update counter under lock instead
>>>>
>>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>>> Reviewed-by: Huang Rui <ray.huang at amd.com> (v1)
>>>> Tested-by: Bas Nieuwenhuizen <bas at basnieuwenhuizen.nl>
>>>> ---
>>>> drivers/gpu/drm/ttm/ttm_resource.c | 30 ++++++++++++++++++++++++++++++
>>>> include/drm/ttm/ttm_resource.h | 11 +++++++++--
>>>> 2 files changed, 39 insertions(+), 2 deletions(-)
>>>>
>>>> diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
>>>> index ae40e144e728..bbb8a0f7aa14 100644
>>>> --- a/drivers/gpu/drm/ttm/ttm_resource.c
>>>> +++ b/drivers/gpu/drm/ttm/ttm_resource.c
>>>> @@ -41,6 +41,8 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>>>>                         const struct ttm_place *place,
>>>>                         struct ttm_resource *res)
>>>>  {
>>>> +        struct ttm_resource_manager *man;
>>>> +
>>>>          res->start = 0;
>>>>          res->num_pages = PFN_UP(bo->base.size);
>>>>          res->mem_type = place->mem_type;
>>>> @@ -50,6 +52,11 @@ void ttm_resource_init(struct ttm_buffer_object *bo,
>>>>          res->bus.is_iomem = false;
>>>>          res->bus.caching = ttm_cached;
>>>>          res->bo = bo;
>>>> +
>>>> +        man = ttm_manager_type(bo->bdev, place->mem_type);
>>>> +        spin_lock(&bo->bdev->lru_lock);
>>>> +        man->usage += bo->base.size;
>>>> +        spin_unlock(&bo->bdev->lru_lock);
>>>>  }
>>>>  EXPORT_SYMBOL(ttm_resource_init);
>>>>
>>>> @@ -65,6 +72,9 @@ EXPORT_SYMBOL(ttm_resource_init);
>>>>  void ttm_resource_fini(struct ttm_resource_manager *man,
>>>>                         struct ttm_resource *res)
>>>>  {
>>>> +        spin_lock(&man->bdev->lru_lock);
>>>> +        man->usage -= res->bo->base.size;
>>>> +        spin_unlock(&man->bdev->lru_lock);
>>>>  }
>>>>  EXPORT_SYMBOL(ttm_resource_fini);
>>>>
>>>> @@ -166,6 +176,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
>>>>          spin_lock_init(&man->move_lock);
>>>>          man->bdev = bdev;
>>>>          man->size = size;
>>>> +        man->usage = 0;
>>>>
>>>>          for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
>>>>                  INIT_LIST_HEAD(&man->lru[i]);
>>>> @@ -226,6 +237,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
>>>> }
>>>> EXPORT_SYMBOL(ttm_resource_manager_evict_all);
>>>>
>>>> +/**
>>>> + * ttm_resource_manager_usage
>>>> + *
>>>> + * @man: A memory manager object.
>>>> + *
>>>> + * Return how many resources are currently used.
>>> Maybe mention the units here?
>>>
>>> "Return how many resources are currently used, in bytes."
>> Well, exactly that is not correct. The whole idea here is that these are
>> driver-defined units.
> Ok, I was assuming bo->base.size to be in bytes (the kernel-doc
> seems to indicate that) and that ttm_resource_{init, fini} use this
> to track man->usage.
Yeah, good point. That one is next on my TODO list to fix.
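
To make the mismatch concrete, a rough sketch (hypothetical driver and
numbers, not from this patch):

        /*
         * Hypothetical placement whose size is counted in hardware
         * blocks rather than bytes, similar to the AMDGPU GWS/OA case.
         */
        ttm_resource_manager_init(man, bdev, 64 /* blocks, not bytes */);

        /*
         * ttm_resource_init() in the hunk above then does
         *     man->usage += bo->base.size;
         * and bo->base.size is documented in bytes, so size and usage
         * end up in different units for such a manager until that is
         * fixed.
         */
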
Christian.
>
>> E.g. for the AMDGPU OA and GWS resources the unit is essentially a
>> hardware block.
>>
>> Regards,
>> Christian.
>>
>>> Anyway,
>>> Reviewed-by: Matthew Auld <matthew.auld at intel.com>
>>>
>>>> + */
>>>> +uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
>>>> +{
>>>> +        uint64_t usage;
>>>> +
>>>> +        spin_lock(&man->bdev->lru_lock);
>>>> +        usage = man->usage;
>>>> +        spin_unlock(&man->bdev->lru_lock);
>>>> +        return usage;
>>>> +}
>>>> +EXPORT_SYMBOL(ttm_resource_manager_usage);
>>>> +
>>>>  /**
>>>>   * ttm_resource_manager_debug
>>>>   *
>>>> @@ -238,6 +267,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
>>>>          drm_printf(p, " use_type: %d\n", man->use_type);
>>>>          drm_printf(p, " use_tt: %d\n", man->use_tt);
>>>>          drm_printf(p, " size: %llu\n", man->size);
>>>> +        drm_printf(p, " usage: %llu\n", ttm_resource_manager_usage(man));
>>>>          if (man->func->debug)
>>>>                  man->func->debug(man, p);
>>>>  }
>>>> diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
>>>> index 555a11fb8a7f..323c14a30c6b 100644
>>>> --- a/include/drm/ttm/ttm_resource.h
>>>> +++ b/include/drm/ttm/ttm_resource.h
>>>> @@ -27,6 +27,7 @@
>>>>
>>>> #include <linux/types.h>
>>>> #include <linux/mutex.h>
>>>> +#include <linux/atomic.h>
>>>> #include <linux/dma-buf-map.h>
>>>> #include <linux/dma-fence.h>
>>>> #include <drm/drm_print.h>
>>>> @@ -130,10 +131,15 @@ struct ttm_resource_manager {
>>>>          struct dma_fence *move;
>>>>
>>>>          /*
>>>> -         * Protected by the global->lru_lock.
>>>> +         * Protected by the bdev->lru_lock.
>>>>          */
>>>> -
>>>>          struct list_head lru[TTM_MAX_BO_PRIORITY];
>>>> +
>>>> +        /**
>>>> +         * @usage: How much of the resources are used, protected by the
>>>> +         * bdev->lru_lock.
>>>> +         */
>>>> +        uint64_t usage;
>>>>  };
>>>>
>>>> /**
>>>> @@ -283,6 +289,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
>>>>  int ttm_resource_manager_evict_all(struct ttm_device *bdev,
>>>>                                     struct ttm_resource_manager *man);
>>>>
>>>> +uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man);
>>>>  void ttm_resource_manager_debug(struct ttm_resource_manager *man,
>>>>                                  struct drm_printer *p);
>>>>
>>>> --
>>>> 2.25.1
>>>>
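
For reference, a minimal sketch of how a driver could consume the new
helper, e.g. from a debugfs dump (hypothetical driver structure and
callback, not part of this patch; assumes <linux/seq_file.h> and the
TTM headers):

        /* Hypothetical debugfs callback printing the per-manager accounting. */
        static int drv_vram_mm_show(struct seq_file *m, void *unused)
        {
                struct drv_device *drv = m->private;   /* hypothetical driver struct */
                struct ttm_resource_manager *man =
                        ttm_manager_type(&drv->bdev, TTM_PL_VRAM);

                /* Both values are in whatever unit the manager was sized in. */
                seq_printf(m, "usage: %llu of %llu (driver-defined units)\n",
                           ttm_resource_manager_usage(man), man->size);
                return 0;
        }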