[PATCH v2 3/3] drm/amdgpu: allow temporary GTT allocation under memory pressure
Yu, Lang
Lang.Yu at amd.com
Mon May 31 12:59:21 UTC 2021
[AMD Official Use Only]
>-----Original Message-----
>From: Koenig, Christian <Christian.Koenig at amd.com>
>Sent: Monday, May 31, 2021 8:49 PM
>To: Yu, Lang <Lang.Yu at amd.com>; amd-gfx at lists.freedesktop.org; dri-
>devel at lists.freedesktop.org
>Cc: Thomas Hellström <thomas_os at shipmail.org>; Olsak, Marek
><Marek.Olsak at amd.com>; Huang, Ray <Ray.Huang at amd.com>; Deucher,
>Alexander <Alexander.Deucher at amd.com>
>Subject: Re: [PATCH v2 3/3] drm/amdgpu: allow temporary GTT allocation under
>memory pressure
>
>On which branch are you working? I have problems applying that one to amd-
>staging-drm-next.
>
[Yu, Lang]
amd-staging-drm-next.
Regards,
Lang
>Christian.
>
>Am 31.05.21 um 10:22 schrieb Lang Yu:
>> Currently, we have a limited GTT memory size and need a bounce buffer
>> when doing buffer migration between VRAM and SYSTEM domain.
>>
>> The problem is under GTT memory pressure we can't do buffer migration
>> between VRAM and SYSTEM domain. But in some cases we really need that.
>> Especially when validating a VRAM backing store BO which resides in
>> SYSTEM domain.
>>
>> To solve the problem, we allow temporary GTT allocation under memory
>> pressure and do the following:
>>
>> 1. Change mgr->available into mgr->used (invert the value).
>> 2. Always account all GTT BOs to the used space.
>> 3. Only bail out when it is not a temporary allocation.
>>
>> v2: still account temporary GTT allocations
>>
>> Signed-off-by: Lang Yu <Lang.Yu at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 27 ++++++++++-----------
>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2 +-
>> 3 files changed, 17 insertions(+), 16 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> index 8860545344c7..393f55f412b7 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
>> @@ -111,14 +111,11 @@ static int amdgpu_gtt_mgr_new(struct
>ttm_resource_manager *man,
>> struct amdgpu_gtt_node *node;
>> int r;
>>
>> - spin_lock(&mgr->lock);
>> - if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
>> - atomic64_read(&mgr->available) < mem->num_pages) {
>> - spin_unlock(&mgr->lock);
>> + if ((atomic64_add_return(mem->num_pages, &mgr->used) > man->size)
>&&
>> + !(mem->placement & TTM_PL_FLAG_TEMPORARY)) {
>> + atomic64_sub(mem->num_pages, &mgr->used);
>> return -ENOSPC;
>> }
>> - atomic64_sub(mem->num_pages, &mgr->available);
>> - spin_unlock(&mgr->lock);
>>
>> if (!place->lpfn) {
>> mem->mm_node = NULL;
>> @@ -152,7 +149,7 @@ static int amdgpu_gtt_mgr_new(struct
>ttm_resource_manager *man,
>> kfree(node);
>>
>> err_out:
>> - atomic64_add(mem->num_pages, &mgr->available);
>> + atomic64_sub(mem->num_pages, &mgr->used);
>>
>> return r;
>> }
>> @@ -178,7 +175,7 @@ static void amdgpu_gtt_mgr_del(struct
>ttm_resource_manager *man,
>> kfree(node);
>> }
>>
>> - atomic64_add(mem->num_pages, &mgr->available);
>> + atomic64_sub(mem->num_pages, &mgr->used);
>> }
>>
>> /**
>> @@ -191,9 +188,8 @@ static void amdgpu_gtt_mgr_del(struct
>ttm_resource_manager *man,
>> uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
>> {
>> struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>> - s64 result = man->size - atomic64_read(&mgr->available);
>>
>> - return (result > 0 ? result : 0) * PAGE_SIZE;
>> + return atomic64_read(&mgr->used) * PAGE_SIZE;
>> }
>>
>> /**
>> @@ -234,14 +230,17 @@ static void amdgpu_gtt_mgr_debug(struct
>ttm_resource_manager *man,
>> struct drm_printer *printer)
>> {
>> struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
>> + uint64_t used, used_pages;
>>
>> spin_lock(&mgr->lock);
>> drm_mm_print(&mgr->mm, printer);
>> spin_unlock(&mgr->lock);
>>
>> - drm_printf(printer, "man size:%llu pages, gtt available:%lld pages,
>usage:%lluMB\n",
>> - man->size, (u64)atomic64_read(&mgr->available),
>> - amdgpu_gtt_mgr_usage(man) >> 20);
>> + used = amdgpu_gtt_mgr_usage(man);
>> + used_pages = used/PAGE_SIZE;
>> +
>> + drm_printf(printer, "man size:%llu pages, gtt available:%lld pages,
>usage:%lluMB\n",
>> + man->size, used_pages > man->size ? 0 : man->size - used_pages,
>> +used >> 20);
>> }
>>
>> static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func =
>> { @@ -274,7 +273,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev,
>uint64_t gtt_size)
>> size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
>> drm_mm_init(&mgr->mm, start, size);
>> spin_lock_init(&mgr->lock);
>> - atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
>> + atomic64_set(&mgr->used, 0);
>>
>> ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
>> if (ret) {
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> index c0aef327292a..129d39392859 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
>> @@ -152,9 +152,11 @@ static void amdgpu_evict_flags(struct
>ttm_buffer_object *bo,
>> abo->placements[0].lpfn = 0;
>> abo->placement.busy_placement = &abo-
>>placements[1];
>> abo->placement.num_busy_placement = 1;
>> + abo->placements[1].flags |=
>TTM_PL_FLAG_TEMPORARY;
>> } else {
>> /* Move to GTT memory */
>> amdgpu_bo_placement_from_domain(abo,
>AMDGPU_GEM_DOMAIN_GTT);
>> + abo->placements[0].flags |=
>TTM_PL_FLAG_TEMPORARY;
>> }
>> break;
>> case TTM_PL_TT:
>> @@ -538,7 +540,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object
>*bo, bool evict,
>> hop->fpfn = 0;
>> hop->lpfn = 0;
>> hop->mem_type = TTM_PL_TT;
>> - hop->flags = 0;
>> + hop->flags |= TTM_PL_FLAG_TEMPORARY;
>> return -EMULTIHOP;
>> }
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> index 2877a924086f..26b67af00550 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
>> @@ -52,7 +52,7 @@ struct amdgpu_gtt_mgr {
>> struct ttm_resource_manager manager;
>> struct drm_mm mm;
>> spinlock_t lock;
>> - atomic64_t available;
>> + atomic64_t used;
>> };
>>
>> struct amdgpu_preempt_mgr {
More information about the amd-gfx
mailing list