[PATCH libdrm 2/2] libdrm: clean up non list code path for vamgr
Christian König
ckoenig.leichtzumerken at gmail.com
Fri Jan 12 08:04:24 UTC 2018
On 12.01.2018 at 06:45, Chunming Zhou wrote:
> Change-Id: I7ca19a1f6404356a6c69ab4af27c8e13454f0279
> Signed-off-by: Chunming Zhou <david1.zhou at amd.com>
> ---
> amdgpu/amdgpu_internal.h | 2 -
> amdgpu/amdgpu_vamgr.c | 152 ++++++++++++++---------------------------------
> 2 files changed, 46 insertions(+), 108 deletions(-)
>
> diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
> index fd522f39..7484780b 100644
> --- a/amdgpu/amdgpu_internal.h
> +++ b/amdgpu/amdgpu_internal.h
> @@ -53,8 +53,6 @@ struct amdgpu_bo_va_hole {
> };
>
> struct amdgpu_bo_va_mgr {
> - /* the start virtual address */
> - uint64_t va_offset;
> uint64_t va_min;
> uint64_t va_max;
Are va_min and va_max actually still used after this series? If not, I
would remove them as well.
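If nothing outside of amdgpu_vamgr_init ends up reading them, the
function could just use its parameters directly. An untested sketch of
what I mean (with a calloc check thrown in, which the hunk above should
probably grow here as well):

	drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
					   uint64_t max, uint64_t alignment)
	{
		struct amdgpu_bo_va_hole *n;

		mgr->va_alignment = alignment;

		list_inithead(&mgr->va_holes);
		pthread_mutex_init(&mgr->bo_va_mutex, NULL);
		pthread_mutex_lock(&mgr->bo_va_mutex);
		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		if (n) {
			/* The single initial hole covers the whole range,
			 * so va_min/va_max don't need to be stored. */
			n->size = max;
			n->offset = start;
			list_add(&n->list, &mgr->va_holes);
		}
		pthread_mutex_unlock(&mgr->bo_va_mutex);
	}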
Apart from that, I would indeed add the warning for when the calloc
during free fails, as the FIXME in the patch already suggests.
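Something along these lines in amdgpu_vamgr_free_va should do it
(untested; needs stdio.h/inttypes.h if amdgpu_vamgr.c doesn't include
them already):

	next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
	if (!next) {
		/* We can't track the hole, so the range is lost. */
		fprintf(stderr, "amdgpu: leaking VA range 0x%" PRIx64
			"-0x%" PRIx64 " because of allocation failure\n",
			va, va + size);
		goto out;
	}
	next->size = size;
	next->offset = va;
	list_add(&next->list, &hole->list);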
With that fixed, the series is Reviewed-by: Christian König
<christian.koenig at amd.com>.
Regards,
Christian.
> struct list_head va_holes;
> diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
> index 66bb8ecd..b826ac81 100644
> --- a/amdgpu/amdgpu_vamgr.c
> +++ b/amdgpu/amdgpu_vamgr.c
> @@ -61,13 +61,20 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
> drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
> uint64_t max, uint64_t alignment)
> {
> - mgr->va_offset = start;
> + struct amdgpu_bo_va_hole *n;
> +
> mgr->va_min = start;
> mgr->va_max = max;
> mgr->va_alignment = alignment;
>
> list_inithead(&mgr->va_holes);
> pthread_mutex_init(&mgr->bo_va_mutex, NULL);
> + pthread_mutex_lock(&mgr->bo_va_mutex);
> + n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
> + n->size = mgr->va_max;
> + n->offset = mgr->va_min;
> + list_add(&n->list, &mgr->va_holes);
> + pthread_mutex_unlock(&mgr->bo_va_mutex);
> }
>
> drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
> @@ -136,35 +143,8 @@ amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
> }
> }
>
> - if (base_required) {
> - if (base_required < mgr->va_offset) {
> - pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return AMDGPU_INVALID_VA_ADDRESS;
> - }
> - offset = mgr->va_offset;
> - waste = base_required - mgr->va_offset;
> - } else {
> - offset = mgr->va_offset;
> - waste = offset % alignment;
> - waste = waste ? alignment - waste : 0;
> - }
> -
> - if (offset + waste + size > mgr->va_max) {
> - pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return AMDGPU_INVALID_VA_ADDRESS;
> - }
> -
> - if (waste) {
> - n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
> - n->size = waste;
> - n->offset = offset;
> - list_add(&n->list, &mgr->va_holes);
> - }
> -
> - offset += waste;
> - mgr->va_offset += size + waste;
> pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return offset;
> + return AMDGPU_INVALID_VA_ADDRESS;
> }
>
> static uint64_t amdgpu_vamgr_find_va_in_range(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
> @@ -232,41 +212,14 @@ static uint64_t amdgpu_vamgr_find_va_in_range(struct amdgpu_bo_va_mgr *mgr, uint
> }
> }
>
> - if (mgr->va_offset > range_max) {
> - pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return AMDGPU_INVALID_VA_ADDRESS;
> - } else if (mgr->va_offset > range_min) {
> - offset = mgr->va_offset;
> - waste = offset % alignment;
> - waste = waste ? alignment - waste : 0;
> - if (offset + waste + size > range_max) {
> - pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return AMDGPU_INVALID_VA_ADDRESS;
> - }
> - } else {
> - offset = mgr->va_offset;
> - waste = range_min % alignment;
> - waste = waste ? alignment - waste : 0;
> - waste += range_min - offset ;
> - }
> -
> - if (waste) {
> - n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
> - n->size = waste;
> - n->offset = offset;
> - list_add(&n->list, &mgr->va_holes);
> - }
> -
> - offset += waste;
> - mgr->va_offset = size + offset;
> pthread_mutex_unlock(&mgr->bo_va_mutex);
> - return offset;
> + return AMDGPU_INVALID_VA_ADDRESS;
> }
>
> drm_private void
> amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
> {
> - struct amdgpu_bo_va_hole *hole;
> + struct amdgpu_bo_va_hole *hole, *next;
>
> if (va == AMDGPU_INVALID_VA_ADDRESS)
> return;
> @@ -274,61 +227,48 @@ amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
> size = ALIGN(size, mgr->va_alignment);
>
> pthread_mutex_lock(&mgr->bo_va_mutex);
> - if ((va + size) == mgr->va_offset) {
> - mgr->va_offset = va;
> - /* Delete uppermost hole if it reaches the new top */
> - if (!LIST_IS_EMPTY(&mgr->va_holes)) {
> - hole = container_of(mgr->va_holes.next, hole, list);
> - if ((hole->offset + hole->size) == va) {
> - mgr->va_offset = hole->offset;
> - list_del(&hole->list);
> - free(hole);
> - }
> - }
> - } else {
> - struct amdgpu_bo_va_hole *next;
>
> - hole = container_of(&mgr->va_holes, hole, list);
> - LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
> - if (next->offset < va)
> - break;
> - hole = next;
> - }
> + hole = container_of(&mgr->va_holes, hole, list);
> + LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
> + if (next->offset < va)
> + break;
> + hole = next;
> + }
>
> - if (&hole->list != &mgr->va_holes) {
> - /* Grow upper hole if it's adjacent */
> - if (hole->offset == (va + size)) {
> - hole->offset = va;
> - hole->size += size;
> - /* Merge lower hole if it's adjacent */
> - if (next != hole
> - && &next->list != &mgr->va_holes
> - && (next->offset + next->size) == va) {
> - next->size += hole->size;
> - list_del(&hole->list);
> - free(hole);
> - }
> - goto out;
> + if (&hole->list != &mgr->va_holes) {
> + /* Grow upper hole if it's adjacent */
> + if (hole->offset == (va + size)) {
> + hole->offset = va;
> + hole->size += size;
> + /* Merge lower hole if it's adjacent */
> + if (next != hole &&
> + &next->list != &mgr->va_holes &&
> + (next->offset + next->size) == va) {
> + next->size += hole->size;
> + list_del(&hole->list);
> + free(hole);
> }
> - }
> -
> - /* Grow lower hole if it's adjacent */
> - if (next != hole && &next->list != &mgr->va_holes &&
> - (next->offset + next->size) == va) {
> - next->size += size;
> goto out;
> }
> + }
>
> - /* FIXME on allocation failure we just lose virtual address space
> - * maybe print a warning
> - */
> - next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
> - if (next) {
> - next->size = size;
> - next->offset = va;
> - list_add(&next->list, &hole->list);
> - }
> + /* Grow lower hole if it's adjacent */
> + if (next != hole && &next->list != &mgr->va_holes &&
> + (next->offset + next->size) == va) {
> + next->size += size;
> + goto out;
> }
> +
> + /* FIXME on allocation failure we just lose virtual address space
> + * maybe print a warning
> + */
> + next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
> + if (next) {
> + next->size = size;
> + next->offset = va;
> + list_add(&next->list, &hole->list);
> + }
> +
> out:
> pthread_mutex_unlock(&mgr->bo_va_mutex);
> }