[PATCH 4/4] drm/amdgpu: use a macro to define no xcp partition case
Christian König
ckoenig.leichtzumerken at gmail.com
Tue Aug 8 06:30:24 UTC 2023
On 18.07.23 07:13, Guchun Chen wrote:
> ~0 is used in several places to mean "no xcp partition", so improve
> code consistency by defining a macro for it.
>
> Suggested-by: Christian König <christian.koenig at amd.com>
> Signed-off-by: Guchun Chen <guchun.chen at amd.com>
> Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
I guess you already pushed this, so just for the record: Reviewed-by:
Christian König <christian.koenig at amd.com>.
I need to get faster at catching up on mails,
Christian.
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++-
> drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c          | 4 ++--
> drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h          | 2 ++
> drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c       | 4 ++--
> 4 files changed, 8 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> index a7f314ddd173..d34c3ef8f3ed 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
> @@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
> alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
> AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
> }
> - xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
> + xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
> + 0 : fpriv->xcp_id;
> } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
> domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
> alloc_flags = 0;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
> index d175e862f222..9c9cca129498 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
> @@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
> if (!adev->xcp_mgr)
> return 0;
>
> - fpriv->xcp_id = ~0;
> + fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
> for (i = 0; i < MAX_XCP; ++i) {
> if (!adev->xcp_mgr->xcp[i].ddev)
> break;
> @@ -381,7 +381,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
> }
> }
>
> - fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
> + fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
> adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
> return 0;
> }
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
> index 0f8026d64ea5..9a1036aeec2a 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
> @@ -37,6 +37,8 @@
> #define AMDGPU_XCP_FL_NONE 0
> #define AMDGPU_XCP_FL_LOCKED (1 << 0)
>
> +#define AMDGPU_XCP_NO_PARTITION (~0)
> +
> struct amdgpu_fpriv;
>
> enum AMDGPU_XCP_IP_BLOCK {
> diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
> index 16471b81a1f5..72b629a78c62 100644
> --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
> +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
> @@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
> enum AMDGPU_XCP_IP_BLOCK ip_blk;
> uint32_t inst_mask;
>
> - ring->xcp_id = ~0;
> + ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
> if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
> return;
>
> @@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
> u32 sel_xcp_id;
> int i;
>
> - if (fpriv->xcp_id == ~0) {
> + if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
> u32 least_ref_cnt = ~0;
>
> fpriv->xcp_id = 0;
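
For readers skimming the archive, here is a minimal userspace sketch of the
pattern the patch applies: name the "no partition" sentinel once and use that
name in every check instead of a bare ~0. The identifiers below
(XCP_NO_PARTITION_SKETCH, struct fpriv_sketch, pick_alloc_xcp) are made up
for illustration and are not amdgpu code; the sketch also uses ~0U rather
than ~0 purely to keep the comparison against a 32-bit field warning-free.

/* sketch.c - illustrative only, not kernel code */
#include <stdint.h>
#include <stdio.h>

/* Named sentinel instead of a bare ~0 scattered through the code. */
#define XCP_NO_PARTITION_SKETCH	(~0U)

struct fpriv_sketch {
	uint32_t xcp_id;	/* assigned partition, or the sentinel */
};

/* Same shape as the gpuvm hunk above: fall back to partition 0 when
 * no partition has been assigned to this client. */
static uint32_t pick_alloc_xcp(const struct fpriv_sketch *fpriv)
{
	return fpriv->xcp_id == XCP_NO_PARTITION_SKETCH ?
		0 : fpriv->xcp_id;
}

int main(void)
{
	struct fpriv_sketch f = { .xcp_id = XCP_NO_PARTITION_SKETCH };

	printf("xcp for alloc: %u\n", (unsigned int)pick_alloc_xcp(&f)); /* 0 */
	f.xcp_id = 3;
	printf("xcp for alloc: %u\n", (unsigned int)pick_alloc_xcp(&f)); /* 3 */
	return 0;
}

Either spelling ends up as 0xffffffff in a 32-bit field; the macro only buys
readability and greppability, which is the point of the patch.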