[PATCH 1/2] drm/amdgpu/vcn: identify unified queue in sw init

Alex Deucher alexdeucher at gmail.com
Wed Jul 10 18:17:31 UTC 2024


On Wed, Jul 10, 2024 at 2:10 PM <boyuan.zhang at amd.com> wrote:
>
> From: Boyuan Zhang <boyuan.zhang at amd.com>
>
> Determine whether VCN is using a unified queue in sw_init, instead of calling
> functions later on.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang at amd.com>
> ---
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 39 ++++++++++---------------
>  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h |  1 +
>  2 files changed, 16 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> index dad5f9722e14..43bed7730bd1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
> @@ -139,6 +139,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
>                 }
>         }
>
> +       /* from vcn4 and above, only unified queue is used */
> +       adev->vcn.using_unified_queue =
> +               amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0) ? true : false;

You can drop the "? true : false" part.  A lot of static checkers will
complain about that as it's not necessary.

With that fixed, the series is:
Acked-by: Alex Deucher <alexander.deucher at amd.com>

> +
>         hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
>         adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
>
> @@ -266,18 +270,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
>         return 0;
>  }
>
> -/* from vcn4 and above, only unified queue is used */
> -static bool amdgpu_vcn_using_unified_queue(struct amdgpu_ring *ring)
> -{
> -       struct amdgpu_device *adev = ring->adev;
> -       bool ret = false;
> -
> -       if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0))
> -               ret = true;
> -
> -       return ret;
> -}
> -
>  bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
>  {
>         bool ret = false;
> @@ -747,12 +739,11 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>         struct amdgpu_job *job;
>         struct amdgpu_ib *ib;
>         uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
> -       bool sq = amdgpu_vcn_using_unified_queue(ring);
>         uint32_t *ib_checksum;
>         uint32_t ib_pack_in_dw;
>         int i, r;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 ib_size_dw += 8;
>
>         r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
> @@ -765,7 +756,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>         ib->length_dw = 0;
>
>         /* single queue headers */
> -       if (sq) {
> +       if (adev->vcn.using_unified_queue) {
>                 ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
>                                                 + 4 + 2; /* engine info + decoding ib in dw */
>                 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
> @@ -784,7 +775,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
>         for (i = ib->length_dw; i < ib_size_dw; ++i)
>                 ib->ptr[i] = 0x0;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
>
>         r = amdgpu_job_submit_direct(job, ring, &f);
> @@ -874,15 +865,15 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>                                          struct dma_fence **fence)
>  {
>         unsigned int ib_size_dw = 16;
> +       struct amdgpu_device *adev = ring->adev;
>         struct amdgpu_job *job;
>         struct amdgpu_ib *ib;
>         struct dma_fence *f = NULL;
>         uint32_t *ib_checksum = NULL;
>         uint64_t addr;
> -       bool sq = amdgpu_vcn_using_unified_queue(ring);
>         int i, r;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 ib_size_dw += 8;
>
>         r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
> @@ -896,7 +887,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>
>         ib->length_dw = 0;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
>
>         ib->ptr[ib->length_dw++] = 0x00000018;
> @@ -918,7 +909,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
>         for (i = ib->length_dw; i < ib_size_dw; ++i)
>                 ib->ptr[i] = 0x0;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
>
>         r = amdgpu_job_submit_direct(job, ring, &f);
> @@ -941,15 +932,15 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>                                           struct dma_fence **fence)
>  {
>         unsigned int ib_size_dw = 16;
> +       struct amdgpu_device *adev = ring->adev;
>         struct amdgpu_job *job;
>         struct amdgpu_ib *ib;
>         struct dma_fence *f = NULL;
>         uint32_t *ib_checksum = NULL;
>         uint64_t addr;
> -       bool sq = amdgpu_vcn_using_unified_queue(ring);
>         int i, r;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 ib_size_dw += 8;
>
>         r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
> @@ -963,7 +954,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>
>         ib->length_dw = 0;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
>
>         ib->ptr[ib->length_dw++] = 0x00000018;
> @@ -985,7 +976,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
>         for (i = ib->length_dw; i < ib_size_dw; ++i)
>                 ib->ptr[i] = 0x0;
>
> -       if (sq)
> +       if (adev->vcn.using_unified_queue)
>                 amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
>
>         r = amdgpu_job_submit_direct(job, ring, &f);
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> index 081695e74932..838c0935f683 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
> @@ -329,6 +329,7 @@ struct amdgpu_vcn {
>
>         uint16_t inst_mask;
>         uint8_t num_inst_per_aid;
> +       bool using_unified_queue;
>  };
>
>  struct amdgpu_fw_shared_rb_ptrs_struct {
> --
> 2.34.1
>


More information about the amd-gfx mailing list