[PATCH] drm/amdgpu: only export available rings to mesa for enabling kq|uq

Liang, Prike Prike.Liang at amd.com
Thu May 29 02:10:55 UTC 2025


[Public]

> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> On Behalf Of Liang, Prike
> Sent: Thursday, May 29, 2025 9:48 AM
> To: Koenig, Christian <Christian.Koenig at amd.com>; amd-gfx at lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
> Subject: RE: [PATCH] drm/amdgpu: only export available rings to mesa for enabling kq|uq
>
> [Public]
>
> > From: Koenig, Christian <Christian.Koenig at amd.com>
> > Sent: Wednesday, May 28, 2025 7:21 PM
> > To: Liang, Prike <Prike.Liang at amd.com>; amd-gfx at lists.freedesktop.org
> > Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
> > Subject: Re: [PATCH] drm/amdgpu: only export available rings to mesa for enabling kq|uq
> >
> > On 5/28/25 10:37, Prike Liang wrote:
> > > The kernel driver should only export the available rings to Mesa
> > > when userq is disabled; otherwise, the userq IP mask will be
> > > cleared in Mesa.
> >
> > Hui? That doesn't sound correct to me.
> >
> > That userq is disabled in Mesa when kernel queues are available is
> > intentional for now.

Why does Mesa need to disable UQ when the kernel queue is enabled? If that is intentional, how can the driver support having the kernel queue and the user queue enabled simultaneously?

Thanks,
Prike
>
> Currently, when the kernel driver sets user_queue to 1 to enable both the kernel
> queue and the user queue, the user queue IP mask is still cleared in Mesa, so the
> driver effectively supports only the kernel queue and can't enable the user queue
> at the same time.
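>
> Roughly, the Mesa-side gating behaves like the sketch below (a hypothetical
> illustration only; the names filter_userq_ip_mask, have_kernel_rings and
> userq_ip_mask are made up, not the actual Mesa identifiers):
>
>     #include <stdbool.h>
>     #include <stdint.h>
>
>     /* Sketch: when the kernel reports available rings for an IP, the
>      * userq path for that IP is dropped, so kq and uq never coexist.
>      */
>     static uint32_t filter_userq_ip_mask(uint32_t userq_ip_mask,
>                                          unsigned int ip,
>                                          bool have_kernel_rings)
>     {
>         if (have_kernel_rings)
>             userq_ip_mask &= ~(1u << ip); /* userq IP mask cleaned up */
>         return userq_ip_mask;
>     }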
>
> Thanks,
> Prike
>
> > Regards,
> > Christian.
> >
> > >
> > > Signed-off-by: Prike Liang <Prike.Liang at amd.com>
> > > ---
> > >  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 20 ++++++++++----------
> > >  1 file changed, 10 insertions(+), 10 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > > index d2ce7d86dbc8..43d86c09d8bb 100644
> > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
> > > @@ -409,7 +409,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >             type = AMD_IP_BLOCK_TYPE_GFX;
> > >             for (i = 0; i < adev->gfx.num_gfx_rings; i++)
> > >                     if (adev->gfx.gfx_ring[i].sched.ready &&
> > > -                       !adev->gfx.gfx_ring[i].no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             ib_start_alignment = 32;
> > >             ib_size_alignment = 32;
> > > @@ -418,7 +418,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >             type = AMD_IP_BLOCK_TYPE_GFX;
> > >             for (i = 0; i < adev->gfx.num_compute_rings; i++)
> > >                     if (adev->gfx.compute_ring[i].sched.ready &&
> > > -                       !adev->gfx.compute_ring[i].no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             ib_start_alignment = 32;
> > >             ib_size_alignment = 32;
> > > @@ -427,7 +427,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >             type = AMD_IP_BLOCK_TYPE_SDMA;
> > >             for (i = 0; i < adev->sdma.num_instances; i++)
> > >                     if (adev->sdma.instance[i].ring.sched.ready &&
> > > -                       !adev->sdma.instance[i].ring.no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             ib_start_alignment = 256;
> > >             ib_size_alignment = 4;
> > > @@ -439,7 +439,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >                             continue;
> > >
> > >                     if (adev->uvd.inst[i].ring.sched.ready &&
> > > -                       !adev->uvd.inst[i].ring.no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             }
> > >             ib_start_alignment = 256;
> > > @@ -449,7 +449,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >             type = AMD_IP_BLOCK_TYPE_VCE;
> > >             for (i = 0; i < adev->vce.num_rings; i++)
> > >                     if (adev->vce.ring[i].sched.ready &&
> > > -                       !adev->vce.ring[i].no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             ib_start_alignment = 256;
> > >             ib_size_alignment = 4;
> > > @@ -462,7 +462,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >
> > >                     for (j = 0; j < adev->uvd.num_enc_rings; j++)
> > >                             if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
> > > -                               !adev->uvd.inst[i].ring_enc[j].no_user_submission)
> > > +                               adev->gfx.disable_uq)
> > >                                     ++num_rings;
> > >             }
> > >             ib_start_alignment = 256;
> > > @@ -475,7 +475,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >                             continue;
> > >
> > >                     if (adev->vcn.inst[i].ring_dec.sched.ready &&
> > > -                       !adev->vcn.inst[i].ring_dec.no_user_submission)
> > > +                       adev->gfx.disable_uq)
> > >                             ++num_rings;
> > >             }
> > >             ib_start_alignment = 256;
> > > @@ -489,7 +489,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >
> > >                     for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
> > >                             if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
> > > -                               !adev->vcn.inst[i].ring_enc[j].no_user_submission)
> > > +                               adev->gfx.disable_uq)
> > >                                     ++num_rings;
> > >             }
> > >             ib_start_alignment = 256;
> > > @@ -505,7 +505,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >
> > >                     for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
> > >                             if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
> > > -                               !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
> > > +                               adev->gfx.disable_uq)
> > >                                     ++num_rings;
> > >             }
> > >             ib_start_alignment = 256;
> > > @@ -514,7 +514,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
> > >     case AMDGPU_HW_IP_VPE:
> > >             type = AMD_IP_BLOCK_TYPE_VPE;
> > >             if (adev->vpe.ring.sched.ready &&
> > > -               !adev->vpe.ring.no_user_submission)
> > > +               adev->gfx.disable_uq)
> > >                     ++num_rings;
> > >             ib_start_alignment = 256;
> > >             ib_size_alignment = 4;
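
For reference, userspace observes the effect of this function through the
AMDGPU_INFO_HW_IP_INFO query: num_rings above is folded into the
available_rings mask that Mesa consumes. A minimal sketch of that query
path, assuming libdrm's amdgpu_query_hw_ip_info() (error handling trimmed):

    #include <amdgpu.h>
    #include <amdgpu_drm.h>

    /* Sketch: query which GFX rings the kernel exports; an empty
     * available_rings mask means no kernel-queue submission for this IP.
     */
    static uint32_t gfx_available_rings(amdgpu_device_handle dev)
    {
        struct drm_amdgpu_info_hw_ip info = {0};

        if (amdgpu_query_hw_ip_info(dev, AMDGPU_HW_IP_GFX, 0, &info))
            return 0; /* query failed, treat as no rings */

        return info.available_rings; /* bitmask derived from num_rings */
    }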


