[PATCH 2/7] drm/amdgpu/userq: optimize enforce isolation and s/r
Liang, Prike
Prike.Liang at amd.com
Fri Apr 18 13:04:43 UTC 2025
[Public]
Reviewed-by: Prike Liang <Prike.Liang at amd.com>
Regards,
Prike
> -----Original Message-----
> From: amd-gfx <amd-gfx-bounces at lists.freedesktop.org> On Behalf Of Alex
> Deucher
> Sent: Thursday, April 17, 2025 6:21 AM
> To: amd-gfx at lists.freedesktop.org
> Cc: Deucher, Alexander <Alexander.Deucher at amd.com>
> Subject: [PATCH 2/7] drm/amdgpu/userq: optimize enforce isolation and s/r
>
> If user queues are disabled for all IPs in the case of suspend and resume and for
> gfx/compute in the case of enforce isolation, we can return early.
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c | 18 ++++++++++++++++++
> 1 file changed, 18 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> index dbc1fcdc0f73b..04583f9d134f1 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
> @@ -758,12 +758,16 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
>
> int amdgpu_userq_suspend(struct amdgpu_device *adev)
> {
> + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
> const struct amdgpu_userq_funcs *userq_funcs;
> struct amdgpu_usermode_queue *queue;
> struct amdgpu_userq_mgr *uqm, *tmp;
> int queue_id;
> int ret = 0;
>
> + if (!ip_mask)
> + return 0;
> +
> mutex_lock(&adev->userq_mutex);
> list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
> cancel_delayed_work_sync(&uqm->resume_work);
> @@ -778,12 +782,16 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
>
> int amdgpu_userq_resume(struct amdgpu_device *adev)
> {
> + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
> const struct amdgpu_userq_funcs *userq_funcs;
> struct amdgpu_usermode_queue *queue;
> struct amdgpu_userq_mgr *uqm, *tmp;
> int queue_id;
> int ret = 0;
>
> + if (!ip_mask)
> + return 0;
> +
> mutex_lock(&adev->userq_mutex);
> list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
> 		idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
> @@ -798,12 +806,17 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
> int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
> 						  u32 idx)
> {
> + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
> const struct amdgpu_userq_funcs *userq_funcs;
> struct amdgpu_usermode_queue *queue;
> struct amdgpu_userq_mgr *uqm, *tmp;
> int queue_id;
> int ret = 0;
>
> + /* only need to stop gfx/compute */
> +	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
> +		return 0;
> +
> mutex_lock(&adev->userq_mutex);
> if (adev->userq_halt_for_enforce_isolation)
> 		dev_warn(adev->dev, "userq scheduling already stopped!\n");
> @@ -826,12 +839,17 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
> int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
> 						   u32 idx)
> {
> + u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
> const struct amdgpu_userq_funcs *userq_funcs;
> struct amdgpu_usermode_queue *queue;
> struct amdgpu_userq_mgr *uqm, *tmp;
> int queue_id;
> int ret = 0;
>
> +	/* only need to start gfx/compute */
> +	if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
> +		return 0;
> +
> mutex_lock(&adev->userq_mutex);
> if (!adev->userq_halt_for_enforce_isolation)
> dev_warn(adev->dev, "userq scheduling already started!\n");
> --
> 2.49.0
More information about the amd-gfx
mailing list