[v5 5/5] drm/amdgpu: fix invalid memory access in amdgpu_fence_driver_sw_fini()

Christian König christian.koenig at amd.com
Mon Jan 20 09:04:32 UTC 2025


On 17.01.25 at 08:55, Jiang Liu wrote:
> Introduce amdgpu_device_fini_schedulers() to clean up scheduler-related
> resources and avoid a possible invalid memory access.
>
> Signed-off-by: Jiang Liu <gerry at linux.alibaba.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 35 +++++++++++++++++++---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c  |  9 ------
>   2 files changed, 31 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index ee695e70fb4f..dd94f87c1f7c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2821,6 +2821,26 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
>   	return r;
>   }
>   
> +static void __amdgpu_device_fini_schedulers(struct amdgpu_device *adev,
> +					    int num_rings)
> +{
> +	int i;
> +
> +	for (i = 0; i < num_rings; ++i) {
> +		struct amdgpu_ring *ring = adev->rings[i];
> +
> +		if (!ring || ring->no_scheduler)
> +			continue;
> +
> +		drm_sched_fini(&ring->sched);
> +	}
> +}
> +
> +static void amdgpu_device_fini_schedulers(struct amdgpu_device *adev)
> +{
> +	__amdgpu_device_fini_schedulers(adev, AMDGPU_MAX_RINGS);
> +}
> +
>   static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
>   {
>   	long timeout;
> @@ -2857,27 +2877,32 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
>   		if (r) {
>   			DRM_ERROR("Failed to create scheduler on ring %s.\n",
>   				  ring->name);
> -			return r;
> +			goto out_err;
>   		}
>   		r = amdgpu_uvd_entity_init(adev, ring);
>   		if (r) {
>   			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
>   				  ring->name);
> -			return r;
> +			goto out_sched_fini;
>   		}
>   		r = amdgpu_vce_entity_init(adev, ring);
>   		if (r) {
>   			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
>   				  ring->name);
> -			return r;
> +			goto out_sched_fini;
>   		}
>   	}
>   
>   	amdgpu_xcp_update_partition_sched_list(adev);
>   
>   	return 0;
> -}
>   
> +out_sched_fini:
> +	drm_sched_fini(&adev->rings[i]->sched);
> +out_err:
> +	__amdgpu_device_fini_schedulers(adev, i);

I would just open code that as:

while (i--)
    if (adev->rings[i] && !adev->rings[i]->no_scheduler)
        drm_sched_fini(&adev->rings[i]->sched);

instead of delegating that to the __amdgpu_device_fini_schedulers() 
function.

That makes code maintenance easier should anything more need to be added 
to the amdgpu_device_fini_schedulers() function in the future.
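For illustration, with that open-coded loop the error path in
amdgpu_device_init_schedulers() would read roughly like this (just a
sketch combining the labels from the diff above with the suggestion,
not part of the posted patch):

out_sched_fini:
        /* undo the scheduler created for the current ring */
        drm_sched_fini(&adev->rings[i]->sched);
out_err:
        /* tear down the schedulers created on earlier iterations */
        while (i--)
                if (adev->rings[i] && !adev->rings[i]->no_scheduler)
                        drm_sched_fini(&adev->rings[i]->sched);
        return r;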

Regards,
Christian.

> +	return r;
> +}
>   
>   /**
>    * amdgpu_device_ip_init - run init for hardware IPs
> @@ -3424,6 +3449,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
>   
>   	amdgpu_amdkfd_device_fini_sw(adev);
>   
> +	amdgpu_device_fini_schedulers(adev);
> +
>   	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
>   		if (!adev->ip_blocks[i].status.sw)
>   			continue;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 2f24a6aa13bf..c95895a7b888 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -650,15 +650,6 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
>   		if (!ring || !ring->fence_drv.initialized)
>   			continue;
>   
> -		/*
> -		 * Notice we check for sched.ops since there's some
> -		 * override on the meaning of sched.ready by amdgpu.
> -		 * The natural check would be sched.ready, which is
> -		 * set as drm_sched_init() finishes...
> -		 */
> -		if (ring->sched.ops)
> -			drm_sched_fini(&ring->sched);
> -
>   		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
>   			dma_fence_put(ring->fence_drv.fences[j]);
>   		kfree(ring->fence_drv.fences);


