[v6 5/5] drm/amdgpu: fix invalid memory access in amdgpu_fence_driver_sw_fini()
Christian König
christian.koenig at amd.com
Fri Jan 24 14:16:36 UTC 2025
On 24.01.25 at 06:19, Jiang Liu wrote:
> Introduce amdgpu_device_fini_schedulers() to clean up scheduler-related
> resources and avoid a possible invalid memory access.
>
> Signed-off-by: Jiang Liu <gerry at linux.alibaba.com>
I can't say much about the RAS stuff, but this patch is Reviewed-by:
Christian König <christian.koenig at amd.com>.
Alex will probably pick the patch up as soon as the series is fully
reviewed; if not, just ping me and I will push it.
Thanks,
Christian.
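
The gist of the fix is teardown ordering: drm_sched_fini() previously ran
from amdgpu_fence_driver_sw_fini(), which the commit message says could
lead to an invalid memory access; the patch moves scheduler teardown into
a dedicated amdgpu_device_fini_schedulers() called from
amdgpu_device_ip_fini(). A minimal userspace sketch of that general class
of bug follows; all names are hypothetical and none of this is the actual
amdgpu code:

/*
 * Hypothetical sketch: a "scheduler" keeps a borrowed pointer into
 * memory owned by a "fence driver". If the fence driver is freed
 * first, finalizing the scheduler dereferences freed memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct fence_drv {
	int *fences;		/* owned allocation */
};

struct scheduler {
	int *pending;		/* borrowed pointer into fences */
};

static void scheduler_fini(struct scheduler *s)
{
	if (s->pending)		/* invalid if fences was freed first */
		printf("last pending fence: %d\n", *s->pending);
	s->pending = NULL;
}

static void fence_drv_fini(struct fence_drv *fd)
{
	free(fd->fences);
	fd->fences = NULL;
}

int main(void)
{
	struct fence_drv fd = { .fences = calloc(4, sizeof(int)) };

	if (!fd.fences)
		return 1;

	struct scheduler s = { .pending = &fd.fences[0] };

	/*
	 * Wrong order: fence_drv_fini(&fd); scheduler_fini(&s);
	 * would read freed memory inside scheduler_fini().
	 *
	 * Correct order, analogous to moving drm_sched_fini() into an
	 * earlier, dedicated teardown step:
	 */
	scheduler_fini(&s);
	fence_drv_fini(&fd);
	return 0;
}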
> ---
> drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 29 +++++++++++++++++++---
> drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 9 -------
> 2 files changed, 26 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index ee695e70fb4f..1619bd2473c2 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2857,27 +2857,48 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
> if (r) {
> DRM_ERROR("Failed to create scheduler on ring %s.\n",
> ring->name);
> - return r;
> + goto out_err;
> }
> r = amdgpu_uvd_entity_init(adev, ring);
> if (r) {
> DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
> ring->name);
> - return r;
> + goto out_sched_fini;
> }
> r = amdgpu_vce_entity_init(adev, ring);
> if (r) {
> DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
> ring->name);
> - return r;
> + goto out_sched_fini;
> }
> }
>
> amdgpu_xcp_update_partition_sched_list(adev);
>
> return 0;
> +
> +out_sched_fini:
> + drm_sched_fini(&adev->rings[i]->sched);
> +out_err:
> + while (i--)
> + if (adev->rings[i] && !adev->rings[i]->no_scheduler)
> + drm_sched_fini(&adev->rings[i]->sched);
> + return r;
> }
>
> +static void amdgpu_device_fini_schedulers(struct amdgpu_device *adev)
> +{
> + int i;
> +
> + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
> + struct amdgpu_ring *ring = adev->rings[i];
> +
> + if (!ring || ring->no_scheduler)
> + continue;
> +
> + drm_sched_fini(&ring->sched);
> + }
> +}
>
> /**
> * amdgpu_device_ip_init - run init for hardware IPs
> @@ -3424,6 +3445,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
>
> amdgpu_amdkfd_device_fini_sw(adev);
>
> + amdgpu_device_fini_schedulers(adev);
> +
> for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
> if (!adev->ip_blocks[i].status.sw)
> continue;
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> index 2f24a6aa13bf..c95895a7b888 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
> @@ -650,15 +650,6 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
> if (!ring || !ring->fence_drv.initialized)
> continue;
>
> - /*
> - * Notice we check for sched.ops since there's some
> - * override on the meaning of sched.ready by amdgpu.
> - * The natural check would be sched.ready, which is
> - * set as drm_sched_init() finishes...
> - */
> - if (ring->sched.ops)
> - drm_sched_fini(&ring->sched);
> -
> for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
> dma_fence_put(ring->fence_drv.fences[j]);
> kfree(ring->fence_drv.fences);
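
The two-label unwind in amdgpu_device_init_schedulers() above follows the
usual kernel pattern: out_sched_fini finalizes the scheduler of the ring
whose entity init just failed, then falls through into out_err, which
walks back over the earlier rings. A self-contained userspace sketch of
the same idiom (hypothetical names, with a failure injected purely for
illustration):

/*
 * Hypothetical sketch of goto-based unwind for partial initialization:
 * each slot needs two init steps; on failure, undo the current slot as
 * far as it got, then unwind all fully initialized earlier slots.
 */
#include <stdio.h>

#define NUM_RINGS 4

struct ring {
	int sched_ready;
};

static int sched_init(struct ring *r)
{
	r->sched_ready = 1;
	return 0;
}

static void sched_fini(struct ring *r)
{
	r->sched_ready = 0;
}

static int entity_init(struct ring *r, int idx)
{
	return idx == 2 ? -1 : 0;	/* inject a failure at slot 2 */
}

static int init_all(struct ring *rings)
{
	int i, r;

	for (i = 0; i < NUM_RINGS; i++) {
		r = sched_init(&rings[i]);
		if (r)
			goto out_err;		/* slot i never came up */
		r = entity_init(&rings[i], i);
		if (r)
			goto out_sched_fini;	/* slot i is half up */
	}
	return 0;

out_sched_fini:
	sched_fini(&rings[i]);	/* undo the half-initialized slot */
out_err:
	while (i--)		/* unwind fully initialized slots 0..i-1 */
		sched_fini(&rings[i]);
	return r;
}

int main(void)
{
	struct ring rings[NUM_RINGS] = {0};

	printf("init_all: %d\n", init_all(rings));
	return 0;
}

The key detail is that out_err relies on i still indexing the slot that
failed, so the while (i--) loop only touches entries 0..i-1.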