[PATCH 08/11] drm/amdgpu/gfx12: add support for disable_kq

Khatri, Sunil <sukhatri at amd.com>
Thu Mar 6 11:16:47 UTC 2025


Reviewed-by: Sunil Khatri <sunil.khatri at amd.com>

On 3/6/2025 2:17 AM, Alex Deucher wrote:
> Plumb in support for disabling kernel queues.
>
> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 114 ++++++++++++++-----------
>   1 file changed, 65 insertions(+), 49 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> index 8a448a20774a8..8fde7b239fdbb 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
> @@ -1453,37 +1453,39 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
>   		return r;
>   	}
>   
> -	/* set up the gfx ring */
> -	for (i = 0; i < adev->gfx.me.num_me; i++) {
> -		for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
> -			for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
> -				if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
> -					continue;
> -
> -				r = gfx_v12_0_gfx_ring_init(adev, ring_id,
> -							    i, k, j);
> -				if (r)
> -					return r;
> -				ring_id++;
> +	if (!adev->gfx.disable_kq) {
> +		/* set up the gfx ring */
> +		for (i = 0; i < adev->gfx.me.num_me; i++) {
> +			for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) {
> +				for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) {
> +					if (!amdgpu_gfx_is_me_queue_enabled(adev, i, k, j))
> +						continue;
> +
> +					r = gfx_v12_0_gfx_ring_init(adev, ring_id,
> +								    i, k, j);
> +					if (r)
> +						return r;
> +					ring_id++;
> +				}
>   			}
>   		}
> -	}
> -
> -	ring_id = 0;
> -	/* set up the compute queues - allocate horizontally across pipes */
> -	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
> -		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
> -			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
> -				if (!amdgpu_gfx_is_mec_queue_enabled(adev,
> -								0, i, k, j))
> -					continue;
> -
> -				r = gfx_v12_0_compute_ring_init(adev, ring_id,
> -								i, k, j);
> -				if (r)
> -					return r;
>   
> -				ring_id++;
> +		ring_id = 0;
> +		/* set up the compute queues - allocate horizontally across pipes */
> +		for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
> +			for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
> +				for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
> +					if (!amdgpu_gfx_is_mec_queue_enabled(adev,
> +									     0, i, k, j))
> +						continue;
> +
> +					r = gfx_v12_0_compute_ring_init(adev, ring_id,
> +									i, k, j);
> +					if (r)
> +						return r;
> +
> +					ring_id++;
> +				}
>   			}
>   		}
>   	}
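
The gating here just wraps the existing ring-creation loops (with the
ring_id reset pulled inside the guard), so with disable_kq set neither
the GFX nor the compute kernel rings get created. Condensed, as a
sketch only with the loop bodies elided:

	if (!adev->gfx.disable_kq) {
		/* walk me/queue/pipe and call gfx_v12_0_gfx_ring_init()
	 	 * for each enabled queue, then reset ring_id and do the
		 * same for the compute queues via
		 * gfx_v12_0_compute_ring_init() */
	}

With kernel queues disabled, submissions are presumably expected to
come in through user queues rather than driver-owned rings.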
> @@ -1572,10 +1574,12 @@ static int gfx_v12_0_sw_fini(struct amdgpu_ip_block *ip_block)
>   	int i;
>   	struct amdgpu_device *adev = ip_block->adev;
>   
> -	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
> -		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
> -	for (i = 0; i < adev->gfx.num_compute_rings; i++)
> -		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
> +	if (!adev->gfx.disable_kq) {
> +		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
> +			amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
> +		for (i = 0; i < adev->gfx.num_compute_rings; i++)
> +			amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
> +	}
>   
>   	amdgpu_gfx_mqd_sw_fini(adev, 0);
>   
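Teardown stays symmetric with sw_init: only the amdgpu_ring_fini()
calls are gated, while amdgpu_gfx_mqd_sw_fini() below the guard remains
unconditional. That should be safe either way, since the underlying
free helpers tolerate NULL buffer objects. Schematically:

	if (!adev->gfx.disable_kq) {
		/* kernel rings only existed when KQs were enabled */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
	}
	amdgpu_gfx_mqd_sw_fini(adev, 0);	/* runs in either mode */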
> @@ -3418,6 +3422,9 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
>   	if (!amdgpu_async_gfx_ring)
>   		gfx_v12_0_cp_compute_enable(adev, true);
>   
> +	if (adev->gfx.disable_kq)
> +		return 0;
> +
>   	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
>   		ring = &adev->gfx.compute_ring[i];
>   
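One subtlety worth calling out: the early return sits after the
gfx_v12_0_cp_compute_enable() call, so in the non-async case the
compute engine itself is still brought up and only the KCQ MQD init
and queue mapping below get skipped:

	if (!amdgpu_async_gfx_ring)
		gfx_v12_0_cp_compute_enable(adev, true);

	if (adev->gfx.disable_kq)
		return 0;	/* no KCQs to initialize or map */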
> @@ -3477,28 +3484,34 @@ static int gfx_v12_0_cp_resume(struct amdgpu_device *adev)
>   	if (r)
>   		return r;
>   
> -	if (!amdgpu_async_gfx_ring) {
> -		r = gfx_v12_0_cp_gfx_resume(adev);
> +	if (adev->gfx.disable_kq) {
> +		r = gfx_v12_0_cp_gfx_start(adev);
>   		if (r)
>   			return r;
>   	} else {
> -		r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
> -		if (r)
> -			return r;
> -	}
> +		if (!amdgpu_async_gfx_ring) {
> +			r = gfx_v12_0_cp_gfx_resume(adev);
> +			if (r)
> +				return r;
> +		} else {
> +			r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
> +			if (r)
> +				return r;
> +		}
>   
> -	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
> -		ring = &adev->gfx.gfx_ring[i];
> -		r = amdgpu_ring_test_helper(ring);
> -		if (r)
> -			return r;
> -	}
> +		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
> +			ring = &adev->gfx.gfx_ring[i];
> +			r = amdgpu_ring_test_helper(ring);
> +			if (r)
> +				return r;
> +		}
>   
> -	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
> -		ring = &adev->gfx.compute_ring[i];
> -		r = amdgpu_ring_test_helper(ring);
> -		if (r)
> -			return r;
> +		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
> +			ring = &adev->gfx.compute_ring[i];
> +			r = amdgpu_ring_test_helper(ring);
> +			if (r)
> +				return r;
> +		}
>   	}
>   
>   	return 0;
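Flattened out, the changed part of gfx_v12_0_cp_resume() now reads
roughly as below (condensed; the per-branch error checks are merged
here for brevity). In the disable_kq case only gfx_v12_0_cp_gfx_start()
runs, and both the ring resume paths and the ring tests are skipped:

	if (adev->gfx.disable_kq) {
		/* no kernel GFX rings: just start the CP gfx engine */
		r = gfx_v12_0_cp_gfx_start(adev);
		if (r)
			return r;
	} else {
		if (!amdgpu_async_gfx_ring)
			r = gfx_v12_0_cp_gfx_resume(adev);
		else
			r = gfx_v12_0_cp_async_gfx_ring_resume(adev);
		if (r)
			return r;

		/* ring tests only make sense when kernel rings exist */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			r = amdgpu_ring_test_helper(&adev->gfx.gfx_ring[i]);
			if (r)
				return r;
		}
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
			if (r)
				return r;
		}
	}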
> @@ -3791,6 +3804,9 @@ static int gfx_v12_0_early_init(struct amdgpu_ip_block *ip_block)
>   {
>   	struct amdgpu_device *adev = ip_block->adev;
>   
> +	if (amdgpu_disable_kq == 1)
> +		adev->gfx.disable_kq = true;
> +
>   	adev->gfx.funcs = &gfx_v12_0_gfx_funcs;
>   
>   	adev->gfx.num_gfx_rings = GFX12_NUM_GFX_RINGS;
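
For context, amdgpu_disable_kq is the module parameter presumably
introduced earlier in this series; the plumbing would follow the usual
amdgpu pattern, along these lines (a sketch of what I'd expect in
amdgpu_drv.c, not part of this patch):

	/* hypothetical wiring of the parameter this hunk relies on */
	int amdgpu_disable_kq = -1;	/* -1 = default, 1 = disable KQs */
	MODULE_PARM_DESC(disable_kq,
			 "Disable kernel queues (-1 = auto (default), 1 = disable)");
	module_param_named(disable_kq, amdgpu_disable_kq, int, 0444);

With that shape, early_init() only flips adev->gfx.disable_kq when the
user explicitly asks for it (== 1), leaving default behavior unchanged.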

