[PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code

Deucher, Alexander <Alexander.Deucher at amd.com>
Wed Jun 7 16:04:06 UTC 2017


> -----Original Message-----
> From: amd-gfx [mailto:amd-gfx-bounces at lists.freedesktop.org] On Behalf Of Tom St Denis
> Sent: Wednesday, June 07, 2017 12:01 PM
> To: amd-gfx at lists.freedesktop.org
> Subject: Re: [PATCH 2/3] drm/amdgpu: move gfx_v*_0_compute_queue_acquire to common code
> 
> Hi Alex,
> 
> This failed to apply on top of amd-staging-4.11... Am I trying the wrong
> branch?

Sorry, I have this patch in my tree as well, which I just pushed to amd-staging-4.11:
https://patchwork.freedesktop.org/patch/160199/

Alex

> 
> Cheers,
> Tom
> 
> On 07/06/17 11:10 AM, Alex Deucher wrote:
> > Same function was duplicated in all gfx IP files.
> >
> > Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
> > ---
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 37 +++++++++++++++++++++++++++++++
> >   drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h |  2 ++
> >   drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c   | 39 +--------------------------------
> >   drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c   | 39 +--------------------------------
> >   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c   | 39 +--------------------------------
> >   5 files changed, 42 insertions(+), 114 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > index 1994335..51a9708 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
> > @@ -108,3 +108,40 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
> >   		p = next + 1;
> >   	}
> >   }
> > +
> > +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
> > +{
> > +	int i, queue, pipe, mec;
> > +
> > +	/* policy for amdgpu compute queue ownership */
> > +	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > +		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > +		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > +			% adev->gfx.mec.num_pipe_per_mec;
> > +		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > +			/ adev->gfx.mec.num_pipe_per_mec;
> > +
> > +		/* we've run out of HW */
> > +		if (mec >= adev->gfx.mec.num_mec)
> > +			break;
> > +
> > +		if (adev->gfx.mec.num_mec > 1) {
> > +			/* policy: amdgpu owns the first two queues of the first MEC */
> > +			if (mec == 0 && queue < 2)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		} else {
> > +			/* policy: amdgpu owns all queues in the first pipe */
> > +			if (mec == 0 && pipe == 0)
> > +				set_bit(i, adev->gfx.mec.queue_bitmap);
> > +		}
> > +	}
> > +
> > +	/* update the number of active compute rings */
> > +	adev->gfx.num_compute_rings =
> > +		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> > +
> > +	/* If you hit this case and edited the policy, you probably just
> > +	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
> > +	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
> > +		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> > +}
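
(Aside for readers following along, illustrative only and not part of the
patch: the loop above decomposes the flat queue index i into a
(mec, pipe, queue) triple with queue varying fastest, then counts the
owned queues. Below is a minimal userspace sketch of the same arithmetic;
the num_mec/num_pipe_per_mec/num_queue_per_pipe values are assumptions
for the example, as the real values come from each gfx IP's mec_init,
and the kernel tracks ownership in a bitmap via set_bit()/bitmap_weight()
rather than a plain counter:

    #include <stdio.h>

    int main(void)
    {
        const int num_queue_per_pipe = 8; /* assumed, per mec_init below */
        const int num_pipe_per_mec = 4;   /* assumed for illustration */
        const int num_mec = 2;            /* assumed for illustration */
        int i, owned_count = 0;

        for (i = 0; i < num_mec * num_pipe_per_mec * num_queue_per_pipe; ++i) {
            int queue = i % num_queue_per_pipe;
            int pipe = (i / num_queue_per_pipe) % num_pipe_per_mec;
            int mec = (i / num_queue_per_pipe) / num_pipe_per_mec;

            /* the num_mec > 1 branch of the policy: keep queues 0 and 1
             * of every pipe on MEC 0 (8 queues with these values) */
            int owned = (mec == 0 && queue < 2);

            owned_count += owned;
            printf("i=%2d -> mec=%d pipe=%d queue=%d%s\n",
                   i, mec, pipe, queue, owned ? "  (amdgpu)" : "");
        }
        /* analogous to the bitmap_weight() -> num_compute_rings step */
        printf("owned queues: %d\n", owned_count);
        return 0;
    }
)
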
> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > index 2d846ef..9b9ea6e 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
> > @@ -30,6 +30,8 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
> >   void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
> >   		unsigned max_sh);
> >
> > +void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
> > +
> >   /**
> >    * amdgpu_gfx_create_bitmask - create a bitmask
> >    *
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > index 862bc72..6ffb2da 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
> > @@ -2809,43 +2809,6 @@ static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
> >   	}
> >   }
> >
> > -static void gfx_v7_0_compute_queue_acquire(struct amdgpu_device *adev)
> > -{
> > -	int i, queue, pipe, mec;
> > -
> > -	/* policy for amdgpu compute queue ownership */
> > -	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > -		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > -		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			% adev->gfx.mec.num_pipe_per_mec;
> > -		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			/ adev->gfx.mec.num_pipe_per_mec;
> > -
> > -		/* we've run out of HW */
> > -		if (mec >= adev->gfx.mec.num_mec)
> > -			break;
> > -
> > -		if (adev->gfx.mec.num_mec > 1) {
> > -			/* policy: amdgpu owns the first two queues of the first MEC */
> > -			if (mec == 0 && queue < 2)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		} else {
> > -			/* policy: amdgpu owns all queues in the first pipe */
> > -			if (mec == 0 && pipe == 0)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		}
> > -	}
> > -
> > -	/* update the number of active compute rings */
> > -	adev->gfx.num_compute_rings =
> > -		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> > -
> > -	/* If you hit this case and edited the policy, you probably just
> > -	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
> > -	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
> > -		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> > -}
> > -
> >   static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
> >   {
> >   	int r;
> > @@ -2870,7 +2833,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
> >   	adev->gfx.mec.num_queue_per_pipe = 8;
> >
> >   	/* take ownership of the relevant compute queues */
> > -	gfx_v7_0_compute_queue_acquire(adev);
> > +	amdgpu_gfx_compute_queue_acquire(adev);
> >
> >   	/* allocate space for ALL pipes (even the ones we don't own) */
> >   	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > index 1370b39..cfa37f1 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
> > @@ -1448,43 +1448,6 @@ static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring,
> >   	amdgpu_ring_fini(ring);
> >   }
> >
> > -static void gfx_v8_0_compute_queue_acquire(struct amdgpu_device *adev)
> > -{
> > -	int i, queue, pipe, mec;
> > -
> > -	/* policy for amdgpu compute queue ownership */
> > -	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > -		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > -		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			% adev->gfx.mec.num_pipe_per_mec;
> > -		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			/ adev->gfx.mec.num_pipe_per_mec;
> > -
> > -		/* we've run out of HW */
> > -		if (mec >= adev->gfx.mec.num_mec)
> > -			break;
> > -
> > -		if (adev->gfx.mec.num_mec > 1) {
> > -			/* policy: amdgpu owns the first two queues of the first MEC */
> > -			if (mec == 0 && queue < 2)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		} else {
> > -			/* policy: amdgpu owns all queues in the first pipe */
> > -			if (mec == 0 && pipe == 0)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		}
> > -	}
> > -
> > -	/* update the number of active compute rings */
> > -	adev->gfx.num_compute_rings =
> > -		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> > -
> > -	/* If you hit this case and edited the policy, you probably just
> > -	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
> > -	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
> > -		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> > -}
> > -
> >   static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
> >   {
> >   	int r;
> > @@ -1513,7 +1476,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
> >   	adev->gfx.mec.num_queue_per_pipe = 8;
> >
> >   	/* take ownership of the relevant compute queues */
> > -	gfx_v8_0_compute_queue_acquire(adev);
> > +	amdgpu_gfx_compute_queue_acquire(adev);
> >
> >   	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
> >
> > diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > index 9d675b3..3ea0e71 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> > @@ -857,43 +857,6 @@ static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
> >   	}
> >   }
> >
> > -static void gfx_v9_0_compute_queue_acquire(struct amdgpu_device *adev)
> > -{
> > -	int i, queue, pipe, mec;
> > -
> > -	/* policy for amdgpu compute queue ownership */
> > -	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
> > -		queue = i % adev->gfx.mec.num_queue_per_pipe;
> > -		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			% adev->gfx.mec.num_pipe_per_mec;
> > -		mec = (i / adev->gfx.mec.num_queue_per_pipe)
> > -			/ adev->gfx.mec.num_pipe_per_mec;
> > -
> > -		/* we've run out of HW */
> > -		if (mec >= adev->gfx.mec.num_mec)
> > -			break;
> > -
> > -		if (adev->gfx.mec.num_mec > 1) {
> > -			/* policy: amdgpu owns the first two queues of the first MEC */
> > -			if (mec == 0 && queue < 2)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		} else {
> > -			/* policy: amdgpu owns all queues in the first pipe */
> > -			if (mec == 0 && pipe == 0)
> > -				set_bit(i, adev->gfx.mec.queue_bitmap);
> > -		}
> > -	}
> > -
> > -	/* update the number of active compute rings */
> > -	adev->gfx.num_compute_rings =
> > -		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
> > -
> > -	/* If you hit this case and edited the policy, you probably just
> > -	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
> > -	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
> > -		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> > -}
> > -
> >   static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
> >   {
> >   	int r;
> > @@ -920,7 +883,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
> >   	adev->gfx.mec.num_queue_per_pipe = 8;
> >
> >   	/* take ownership of the relevant compute queues */
> > -	gfx_v9_0_compute_queue_acquire(adev);
> > +	amdgpu_gfx_compute_queue_acquire(adev);
> >   	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
> >
> >   	if (adev->gfx.mec.hpd_eop_obj == NULL) {
> >
> 
> _______________________________________________
> amd-gfx mailing list
> amd-gfx at lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/amd-gfx

