[PATCH 2/7] drm/amdgpu: kiq pm4 function implementation for gfx_v9

Felix Kuehling felix.kuehling at amd.com
Mon Jan 13 16:57:49 UTC 2020


On 2020-01-11 1:39 p.m., Alex Sierra wrote:
> Implement the kiq_pm4_funcs struct members for gfx_v9.
>
> Change-Id: I8fd3e160c4bd58f19d35d29e39517db967063afe
> Signed-off-by: Alex Sierra <alex.sierra at amd.com>

Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>


> ---
>   drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 115 ++++++++++++++++++++++++++
>   1 file changed, 115 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> index e3d466bd5c4e..ad0179ea2cc5 100644
> --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
> @@ -739,6 +739,120 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
>   static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
>   				     void *inject_if);
>   
> +static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
> +				uint64_t queue_mask)
> +{
> +	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
> +	amdgpu_ring_write(kiq_ring,
> +		PACKET3_SET_RESOURCES_VMID_MASK(0) |
> +		/* vmid_mask:0 queue_type:0 (KIQ) */
> +		PACKET3_SET_RESOURCES_QUEUE_TYPE(0));
> +	amdgpu_ring_write(kiq_ring,
> +			lower_32_bits(queue_mask));	/* queue mask lo */
> +	amdgpu_ring_write(kiq_ring,
> +			upper_32_bits(queue_mask));	/* queue mask hi */
> +	amdgpu_ring_write(kiq_ring, 0);	/* gws mask lo */
> +	amdgpu_ring_write(kiq_ring, 0);	/* gws mask hi */
> +	amdgpu_ring_write(kiq_ring, 0);	/* oac mask */
> +	amdgpu_ring_write(kiq_ring, 0);	/* gds heap base:0, gds heap size:0 */
> +}
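
For context: SET_RESOURCES is what hands the KIQ the set of compute
queues it is allowed to manage. A minimal sketch of how a caller might
build the queue mask before invoking this hook (kiq, kiq_ring and adev
stand in for the usual amdgpu locals; the loop is illustrative only,
not part of this patch):

	uint64_t queue_mask = 0;
	int i;

	/* fold every usable MEC queue into one mask; the hardware
	 * exposes at most 64 queues today, so 64 bits are enough */
	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;
		queue_mask |= (1ull << i);
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
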
> +
> +static void gfx_v9_0_kiq_map_queues(struct amdgpu_ring *kiq_ring,
> +				 struct amdgpu_ring *ring)
> +{
> +	struct amdgpu_device *adev = kiq_ring->adev;
> +	uint64_t mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
> +	uint64_t wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
> +	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
> +
> +	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
> +	/* Q_sel: 0, vmid: 0, vidmem: 1, engine: 0, num_Q: 1 */
> +	amdgpu_ring_write(kiq_ring,
> +			 PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
> +			 PACKET3_MAP_QUEUES_VMID(0) | /* VMID */
> +			 PACKET3_MAP_QUEUES_QUEUE(ring->queue) |
> +			 PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
> +			 PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
> +			 /*queue_type: normal compute queue */
> +			 PACKET3_MAP_QUEUES_QUEUE_TYPE(0) |
> +			 /* alloc format: all_on_one_pipe */
> +			 PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
> +			 PACKET3_MAP_QUEUES_ENGINE_SEL(eng_sel) |
> +			 /* num_queues: must be 1 */
> +			 PACKET3_MAP_QUEUES_NUM_QUEUES(1));
> +	amdgpu_ring_write(kiq_ring,
> +			PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
> +	amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr));
> +	amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr));
> +	amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr));
> +	amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr));
> +}
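
One nice consequence of routing these through kiq_pm4_funcs is that
the common code can size its KIQ submission from the *_size members
instead of hard-coding per-ASIC packet lengths. A sketch of what the
enable path could look like (modeled loosely on amdgpu_gfx_enable_kcq();
treat it as illustrative, not as the final common code):

	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int r, i;

	/* one MAP_QUEUES per KCQ plus a single SET_RESOURCES */
	r = amdgpu_ring_alloc(kiq_ring,
			      kiq->pmf->map_queues_size *
			      adev->gfx.num_compute_rings +
			      kiq->pmf->set_resources_size);
	if (r)
		return r;

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring,
					 &adev->gfx.compute_ring[i]);

	return amdgpu_ring_test_helper(kiq_ring);
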
> +
> +static void gfx_v9_0_kiq_unmap_queues(struct amdgpu_ring *kiq_ring,
> +				   struct amdgpu_ring *ring,
> +				   enum amdgpu_unmap_queues_action action,
> +				   u64 gpu_addr, u64 seq)
> +{
> +	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
> +
> +	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
> +	amdgpu_ring_write(kiq_ring, /* action, queue_sel: 0, eng_sel, num_queues: 1 */
> +			  PACKET3_UNMAP_QUEUES_ACTION(action) |
> +			  PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
> +			  PACKET3_UNMAP_QUEUES_ENGINE_SEL(eng_sel) |
> +			  PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
> +	amdgpu_ring_write(kiq_ring,
> +			PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
> +
> +	if (action == PREEMPT_QUEUES_NO_UNMAP) {
> +		amdgpu_ring_write(kiq_ring, lower_32_bits(gpu_addr));
> +		amdgpu_ring_write(kiq_ring, upper_32_bits(gpu_addr));
> +		amdgpu_ring_write(kiq_ring, seq);
> +	} else {
> +		amdgpu_ring_write(kiq_ring, 0);
> +		amdgpu_ring_write(kiq_ring, 0);
> +		amdgpu_ring_write(kiq_ring, 0);
> +	}
> +}
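
Worth noting that the two actions behave quite differently:
RESET_QUEUES just tears the queue down, so the trailing three DWORDs
are don't-care, while PREEMPT_QUEUES_NO_UNMAP asks the CP to write
seq to gpu_addr once the queue has actually been preempted, which
gives the caller something to poll on. A sketch of the preemption
side (trail_seq and the trail_fence_* pointers are assumed to be
caller-owned fence storage, not names introduced by this patch):

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size);
	if (r)
		return r;

	kiq->pmf->kiq_unmap_queues(kiq_ring, ring, PREEMPT_QUEUES_NO_UNMAP,
				   trail_fence_gpu_addr, ++trail_seq);
	amdgpu_ring_commit(kiq_ring);

	/* poll until the CP has written the trailing fence */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (le32_to_cpu(*trail_fence_cpu_addr) == trail_seq)
			break;
		udelay(1);
	}
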
> +
> +static void gfx_v9_0_kiq_query_status(struct amdgpu_ring *kiq_ring,
> +				   struct amdgpu_ring *ring,
> +				   u64 addr,
> +				   u64 seq)
> +{
> +	uint32_t eng_sel = ring->funcs->type == AMDGPU_RING_TYPE_GFX ? 4 : 0;
> +
> +	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_QUERY_STATUS, 5));
> +	amdgpu_ring_write(kiq_ring,
> +			  PACKET3_QUERY_STATUS_CONTEXT_ID(0) |
> +			  PACKET3_QUERY_STATUS_INTERRUPT_SEL(0) |
> +			  PACKET3_QUERY_STATUS_COMMAND(2));
> +	/* doorbell offset, eng_sel */
> +	amdgpu_ring_write(kiq_ring,
> +			PACKET3_QUERY_STATUS_DOORBELL_OFFSET(ring->doorbell_index) |
> +			PACKET3_QUERY_STATUS_ENG_SEL(eng_sel));
> +	amdgpu_ring_write(kiq_ring, lower_32_bits(addr));
> +	amdgpu_ring_write(kiq_ring, upper_32_bits(addr));
> +	amdgpu_ring_write(kiq_ring, lower_32_bits(seq));
> +	amdgpu_ring_write(kiq_ring, upper_32_bits(seq));
> +}
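
QUERY_STATUS with COMMAND(2) similarly ends with the CP writing seq
back to addr once the queue's status can be reported, so a caller can
reuse the same poll-a-fence pattern. Hedged sketch (addr, seq and the
cpu_addr mirror are assumed to come from a caller-owned writeback
slot; none of these names are defined by this patch):

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->query_status_size);
	if (r)
		return r;

	kiq->pmf->kiq_query_status(kiq_ring, ring, addr, seq);
	amdgpu_ring_commit(kiq_ring);

	/* wait for the CP to write seq back to the writeback slot */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (le64_to_cpu(*cpu_addr) == seq)
			break;
		udelay(1);
	}
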
> +
> +static const struct kiq_pm4_funcs gfx_v9_0_kiq_pm4_funcs = {
> +	.kiq_set_resources = gfx_v9_0_kiq_set_resources,
> +	.kiq_map_queues = gfx_v9_0_kiq_map_queues,
> +	.kiq_unmap_queues = gfx_v9_0_kiq_unmap_queues,
> +	.kiq_query_status = gfx_v9_0_kiq_query_status,
> +	.set_resources_size = 8,
> +	.map_queues_size = 7,
> +	.unmap_queues_size = 6,
> +	.query_status_size = 7,
> +};
> +
> +static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
> +{
> +	adev->gfx.kiq.pmf = &gfx_v9_0_kiq_pm4_funcs;
> +}
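
One thing worth double-checking on packets like these: the *_size
members are full packet lengths in DWORDs (the PACKET3 header plus
count + 1 data words), since the common code feeds them straight into
amdgpu_ring_alloc(). They all line up here:

	SET_RESOURCES:  PACKET3(op, 6) -> 1 + (6 + 1) = 8
	MAP_QUEUES:     PACKET3(op, 5) -> 1 + (5 + 1) = 7
	UNMAP_QUEUES:   PACKET3(op, 4) -> 1 + (4 + 1) = 6
	QUERY_STATUS:   PACKET3(op, 5) -> 1 + (5 + 1) = 7
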
> +
>   static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
>   {
>   	switch (adev->asic_type) {
> @@ -4260,6 +4374,7 @@ static int gfx_v9_0_early_init(void *handle)
>   	else
>   		adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
>   	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
> +	gfx_v9_0_set_kiq_pm4_funcs(adev);
>   	gfx_v9_0_set_ring_funcs(adev);
>   	gfx_v9_0_set_irq_funcs(adev);
>   	gfx_v9_0_set_gds_init(adev);

