[PATCH] drm/amdgpu: implement harvesting support for UVD 7.2 (v2)
Alex Deucher
alexdeucher at gmail.com
Wed Jul 25 21:02:33 UTC 2018
On Wed, Jul 25, 2018 at 4:21 PM, James Zhu <jamesz at amd.com> wrote:
>
>
> On 2018-07-23 05:53 PM, Alex Deucher wrote:
>>
>> Properly handle cases where one or more instances of the IP
>> block may be harvested.
>>
>> v2: make sure ip_num_rings is initialized in amdgpu_queue_mgr.c
>>
>> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
>> ---
>> drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 ++++-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 13 +++++--
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 11 +++++-
>> drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 5 +++
>> drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 56
>> +++++++++++++++++++++++++--
>> 5 files changed, 86 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> index 258b6f73cbdf..f4d379cd4e47 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
>> @@ -348,8 +348,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev,
>> void *data, struct drm_file
>> break;
>> case AMDGPU_HW_IP_UVD:
>> type = AMD_IP_BLOCK_TYPE_UVD;
>> - for (i = 0; i < adev->uvd.num_uvd_inst; i++)
>> + for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
>> + if (adev->uvd.harvest_config & (1 << i))
>> + continue;
>> ring_mask |=
>> ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
>> + }
>> ib_start_alignment = 64;
>> ib_size_alignment = 64;
>> break;
>> @@ -362,11 +365,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev,
>> void *data, struct drm_file
>> break;
>> case AMDGPU_HW_IP_UVD_ENC:
>> type = AMD_IP_BLOCK_TYPE_UVD;
>> - for (i = 0; i < adev->uvd.num_uvd_inst; i++)
>> + for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
>> + if (adev->uvd.harvest_config & (1 << i))
>> + continue;
>> for (j = 0; j < adev->uvd.num_enc_rings;
>> j++)
>> ring_mask |=
>>
>> ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
>> (j + i *
>> adev->uvd.num_enc_rings));
>> + }
>> ib_start_alignment = 64;
>> ib_size_alignment = 64;
>> break;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
>> index ea9850c9224d..c59d3a2af388 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
>> @@ -219,7 +219,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
>> u32 hw_ip, u32 instance, u32 ring,
>> struct amdgpu_ring **out_ring)
>> {
>> - int r, ip_num_rings;
>> + int i, r, ip_num_rings = 0;
>> struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
>> if (!adev || !mgr || !out_ring)
>> @@ -248,14 +248,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
>> ip_num_rings = adev->sdma.num_instances;
>> break;
>> case AMDGPU_HW_IP_UVD:
>> - ip_num_rings = adev->uvd.num_uvd_inst;
>> + for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
>> + if (!(adev->uvd.harvest_config & (1 << i)))
>> + ip_num_rings++;
>> + }
>> break;
>> case AMDGPU_HW_IP_VCE:
>> ip_num_rings = adev->vce.num_rings;
>> break;
>> case AMDGPU_HW_IP_UVD_ENC:
>> + for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
>> + if (!(adev->uvd.harvest_config & (1 << i)))
>> + ip_num_rings++;
>> + }
>> ip_num_rings =
>> - adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
>> + adev->uvd.num_enc_rings * ip_num_rings;
>> break;
>> case AMDGPU_HW_IP_VCN_DEC:
>> ip_num_rings = 1;
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> index 80b5c453f8c1..a07548c99ab8 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
>> @@ -255,7 +255,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
>> bo_size +=
>> AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
>> for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
>> -
>> + if (adev->uvd.harvest_config & (1 << j))
>> + continue;
>> r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
>> AMDGPU_GEM_DOMAIN_VRAM,
>> &adev->uvd.inst[j].vcpu_bo,
>> &adev->uvd.inst[j].gpu_addr,
>> &adev->uvd.inst[j].cpu_addr);
>> @@ -309,6 +310,8 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
>> &adev->uvd.entity);
>> for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
>> + if (adev->uvd.harvest_config & (1 << j))
>> + continue;
>> kfree(adev->uvd.inst[j].saved_bo);
>> amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
>> @@ -344,6 +347,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
>> }
>> for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
>> + if (adev->uvd.harvest_config & (1 << j))
>> + continue;
>> if (adev->uvd.inst[j].vcpu_bo == NULL)
>> continue;
>> @@ -366,6 +371,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
>> int i;
>> for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
>> + if (adev->uvd.harvest_config & (1 << i))
>> + continue;
>> if (adev->uvd.inst[i].vcpu_bo == NULL)
>> return -EINVAL;
>> @@ -1160,6 +1167,8 @@ static void amdgpu_uvd_idle_work_handler(struct
>> work_struct *work)
>> unsigned fences = 0, i, j;
>> for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
>> + if (adev->uvd.harvest_config & (1 << i))
>> + continue;
>> fences +=
>> amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
>> for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
>> fences +=
>> amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
>> b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
>> index 66872286ab12..9cf42454ba81 100644
>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
>> @@ -46,8 +46,12 @@ struct amdgpu_uvd_inst {
>> struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
>> struct amdgpu_irq_src irq;
>> uint32_t srbm_soft_reset;
>
> I didn't see this patch use this new member: instance
Whoops, yes leftover from an earlier revision. Removed and rebased on
Christian's latest changes. I'll send a v3 shortly.
Alex
More information about the amd-gfx
mailing list