[PATCH 2/2] drm/amdkfd: convert misc checks to IP version checking

Felix Kuehling <felix.kuehling at amd.com>
Mon Nov 8 22:21:16 UTC 2021


Am 2021-11-05 um 3:58 p.m. schrieb Graham Sider:
> Switch to IP version checking instead of asic_type on various KFD
> version checks.
>
> Signed-off-by: Graham Sider <Graham.Sider at amd.com>
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      |  4 ++--
>  drivers/gpu/drm/amd/amdkfd/kfd_crat.c         |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_device.c       | 24 ++++++++++---------
>  .../drm/amd/amdkfd/kfd_device_queue_manager.c |  6 ++---
>  .../amd/amdkfd/kfd_device_queue_manager_v9.c  |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_events.c       |  6 +++--
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c      |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h         |  2 +-
>  drivers/gpu/drm/amd/amdkfd/kfd_process.c      |  8 +++----
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c          |  6 ++---
>  drivers/gpu/drm/amd/amdkfd/kfd_topology.c     |  4 ++--
>  11 files changed, 35 insertions(+), 31 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index 2e3d74f7fbfb..f66c78fda5be 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
>  	/* Return gpu_id as doorbell offset for mmap usage */
>  	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
>  	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
> -	if (KFD_IS_SOC15(dev->device_info->asic_family))
> +	if (KFD_IS_SOC15(dev->adev->ip_versions[GC_HWIP][0]))

Given the way this is used, you can probably change the definition of
KFD_IS_SOC15 to take "dev" as its parameter. It saves you some typing.
Or better yet, replace it with a more general macro you can use in the
other places as well:

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])

...

    if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) {
        ...

Regards,
  Felix


>  		/* On SOC15 ASICs, include the doorbell offset within the
>  		 * process doorbell frame, which is 2 pages.
>  		 */
> @@ -1603,7 +1603,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
>  	}
>  	mutex_unlock(&p->mutex);
>  
> -	if (dev->device_info->asic_family == CHIP_ALDEBARAN) {
> +	if (dev->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) {
>  		err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
>  				(struct kgd_mem *) mem, true);
>  		if (err) {
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> index 500bc7e40309..b41e62a324f6 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
> @@ -1992,7 +1992,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
>  		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
>  		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
>  		sub_type_hdr->num_hops_xgmi = 1;
> -		if (kdev->adev->asic_type == CHIP_ALDEBARAN) {
> +		if (kdev->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) {
>  			sub_type_hdr->minimum_bandwidth_mbs =
>  					amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
>  							kdev->adev, NULL, true);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> index b752dc36a2cd..29f8fcd4b779 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
> @@ -844,23 +844,23 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
>  static void kfd_cwsr_init(struct kfd_dev *kfd)
>  {
>  	if (cwsr_enable && kfd->device_info->supports_cwsr) {
> -		if (kfd->device_info->asic_family < CHIP_VEGA10) {
> +		if (kfd->adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1)) {
>  			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
>  			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
>  			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
> -		} else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
> +		} else if (kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
>  			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
>  			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
>  			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
> -		} else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) {
> +		} else if (kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) {
>  			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
>  			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
>  			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
> -		} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
> +		} else if (kfd->adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 1, 1)) {
>  			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
>  			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
>  			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
> -		} else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) {
> +		} else if (kfd->adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)) {
>  			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
>  			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
>  			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
> @@ -882,14 +882,16 @@ static int kfd_gws_init(struct kfd_dev *kfd)
>  		return 0;
>  
>  	if (hws_gws_support
> -		|| (kfd->device_info->asic_family == CHIP_VEGA10
> +		|| (kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1)
>  			&& kfd->mec2_fw_version >= 0x81b3)
> -		|| (kfd->device_info->asic_family >= CHIP_VEGA12
> -			&& kfd->device_info->asic_family <= CHIP_RAVEN
> +		|| ((kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1)
> +			|| kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)
> +			|| kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)
> +			|| kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 2))
>  			&& kfd->mec2_fw_version >= 0x1b3)
> -		|| (kfd->device_info->asic_family == CHIP_ARCTURUS
> +		|| (kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)
>  			&& kfd->mec2_fw_version >= 0x30)
> -		|| (kfd->device_info->asic_family == CHIP_ALDEBARAN
> +		|| (kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)
>  			&& kfd->mec2_fw_version >= 0x28))
>  		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
>  				kfd->adev->gds.gws_size, &kfd->gws);
> @@ -959,7 +961,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
>  	 * There can be only 2 packets at once
>  	 */
>  	map_process_packet_size =
> -			kfd->device_info->asic_family == CHIP_ALDEBARAN ?
> +			kfd->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ?
>  				sizeof(struct pm4_mes_map_process_aldebaran) :
>  					sizeof(struct pm4_mes_map_process);
>  	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 8a39494fa093..7cadcdd9ffd2 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -157,7 +157,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
>  {
>  	struct kfd_dev *dev = qpd->dqm->dev;
>  
> -	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
> +	if (!KFD_IS_SOC15(dev->adev->ip_versions[GC_HWIP][0])) {
>  		/* On pre-SOC15 chips we need to use the queue ID to
>  		 * preserve the user mode ABI.
>  		 */
> @@ -202,7 +202,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd,
>  	unsigned int old;
>  	struct kfd_dev *dev = qpd->dqm->dev;
>  
> -	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
> +	if (!KFD_IS_SOC15(dev->adev->ip_versions[GC_HWIP][0]) ||
>  	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
>  	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
>  		return;
> @@ -250,7 +250,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
>  
>  	program_sh_mem_settings(dqm, qpd);
>  
> -	if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 &&
> +	if (dqm->dev->adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 0, 1) &&
>  	    dqm->dev->cwsr_enabled)
>  		program_trap_handler_settings(dqm, qpd);
>  
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
> index b5c3d13643f1..0f7471796667 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
> @@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
>  				SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
>  					SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
>  
> -		if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) {
> +		if (dqm->dev->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) {
>  			/* Aldebaran can safely support different XNACK modes
>  			 * per process
>  			 */
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
> index 3eea4edee355..20745086308e 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
> @@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid,
>  	/* Workaround on Raven to not kill the process when memory is freed
>  	 * before IOMMU is able to finish processing all the excessive PPRs
>  	 */
> -	if (dev->device_info->asic_family != CHIP_RAVEN &&
> -	    dev->device_info->asic_family != CHIP_RENOIR) {
> +
> +	if (dev->adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 1, 0) &&
> +	    dev->adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 2) &&
> +	    dev->adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0)) {
>  		mutex_lock(&p->event_mutex);
>  
>  		/* Lookup events by type and signal them */
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index aeade32ec298..c376c43a6c16 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -940,7 +940,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
>  	void *r;
>  
>  	/* Page migration works on Vega10 or newer */
> -	if (kfddev->device_info->asic_family < CHIP_VEGA10)
> +	if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
>  		return -EINVAL;
>  
>  	pgmap = &kfddev->pgmap;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> index 2a5b4d86bf40..013678fabc0c 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> @@ -183,7 +183,7 @@ enum cache_policy {
>  	cache_policy_noncoherent
>  };
>  
> -#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
> +#define KFD_IS_SOC15(gcipv) ((gcipv) >= (IP_VERSION(9, 0, 1)))
>  
>  struct kfd_event_interrupt_class {
>  	bool (*interrupt_isr)(struct kfd_dev *dev,
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> index f29b3932e3dc..f260f30b996c 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
> @@ -1317,14 +1317,14 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
>  		 * support the SVM APIs and don't need to be considered
>  		 * for the XNACK mode selection.
>  		 */
> -		if (dev->device_info->asic_family < CHIP_VEGA10)
> +		if (dev->adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1))
>  			continue;
>  		/* Aldebaran can always support XNACK because it can support
>  		 * per-process XNACK mode selection. But let the dev->noretry
>  		 * setting still influence the default XNACK mode.
>  		 */
>  		if (supported &&
> -		    dev->device_info->asic_family == CHIP_ALDEBARAN)
> +		    dev->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
>  			continue;
>  
>  		/* GFXv10 and later GPUs do not support shader preemption
> @@ -1332,7 +1332,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
>  		 * management and memory-manager-related preemptions or
>  		 * even deadlocks.
>  		 */
> -		if (dev->device_info->asic_family >= CHIP_NAVI10)
> +		if (dev->adev->ip_versions[GC_HWIP][0] > IP_VERSION(10, 1, 1))
>  			return false;
>  
>  		if (dev->noretry)
> @@ -1431,7 +1431,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
>  	int range_start = dev->shared_resources.non_cp_doorbells_start;
>  	int range_end = dev->shared_resources.non_cp_doorbells_end;
>  
> -	if (!KFD_IS_SOC15(dev->device_info->asic_family))
> +	if (!KFD_IS_SOC15(dev->adev->ip_versions[GC_HWIP][0]))
>  		return 0;
>  
>  	qpd->doorbell_bitmap =
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 065fa2a74c78..3be0ccb7a880 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -1051,8 +1051,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
>  	if (domain == SVM_RANGE_VRAM_DOMAIN)
>  		bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev);
>  
> -	switch (adev->asic_type) {
> -	case CHIP_ARCTURUS:
> +	switch (adev->ip_versions[GC_HWIP][0]) {
> +	case IP_VERSION(9, 4, 1):
>  		if (domain == SVM_RANGE_VRAM_DOMAIN) {
>  			if (bo_adev == adev) {
>  				mapping_flags |= coherent ?
> @@ -1068,7 +1068,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange,
>  				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
>  		}
>  		break;
> -	case CHIP_ALDEBARAN:
> +	case IP_VERSION(9, 4, 2):
>  		if (domain == SVM_RANGE_VRAM_DOMAIN) {
>  			if (bo_adev == adev) {
>  				mapping_flags |= coherent ?
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> index 5353f43c67f3..27c4d2599990 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
> @@ -1239,7 +1239,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
>  		 */
>  		if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS ||
>  		    (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI &&
> -		    to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) {
> +		    to_dev->gpu->adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0))) {
>  			outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
>  			inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT;
>  		}
> @@ -1487,7 +1487,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
>  		((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
>  		HSA_CAP_MEM_EDCSUPPORTED : 0;
>  
> -	if (dev->gpu->adev->asic_type != CHIP_VEGA10)
> +	if (dev->gpu->adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 0, 1))
>  		dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ?
>  			HSA_CAP_RASEVENTNOTIFY : 0;
>  