[PATCH v3] drm/amdkfd: Change svm_range_get_info return type

Felix Kuehling felix.kuehling at amd.com
Fri May 16 18:35:41 UTC 2025


On 2025-04-02 10:12, Ваторопин Андрей wrote:
> From: Andrey Vatoropin <a.vatoropin at crpt.ru>
>
> Static analysis shows that pointer "svms" cannot be NULL because it points
> to the object "struct svm_range_list". Remove the extra NULL check. It is
> meaningless and harms the readability of the code.
>
> In the function svm_range_get_info() there is no possibility of failure.
> Therefore, the caller of the function svm_range_get_info() does not need
> a return value. Change the return type of the function svm_range_get_info()
> from "int" to "void".
>
> Since the function svm_range_get_info() now has a return type of "void",
> the caller of the function svm_range_get_info() does not need a return
> value. Delete the extra code.
>
> Found by Linux Verification Center (linuxtesting.org) with SVACE.
>
> Signed-off-by: Andrey Vatoropin <a.vatoropin at crpt.ru>

Sorry about the long delay. I just applied this patch to amd-staging-drm-next.

Regards,
  Felix


> ---
> v1 -> v2: also change return type of svm_range_get_info() per Felix Kuehling suggestion
> v2 -> v3: deleted extra code in the function kfd_criu_checkpoint_svm()
>
>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c |  4 +---
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c     |  9 ++-------
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.h     | 11 +++++------
>  3 files changed, 8 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index 1e9dd00620bf..a2149afa5803 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -2039,9 +2039,7 @@ static int criu_get_process_object_info(struct kfd_process *p,
>  
>  	num_events = kfd_get_num_events(p);
>  
> -	ret = svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
> -	if (ret)
> -		return ret;
> +	svm_range_get_info(p, &num_svm_ranges, &svm_priv_data_size);
>  
>  	*num_objects = num_queues + num_events + num_svm_ranges;
>  
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 100717a98ec1..ebdbbb620b11 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -4076,8 +4076,8 @@ int kfd_criu_restore_svm(struct kfd_process *p,
>  	return ret;
>  }
>  
> -int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
> -		       uint64_t *svm_priv_data_size)
> +void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
> +			uint64_t *svm_priv_data_size)
>  {
>  	uint64_t total_size, accessibility_size, common_attr_size;
>  	int nattr_common = 4, nattr_accessibility = 1;
> @@ -4089,8 +4089,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
>  	*svm_priv_data_size = 0;
>  
>  	svms = &p->svms;
> -	if (!svms)
> -		return -EINVAL;
>  
>  	mutex_lock(&svms->lock);
>  	list_for_each_entry(prange, &svms->list, list) {
> @@ -4132,7 +4130,6 @@ int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
>  
>  	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
>  		 *svm_priv_data_size);
> -	return 0;
>  }
>  
>  int kfd_criu_checkpoint_svm(struct kfd_process *p,
> @@ -4149,8 +4146,6 @@ int kfd_criu_checkpoint_svm(struct kfd_process *p,
>  	struct mm_struct *mm;
>  
>  	svms = &p->svms;
> -	if (!svms)
> -		return -EINVAL;
>  
>  	mm = get_task_mm(p->lead_thread);
>  	if (!mm) {
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> index 6ea23c78009c..01c7a4877904 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
> @@ -184,8 +184,8 @@ void schedule_deferred_list_work(struct svm_range_list *svms);
>  void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
>  			 unsigned long offset, unsigned long npages);
>  void svm_range_dma_unmap(struct svm_range *prange);
> -int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
> -		       uint64_t *svm_priv_data_size);
> +void svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
> +			uint64_t *svm_priv_data_size);
>  int kfd_criu_checkpoint_svm(struct kfd_process *p,
>  			    uint8_t __user *user_priv_data,
>  			    uint64_t *priv_offset);
> @@ -237,13 +237,12 @@ static inline int svm_range_schedule_evict_svm_bo(
>  	return -EINVAL;
>  }
>  
> -static inline int svm_range_get_info(struct kfd_process *p,
> -				     uint32_t *num_svm_ranges,
> -				     uint64_t *svm_priv_data_size)
> +static inline void svm_range_get_info(struct kfd_process *p,
> +				      uint32_t *num_svm_ranges,
> +				      uint64_t *svm_priv_data_size)
>  {
>  	*num_svm_ranges = 0;
>  	*svm_priv_data_size = 0;
> -	return 0;
>  }
>  
>  static inline int kfd_criu_checkpoint_svm(struct kfd_process *p,


More information about the amd-gfx mailing list