[PATCH v2 2/2] drm/amdkfd: Remove cu mask from struct queue_properties(v2)

Felix Kuehling felix.kuehling at amd.com
Tue Oct 26 00:10:16 UTC 2021


Am 2021-10-25 um 5:58 a.m. schrieb Lang Yu:
> The cu_mask has already been copied into the MQD memory and
> doesn't have to persist in queue_properties. Remove it
> from queue_properties.
>
> Use struct mqd_update_info to store such properties
> instead, and pass it to the update queue operation.
>
> v2:
> * Rename pqm_update_queue to pqm_update_queue_properties.
> * Rename struct queue_update_info to struct mqd_update_info.
> * Rename pqm_set_cu_mask to pqm_update_mqd.
>
> Suggested-by: Felix Kuehling <Felix.Kuehling at amd.com>
> Signed-off-by: Lang Yu <lang.yu at amd.com>
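
As a quick illustration for readers of the diff below, a caller of the
new interface looks roughly like this (a minimal sketch based on the
kfd_ioctl_set_cu_mask() change in this patch; the qid/num_cu_mask
variables and the surrounding error handling are illustrative only):

	struct mqd_update_info minfo = {0};
	int retval;

	/* The CU mask is passed as an array of 32-bit words */
	minfo.cu_mask.count = num_cu_mask;	/* must be a multiple of 32 */
	minfo.cu_mask.ptr = kzalloc(sizeof(uint32_t) * (num_cu_mask / 32),
				    GFP_KERNEL);
	if (!minfo.cu_mask.ptr)
		return -ENOMEM;

	/* ... fill minfo.cu_mask.ptr, e.g. with copy_from_user() ... */

	minfo.update_flag = UPDATE_FLAG_CU_MASK;

	mutex_lock(&p->mutex);
	retval = pqm_update_mqd(&p->pqm, qid, &minfo);
	mutex_unlock(&p->mutex);

	/* The caller keeps ownership of the mask buffer in all cases */
	kfree(minfo.cu_mask.ptr);

Since the mask is consumed while updating the MQD, nothing needs to be
kept around in queue_properties afterwards, which is the point of the
cleanup.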

The series is

Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>


> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c      | 31 ++++++++++---------
>  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c |  1 -
>  .../gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c  |  9 +++---
>  .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c  |  9 +++---
>  .../gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c   |  9 +++---
>  .../gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c   |  9 +++---
>  drivers/gpu/drm/amd/amdkfd/kfd_priv.h         | 23 +++++++++-----
>  .../amd/amdkfd/kfd_process_queue_manager.c    | 20 +++---------
>  8 files changed, 57 insertions(+), 54 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index 9317a2e238d0..24ebd61395d8 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
>  
>  	mutex_lock(&p->mutex);
>  
> -	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
> +	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
>  
>  	mutex_unlock(&p->mutex);
>  
> @@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
>  	int retval;
>  	const int max_num_cus = 1024;
>  	struct kfd_ioctl_set_cu_mask_args *args = data;
> -	struct queue_properties properties;
> +	struct mqd_update_info minfo = {0};
>  	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
>  	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
>  
> @@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
>  		return -EINVAL;
>  	}
>  
> -	properties.cu_mask_count = args->num_cu_mask;
> -	if (properties.cu_mask_count == 0) {
> +	minfo.cu_mask.count = args->num_cu_mask;
> +	if (minfo.cu_mask.count == 0) {
>  		pr_debug("CU mask cannot be 0");
>  		return -EINVAL;
>  	}
> @@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
>  	 * limit of max_num_cus bits.  We can then just drop any CU mask bits
>  	 * past max_num_cus bits and just use the first max_num_cus bits.
>  	 */
> -	if (properties.cu_mask_count > max_num_cus) {
> +	if (minfo.cu_mask.count > max_num_cus) {
>  		pr_debug("CU mask cannot be greater than 1024 bits");
> -		properties.cu_mask_count = max_num_cus;
> +		minfo.cu_mask.count = max_num_cus;
>  		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
>  	}
>  
> -	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
> -	if (!properties.cu_mask)
> +	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
> +	if (!minfo.cu_mask.ptr)
>  		return -ENOMEM;
>  
> -	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
> +	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
>  	if (retval) {
>  		pr_debug("Could not copy CU mask from userspace");
> -		kfree(properties.cu_mask);
> -		return -EFAULT;
> +		retval = -EFAULT;
> +		goto out;
>  	}
>  
> +	minfo.update_flag = UPDATE_FLAG_CU_MASK;
> +
>  	mutex_lock(&p->mutex);
>  
> -	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
> +	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
>  
>  	mutex_unlock(&p->mutex);
>  
> -	if (retval)
> -		kfree(properties.cu_mask);
> -
> +out:
> +	kfree(minfo.cu_mask.ptr);
>  	return retval;
>  }
>  
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
> index a2b77d1df854..64b4ac339904 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
> @@ -136,7 +136,6 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
>  	prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
>  	prop.eop_ring_buffer_address = kq->eop_gpu_addr;
>  	prop.eop_ring_buffer_size = PAGE_SIZE;
> -	prop.cu_mask = NULL;
>  
>  	if (init_queue(&kq->queue, &prop) != 0)
>  		goto err_init_queue;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
> index 00bcaa11ff57..8128f4d312f1 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
> @@ -42,16 +42,17 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
>  }
>  
>  static void update_cu_mask(struct mqd_manager *mm, void *mqd,
> -			struct queue_properties *q)
> +			struct mqd_update_info *minfo)
>  {
>  	struct cik_mqd *m;
>  	uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
>  
> -	if (q->cu_mask_count == 0)
> +	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
> +	    !minfo->cu_mask.ptr)
>  		return;
>  
>  	mqd_symmetrically_map_cu_mask(mm,
> -		q->cu_mask, q->cu_mask_count, se_mask);
> +		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
>  
>  	m = get_mqd(mqd);
>  	m->compute_static_thread_mgmt_se0 = se_mask[0];
> @@ -215,7 +216,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
>  	if (q->format == KFD_QUEUE_FORMAT_AQL)
>  		m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
>  
> -	update_cu_mask(mm, mqd, q);
> +	update_cu_mask(mm, mqd, minfo);
>  	set_priority(m, q);
>  
>  	q->is_active = QUEUE_IS_ACTIVE(*q);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> index 7f1101780135..270160fc401b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
> @@ -42,16 +42,17 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
>  }
>  
>  static void update_cu_mask(struct mqd_manager *mm, void *mqd,
> -			   struct queue_properties *q)
> +			struct mqd_update_info *minfo)
>  {
>  	struct v10_compute_mqd *m;
>  	uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
>  
> -	if (q->cu_mask_count == 0)
> +	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
> +	    !minfo->cu_mask.ptr)
>  		return;
>  
>  	mqd_symmetrically_map_cu_mask(mm,
> -		q->cu_mask, q->cu_mask_count, se_mask);
> +		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
>  
>  	m = get_mqd(mqd);
>  	m->compute_static_thread_mgmt_se0 = se_mask[0];
> @@ -219,7 +220,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
>  	if (mm->dev->cwsr_enabled)
>  		m->cp_hqd_ctx_save_control = 0;
>  
> -	update_cu_mask(mm, mqd, q);
> +	update_cu_mask(mm, mqd, minfo);
>  	set_priority(m, q);
>  
>  	q->is_active = QUEUE_IS_ACTIVE(*q);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
> index 152f29b28a79..4e5932f54b5a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
> @@ -43,16 +43,17 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
>  }
>  
>  static void update_cu_mask(struct mqd_manager *mm, void *mqd,
> -			struct queue_properties *q)
> +			struct mqd_update_info *minfo)
>  {
>  	struct v9_mqd *m;
>  	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
>  
> -	if (q->cu_mask_count == 0)
> +	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
> +	    !minfo->cu_mask.ptr)
>  		return;
>  
>  	mqd_symmetrically_map_cu_mask(mm,
> -		q->cu_mask, q->cu_mask_count, se_mask);
> +		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
>  
>  	m = get_mqd(mqd);
>  	m->compute_static_thread_mgmt_se0 = se_mask[0];
> @@ -270,7 +271,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
>  	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
>  		m->cp_hqd_ctx_save_control = 0;
>  
> -	update_cu_mask(mm, mqd, q);
> +	update_cu_mask(mm, mqd, minfo);
>  	set_priority(m, q);
>  
>  	q->is_active = QUEUE_IS_ACTIVE(*q);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
> index 4a8f3a06e6df..cd9220eb8a7a 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
> @@ -45,16 +45,17 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
>  }
>  
>  static void update_cu_mask(struct mqd_manager *mm, void *mqd,
> -			struct queue_properties *q)
> +			struct mqd_update_info *minfo)
>  {
>  	struct vi_mqd *m;
>  	uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
>  
> -	if (q->cu_mask_count == 0)
> +	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
> +	    !minfo->cu_mask.ptr)
>  		return;
>  
>  	mqd_symmetrically_map_cu_mask(mm,
> -		q->cu_mask, q->cu_mask_count, se_mask);
> +		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
>  
>  	m = get_mqd(mqd);
>  	m->compute_static_thread_mgmt_se0 = se_mask[0];
> @@ -230,7 +231,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
>  			atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
>  			mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
>  
> -	update_cu_mask(mm, mqd, q);
> +	update_cu_mask(mm, mqd, minfo);
>  	set_priority(m, q);
>  
>  	q->is_active = QUEUE_IS_ACTIVE(*q);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> index d758a57b17e2..4104b167e721 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> @@ -472,9 +472,6 @@ struct queue_properties {
>  	uint32_t ctl_stack_size;
>  	uint64_t tba_addr;
>  	uint64_t tma_addr;
> -	/* Relevant for CU */
> -	uint32_t cu_mask_count; /* Must be a multiple of 32 */
> -	uint32_t *cu_mask;
>  };
>  
>  #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
> @@ -482,7 +479,19 @@ struct queue_properties {
>  			    (q).queue_percent > 0 &&	\
>  			    !(q).is_evicted)
>  
> -struct mqd_update_info;
> +enum mqd_update_flag {
> +	UPDATE_FLAG_CU_MASK = 0,
> +};
> +
> +struct mqd_update_info {
> +	union {
> +		struct {
> +			uint32_t count; /* Must be a multiple of 32 */
> +			uint32_t *ptr;
> +		} cu_mask;
> +	};
> +	enum mqd_update_flag update_flag;
> +};
>  
>  /**
>   * struct queue
> @@ -1036,10 +1045,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
>  			    unsigned int *qid,
>  			    uint32_t *p_doorbell_offset_in_process);
>  int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
> -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
> -			struct queue_properties *p);
> -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
> +int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
>  			struct queue_properties *p);
> +int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
> +			struct mqd_update_info *minfo);
>  int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
>  			void *gws);
>  struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index 37529592457d..3627e7ac161b 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -394,8 +394,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
>  			pdd->qpd.num_gws = 0;
>  		}
>  
> -		kfree(pqn->q->properties.cu_mask);
> -		pqn->q->properties.cu_mask = NULL;
>  		uninit_queue(pqn->q);
>  	}
>  
> @@ -411,8 +409,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
>  	return retval;
>  }
>  
> -int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
> -			struct queue_properties *p)
> +int pqm_update_queue_properties(struct process_queue_manager *pqm,
> +				unsigned int qid, struct queue_properties *p)
>  {
>  	int retval;
>  	struct process_queue_node *pqn;
> @@ -436,8 +434,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
>  	return 0;
>  }
>  
> -int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
> -			struct queue_properties *p)
> +int pqm_update_mqd(struct process_queue_manager *pqm,
> +				unsigned int qid, struct mqd_update_info *minfo)
>  {
>  	int retval;
>  	struct process_queue_node *pqn;
> @@ -448,16 +446,8 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
>  		return -EFAULT;
>  	}
>  
> -	/* Free the old CU mask memory if it is already allocated, then
> -	 * allocate memory for the new CU mask.
> -	 */
> -	kfree(pqn->q->properties.cu_mask);
> -
> -	pqn->q->properties.cu_mask_count = p->cu_mask_count;
> -	pqn->q->properties.cu_mask = p->cu_mask;
> -
>  	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
> -							pqn->q, NULL);
> +							pqn->q, minfo);
>  	if (retval != 0)
>  		return retval;
>  

