[PATCH 30/32] drm/amdkfd: add debug queue snapshot operation
Felix Kuehling
felix.kuehling at amd.com
Wed Mar 22 21:52:07 UTC 2023
On 2023-01-25 at 14:53, Jonathan Kim wrote:
> Allow the debugger to get a snapshot of a specified number of queues
> containing various queue property information that is copied to the
> debugger.
>
> Since the debugger doesn't know how many queues exist at any given time,
> allow the debugger to pass the requested number of snapshots as 0 to get
> the actual number of potential snapshots to use for a subsequent snapshot
> request for actual information.
>
> To prevent future ABI breakage, pass in the requested entry_size.
> The KFD will return its own entry_size in case the debugger still wants to
> log the information in a core dump on sizing failure.
>
> Also allow the debugger to clear exceptions when doing a snapshot.
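
For anyone following along, the intended usage on the debugger side is a
two-pass pattern. Roughly like this (untested sketch; the ioctl number and
the args/entry struct names are assumed from the UAPI additions earlier in
this series):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kfd_ioctl.h>

    /* Untested sketch of the two-pass snapshot pattern described above. */
    static struct kfd_queue_snapshot_entry *
    snapshot_queues(int kfd_fd, uint64_t target_pid, uint32_t *count)
    {
            struct kfd_ioctl_dbg_trap_args args;
            struct kfd_queue_snapshot_entry *entries;

            memset(&args, 0, sizeof(args));
            args.pid = target_pid;
            args.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
            /* entry_size must be non-zero or KFD returns -EINVAL. */
            args.queue_snapshot.entry_size = sizeof(*entries);

            /* First pass: num_queues == 0 only reports how many queues exist. */
            if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args))
                    return NULL;

            *count = args.queue_snapshot.num_queues;
            entries = calloc(*count, sizeof(*entries));
            if (!entries)
                    return NULL;

            /* Second pass: fetch the entries. KFD clamps entry_size to
             * min(requested, kernel entry size) and writes back its own size,
             * so a size mismatch is detectable but the data is still usable
             * for a core dump. num_queues is updated to the real count, which
             * may exceed the buffer if queues were created in between. */
            args.queue_snapshot.snapshot_buf_ptr = (uint64_t)(uintptr_t)entries;
            args.queue_snapshot.num_queues = *count;
            if (ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &args)) {
                    free(entries);
                    return NULL;
            }
            return entries;
    }
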
>
> v3: fix uninitialized return and change queue snapshot to type void for
> proper increment on buffer copy.
> use memset 0 to init snapshot entry to clear struct padding.
>
> v2: change buf_size arg to num_queues for clarity.
> fix minimum entry size calculation.
>
> Signed-off-by: Jonathan Kim <jonathan.kim at amd.com>
> ---
> drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 +++
> .../drm/amd/amdkfd/kfd_device_queue_manager.c | 36 ++++++++++++++++
> .../drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++
> drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++
> .../amd/amdkfd/kfd_process_queue_manager.c | 41 +++++++++++++++++++
> 5 files changed, 91 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> index d3d2026b6e65..93b288233577 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
> @@ -2965,6 +2965,12 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v
> &args->query_exception_info.info_size);
> break;
> case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT:
> + r = pqm_get_queue_snapshot(&target->pqm,
> + args->queue_snapshot.exception_mask,
> + (void __user *)args->queue_snapshot.snapshot_buf_ptr,
> + &args->queue_snapshot.num_queues,
> + &args->queue_snapshot.entry_size);
> + break;
> case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT:
> pr_warn("Debug op %i not supported yet\n", args->op);
> r = -EACCES;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> index 7792fe9491c5..5ae504a512f0 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
> @@ -3000,6 +3000,42 @@ int suspend_queues(struct kfd_process *p,
> return total_suspended;
> }
>
> +static uint32_t set_queue_type_for_user(struct queue_properties *q_props)
> +{
> + switch (q_props->type) {
> + case KFD_QUEUE_TYPE_COMPUTE:
> + return q_props->format == KFD_QUEUE_FORMAT_PM4
> + ? KFD_IOC_QUEUE_TYPE_COMPUTE
> + : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
> + case KFD_QUEUE_TYPE_SDMA:
> + return KFD_IOC_QUEUE_TYPE_SDMA;
> + case KFD_QUEUE_TYPE_SDMA_XGMI:
> + return KFD_IOC_QUEUE_TYPE_SDMA_XGMI;
> + default:
> + WARN_ONCE(true, "queue type not recognized!");
> + return 0xffffffff;
> + };
> +}
> +
> +void set_queue_snapshot_entry(struct queue *q,
> + uint64_t exception_clear_mask,
> + struct kfd_queue_snapshot_entry *qss_entry)
> +{
> + qss_entry->ring_base_address = q->properties.queue_address;
> + qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr;
> + qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr;
> + qss_entry->ctx_save_restore_address =
> + q->properties.ctx_save_restore_area_address;
> + qss_entry->ctx_save_restore_area_size =
> + q->properties.ctx_save_restore_area_size;
> + qss_entry->exception_status = q->properties.exception_status;
> + qss_entry->queue_id = q->properties.queue_id;
> + qss_entry->gpu_id = q->device->id;
> + qss_entry->ring_size = (uint32_t)q->properties.queue_size;
> + qss_entry->queue_type = set_queue_type_for_user(&q->properties);
> + q->properties.exception_status &= ~exception_clear_mask;
> +}
> +
> int debug_lock_and_unmap(struct device_queue_manager *dqm)
> {
> int r;
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> index 7ccf8d0d1867..89d4a5b293a5 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
> @@ -296,6 +296,9 @@ int suspend_queues(struct kfd_process *p,
> int resume_queues(struct kfd_process *p,
> uint32_t num_queues,
> uint32_t *usr_queue_id_array);
> +void set_queue_snapshot_entry(struct queue *q,
> + uint64_t exception_clear_mask,
> + struct kfd_queue_snapshot_entry *qss_entry);
> int debug_lock_and_unmap(struct device_queue_manager *dqm);
> int debug_map_and_unlock(struct device_queue_manager *dqm);
> int debug_refresh_runlist(struct device_queue_manager *dqm);
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> index cfc50d1690c7..cc7816db60eb 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
> @@ -1302,6 +1302,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
> void __user *ctl_stack,
> u32 *ctl_stack_used_size,
> u32 *save_area_used_size);
> +int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
> + uint64_t exception_clear_mask,
> + void __user *buf,
> + int *num_qss_entries,
> + uint32_t *entry_size);
>
> int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
> uint64_t fence_value,
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> index 0ae6026c7d69..221cd4b03f1c 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
> @@ -576,6 +576,47 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
> save_area_used_size);
> }
>
> +int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
> + uint64_t exception_clear_mask,
> + void __user *buf,
> + int *num_qss_entries,
> + uint32_t *entry_size)
> +{
> + struct process_queue_node *pqn;
> + uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
> + int r = 0;
> +
> + *num_qss_entries = 0;
> + if (!(*entry_size))
> + return -EINVAL;
> +
> + *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry));
> + mutex_lock(&pqm->process->event_mutex);
> +
> + list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
> + if (!pqn->q)
> + continue;
> +
> + if (*num_qss_entries < tmp_qss_entries) {
> + struct kfd_queue_snapshot_entry src;
> +
> + memset(&src, 0, sizeof(src));
I'd move the variable declaration up to the function scope. That way you
only need to memset it once outside the loop. With that fixed, the patch is
Reviewed-by: Felix Kuehling <Felix.Kuehling at amd.com>
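
I.e. something like this (just a sketch of what I mean, the rest of the
function unchanged):

    struct process_queue_node *pqn;
    struct kfd_queue_snapshot_entry src;
    uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries;
    int r = 0;

    /* ... entry_size checks unchanged ... */

    /* Zero once; set_queue_snapshot_entry() fills every field each pass. */
    memset(&src, 0, sizeof(src));
    mutex_lock(&pqm->process->event_mutex);

    list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
            if (!pqn->q)
                    continue;

            if (*num_qss_entries < tmp_qss_entries) {
                    set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
                    /* ... copy_to_user() and buf advance unchanged ... */
            }
            *num_qss_entries += 1;
    }
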
> +
> + set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src);
> +
> + if (copy_to_user(buf, &src, *entry_size)) {
> + r = -EFAULT;
> + break;
> + }
> + buf += tmp_entry_size;
> + }
> + *num_qss_entries += 1;
> + }
> +
> + mutex_unlock(&pqm->process->event_mutex);
> + return r;
> +}
> +
> static int get_queue_data_sizes(struct kfd_process_device *pdd,
> struct queue *q,
> uint32_t *mqd_size,