<html><head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head>
<body>
<p><br>
</p>
<div class="moz-cite-prefix">On 2024-07-17 16:16, Felix Kuehling
wrote:<br>
</div>
<blockquote type="cite" cite="mid:478a13e5-2a72-4a43-8a68-10117c1a0f6f@amd.com">Sorry, I
see that this patch still doesn't propagate errors returned from
kfd_queue_release_buffers correctly. And the later patches in the
series don't seem to fix it either. See inline.
<br>
</blockquote>
The kfd_queue_release_buffers return value is handled in the queue
destroy path: it returns -ERESTARTSYS if it fails to take the vm lock
to release the buffers because a signal was received. See inline.<br>
<blockquote type="cite" cite="mid:478a13e5-2a72-4a43-8a68-10117c1a0f6f@amd.com">
<br>
On 2024-07-15 08:34, Philip Yang wrote:
<br>
<blockquote type="cite">Add helper function
kfd_queue_acquire_buffers to get queue wptr_bo
<br>
reference from queue write_ptr if it is mapped to the KFD node
with
<br>
expected size.
<br>
<br>
Move wptr_bo to structure queue_properties from struct queue as
queue is
<br>
allocated after queue buffers are validated, then we can remove
wptr_bo
<br>
parameter from pqm_create_queue.
<br>
<br>
Because amdgpu_bo_unref clears the pointer, queue_properties
wptr_bo is
<br>
used to acquire and release wptr_bo for validation, add
wptr_bo_gart to
<br>
queue_properties, to hold wptr_bo reference for GART mapping and
<br>
unmapping.
<br>
<br>
Move MES wptr_bo GART mapping to init_user_queue, the same
location with
<br>
queue ctx_bo GART mapping.
<br>
<br>
Signed-off-by: Philip Yang <a class="moz-txt-link-rfc2396E" href="mailto:Philip.Yang@amd.com"><Philip.Yang@amd.com></a>
<br>
---
<br>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +-
<br>
.../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 +-
<br>
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 56
+++---------------
<br>
.../drm/amd/amdkfd/kfd_device_queue_manager.c | 6 +-
<br>
drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 14 +++--
<br>
.../amd/amdkfd/kfd_process_queue_manager.c | 45
+++++++++++----
<br>
drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 57
+++++++++++++++++++
<br>
7 files changed, 116 insertions(+), 69 deletions(-)
<br>
<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
<br>
index 6e591280774b..4ed49265c764 100644
<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
<br>
@@ -322,7 +322,7 @@ int
amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
<br>
void **kptr, uint64_t *size);
<br>
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct
kgd_mem *mem);
<br>
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);
<br>
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo,
struct amdgpu_bo **bo_gart);
<br>
int amdgpu_amdkfd_gpuvm_restore_process_bos(void
*process_info,
<br>
struct dma_fence __rcu **ef);
<br>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
<br>
index 199e387d35f4..0ab37e7aec26 100644
<br>
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
<br>
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
<br>
@@ -2226,11 +2226,12 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
<br>
/**
<br>
* amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and
increment reference count
<br>
* @bo: Buffer object to be mapped
<br>
+ * @bo_gart: Return bo reference
<br>
*
<br>
* Before return, bo reference count is incremented. To
release the reference and unpin/
<br>
* unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
<br>
*/
<br>
-int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
<br>
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo,
struct amdgpu_bo **bo_gart)
<br>
{
<br>
int ret;
<br>
@@ -2257,7 +2258,7 @@ int
amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo)
<br>
amdgpu_bo_unreserve(bo);
<br>
- bo = amdgpu_bo_ref(bo);
<br>
+ *bo_gart = amdgpu_bo_ref(bo);
<br>
return 0;
<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
<br>
index 823f245dc7d0..202f24ee4bd7 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
<br>
@@ -247,8 +247,8 @@ static int
set_queue_properties_from_user(struct queue_properties
*q_properties,
<br>
q_properties->priority = args->queue_priority;
<br>
q_properties->queue_address =
args->ring_base_address;
<br>
q_properties->queue_size = args->ring_size;
<br>
- q_properties->read_ptr = (uint32_t *)
args->read_pointer_address;
<br>
- q_properties->write_ptr = (uint32_t *)
args->write_pointer_address;
<br>
+ q_properties->read_ptr = (void __user
*)args->read_pointer_address;
<br>
+ q_properties->write_ptr = (void __user
*)args->write_pointer_address;
<br>
q_properties->eop_ring_buffer_address =
args->eop_buffer_address;
<br>
q_properties->eop_ring_buffer_size =
args->eop_buffer_size;
<br>
q_properties->ctx_save_restore_area_address =
<br>
@@ -306,7 +306,6 @@ static int kfd_ioctl_create_queue(struct
file *filep, struct kfd_process *p,
<br>
struct kfd_process_device *pdd;
<br>
struct queue_properties q_properties;
<br>
uint32_t doorbell_offset_in_process = 0;
<br>
- struct amdgpu_bo *wptr_bo = NULL;
<br>
memset(&q_properties, 0, sizeof(struct
queue_properties));
<br>
@@ -342,53 +341,17 @@ static int kfd_ioctl_create_queue(struct
file *filep, struct kfd_process *p,
<br>
}
<br>
}
<br>
- /* Starting with GFX11, wptr BOs must be mapped to GART
for MES to determine work
<br>
- * on unmapped queues for usermode queue oversubscription
(no aggregated doorbell)
<br>
- */
<br>
- if (dev->kfd->shared_resources.enable_mes &&
<br>
- ((dev->adev->mes.sched_version &
AMDGPU_MES_API_VERSION_MASK)
<br>
- >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
<br>
- struct amdgpu_bo_va_mapping *wptr_mapping;
<br>
- struct amdgpu_vm *wptr_vm;
<br>
-
<br>
- wptr_vm = drm_priv_to_vm(pdd->drm_priv);
<br>
- err = amdgpu_bo_reserve(wptr_vm->root.bo, false);
<br>
- if (err)
<br>
- goto err_wptr_map_gart;
<br>
-
<br>
- wptr_mapping = amdgpu_vm_bo_lookup_mapping(
<br>
- wptr_vm, args->write_pointer_address
>> PAGE_SHIFT);
<br>
- amdgpu_bo_unreserve(wptr_vm->root.bo);
<br>
- if (!wptr_mapping) {
<br>
- pr_err("Failed to lookup wptr bo\n");
<br>
- err = -EINVAL;
<br>
- goto err_wptr_map_gart;
<br>
- }
<br>
-
<br>
- wptr_bo = wptr_mapping->bo_va->base.bo;
<br>
- if (wptr_bo->tbo.base.size > PAGE_SIZE) {
<br>
- pr_err("Requested GART mapping for wptr bo larger
than one page\n");
<br>
- err = -EINVAL;
<br>
- goto err_wptr_map_gart;
<br>
- }
<br>
- if (dev->adev !=
amdgpu_ttm_adev(wptr_bo->tbo.bdev)) {
<br>
- pr_err("Queue memory allocated to wrong device\n");
<br>
- err = -EINVAL;
<br>
- goto err_wptr_map_gart;
<br>
- }
<br>
-
<br>
- err = amdgpu_amdkfd_map_gtt_bo_to_gart(wptr_bo);
<br>
- if (err) {
<br>
- pr_err("Failed to map wptr bo to GART\n");
<br>
- goto err_wptr_map_gart;
<br>
- }
<br>
+ err = kfd_queue_acquire_buffers(pdd, &q_properties);
<br>
+ if (err) {
<br>
+ pr_debug("failed to acquire user queue buffers\n");
<br>
+ goto err_acquire_queue_buf;
<br>
}
<br>
pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
<br>
p->pasid,
<br>
dev->id);
<br>
- err = pqm_create_queue(&p->pqm, dev, filep,
&q_properties, &queue_id, wptr_bo,
<br>
+ err = pqm_create_queue(&p->pqm, dev, filep,
&q_properties, &queue_id,
<br>
NULL, NULL, NULL,
&doorbell_offset_in_process);
<br>
if (err != 0)
<br>
goto err_create_queue;
<br>
@@ -422,9 +385,8 @@ static int kfd_ioctl_create_queue(struct
file *filep, struct kfd_process *p,
<br>
return 0;
<br>
err_create_queue:
<br>
- if (wptr_bo)
<br>
- amdgpu_amdkfd_free_gtt_mem(dev->adev, (void
**)&wptr_bo);
<br>
-err_wptr_map_gart:
<br>
+ kfd_queue_release_buffers(pdd, &q_properties);
<br>
</blockquote>
<br>
You're ignoring the return value here. In this patch, the function
always returns 0, but in later patches it can return -ERESTARTSYS
and you never fix up the error handling here. This patch should
lay the groundwork for proper error handling.
<br>
</blockquote>
This is the error handling path after acquiring queue buffers, taken
when queue allocation fails, GART mapping of the queue wptr fails, or
the F/W returns failure to create the queue.<br>
<blockquote type="cite" cite="mid:478a13e5-2a72-4a43-8a68-10117c1a0f6f@amd.com">
<br>
<br>
<blockquote type="cite">+err_acquire_queue_buf:
<br>
err_bind_process:
<br>
err_pdd:
<br>
mutex_unlock(&p->mutex);
<br>
diff --git
a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
<br>
index 420444eb8e98..fdc76c24b2e7 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
<br>
@@ -208,10 +208,8 @@ static int add_queue_mes(struct
device_queue_manager *dqm, struct queue *q,
<br>
queue_input.mqd_addr = q->gart_mqd_addr;
<br>
queue_input.wptr_addr =
(uint64_t)q->properties.write_ptr;
<br>
- if (q->wptr_bo) {
<br>
- wptr_addr_off = (uint64_t)q->properties.write_ptr
& (PAGE_SIZE - 1);
<br>
- queue_input.wptr_mc_addr =
amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
<br>
- }
<br>
+ wptr_addr_off = (uint64_t)q->properties.write_ptr &
(PAGE_SIZE - 1);
<br>
+ queue_input.wptr_mc_addr =
amdgpu_bo_gpu_offset(q->properties.wptr_bo) + wptr_addr_off;
<br>
queue_input.is_kfd_process = 1;
<br>
queue_input.is_aql_queue = (q->properties.format ==
KFD_QUEUE_FORMAT_AQL);
<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
<br>
index 2b3ec92981e8..c98ff548313c 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
<br>
@@ -494,8 +494,8 @@ struct queue_properties {
<br>
uint64_t queue_size;
<br>
uint32_t priority;
<br>
uint32_t queue_percent;
<br>
- uint32_t *read_ptr;
<br>
- uint32_t *write_ptr;
<br>
+ void __user *read_ptr;
<br>
+ void __user *write_ptr;
<br>
void __iomem *doorbell_ptr;
<br>
uint32_t doorbell_off;
<br>
bool is_interop;
<br>
@@ -522,6 +522,9 @@ struct queue_properties {
<br>
uint64_t tba_addr;
<br>
uint64_t tma_addr;
<br>
uint64_t exception_status;
<br>
+
<br>
+ struct amdgpu_bo *wptr_bo_gart;
<br>
+ struct amdgpu_bo *wptr_bo;
<br>
};
<br>
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0
&& \
<br>
@@ -603,8 +606,6 @@ struct queue {
<br>
void *gang_ctx_bo;
<br>
uint64_t gang_ctx_gpu_addr;
<br>
void *gang_ctx_cpu_ptr;
<br>
-
<br>
- struct amdgpu_bo *wptr_bo;
<br>
};
<br>
enum KFD_MQD_TYPE {
<br>
@@ -1284,6 +1285,10 @@ int init_queue(struct queue **q, const
struct queue_properties *properties);
<br>
void uninit_queue(struct queue *q);
<br>
void print_queue_properties(struct queue_properties *q);
<br>
void print_queue(struct queue *q);
<br>
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user
*addr, struct amdgpu_bo **pbo,
<br>
+ u64 expected_size);
<br>
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd,
struct queue_properties *properties);
<br>
+int kfd_queue_release_buffers(struct kfd_process_device *pdd,
struct queue_properties *properties);
<br>
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE
type,
<br>
struct kfd_node *dev);
<br>
@@ -1320,7 +1325,6 @@ int pqm_create_queue(struct
process_queue_manager *pqm,
<br>
struct file *f,
<br>
struct queue_properties *properties,
<br>
unsigned int *qid,
<br>
- struct amdgpu_bo *wptr_bo,
<br>
const struct kfd_criu_queue_priv_data *q_data,
<br>
const void *restore_mqd,
<br>
const void *restore_ctl_stack,
<br>
diff --git
a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
<br>
index 36f0460cbffe..8552400d6d47 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
<br>
@@ -205,18 +205,21 @@ static void
pqm_clean_queue_resource(struct process_queue_manager *pqm,
<br>
if (dev->kfd->shared_resources.enable_mes) {
<br>
amdgpu_amdkfd_free_gtt_mem(dev->adev,
&pqn->q->gang_ctx_bo);
<br>
- if (pqn->q->wptr_bo)
<br>
- amdgpu_amdkfd_free_gtt_mem(dev->adev, (void
**)&pqn->q->wptr_bo);
<br>
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, (void
**)&pqn->q->properties.wptr_bo_gart);
<br>
}
<br>
}
<br>
void pqm_uninit(struct process_queue_manager *pqm)
<br>
{
<br>
struct process_queue_node *pqn, *next;
<br>
+ struct kfd_process_device *pdd;
<br>
list_for_each_entry_safe(pqn, next, &pqm->queues,
process_queue_list) {
<br>
- if (pqn->q)
<br>
+ if (pqn->q) {
<br>
+ pdd =
kfd_get_process_device_data(pqn->q->device,
pqm->process);
<br>
+ kfd_queue_release_buffers(pdd,
&pqn->q->properties);
<br>
</blockquote>
You're ignoring the return value here. In this patch, the function
always returns 0, but in later patches it can return -ERESTARTSYS
and you never fix up the error handling here. This patch should
lay the groundwork for proper error handling.
<br>
</blockquote>
This is called from the kfd_process_wq_release kernel worker to clean
up outstanding user queues after the process exits. It cannot be
interrupted by a user signal, so I think it is safe to ignore the
return value here.<br>
<blockquote type="cite" cite="mid:478a13e5-2a72-4a43-8a68-10117c1a0f6f@amd.com">
<br>
Regards,
<br>
Felix
<br>
<br>
<br>
<blockquote type="cite">
pqm_clean_queue_resource(pqm, pqn);
<br>
+ }
<br>
kfd_procfs_del_queue(pqn->q);
<br>
uninit_queue(pqn->q);
<br>
@@ -231,8 +234,7 @@ void pqm_uninit(struct process_queue_manager
*pqm)
<br>
static int init_user_queue(struct process_queue_manager *pqm,
<br>
struct kfd_node *dev, struct queue **q,
<br>
struct queue_properties *q_properties,
<br>
- struct file *f, struct amdgpu_bo *wptr_bo,
<br>
- unsigned int qid)
<br>
+ struct file *f, unsigned int qid)
<br>
{
<br>
int retval;
<br>
@@ -263,12 +265,32 @@ static int init_user_queue(struct
process_queue_manager *pqm,
<br>
goto cleanup;
<br>
}
<br>
memset((*q)->gang_ctx_cpu_ptr, 0,
AMDGPU_MES_GANG_CTX_SIZE);
<br>
- (*q)->wptr_bo = wptr_bo;
<br>
+
<br>
+ /* Starting with GFX11, wptr BOs must be mapped to GART
for MES to determine work
<br>
+ * on unmapped queues for usermode queue
oversubscription (no aggregated doorbell)
<br>
+ */
<br>
+ if (((dev->adev->mes.sched_version &
AMDGPU_MES_API_VERSION_MASK)
<br>
+ >> AMDGPU_MES_API_VERSION_SHIFT) >= 2) {
<br>
+ if (dev->adev !=
amdgpu_ttm_adev(q_properties->wptr_bo->tbo.bdev)) {
<br>
+ pr_err("Queue memory allocated to wrong
device\n");
<br>
+ retval = -EINVAL;
<br>
+ goto free_gang_ctx_bo;
<br>
+ }
<br>
+
<br>
+ retval =
amdgpu_amdkfd_map_gtt_bo_to_gart(q_properties->wptr_bo,
<br>
+
&(*q)->properties.wptr_bo_gart);
<br>
+ if (retval) {
<br>
+ pr_err("Failed to map wptr bo to GART\n");
<br>
+ goto free_gang_ctx_bo;
<br>
+ }
<br>
+ }
<br>
}
<br>
pr_debug("PQM After init queue");
<br>
return 0;
<br>
+free_gang_ctx_bo:
<br>
+ amdgpu_amdkfd_free_gtt_mem(dev->adev,
(*q)->gang_ctx_bo);
<br>
cleanup:
<br>
uninit_queue(*q);
<br>
*q = NULL;
<br>
@@ -280,7 +302,6 @@ int pqm_create_queue(struct
process_queue_manager *pqm,
<br>
struct file *f,
<br>
struct queue_properties *properties,
<br>
unsigned int *qid,
<br>
- struct amdgpu_bo *wptr_bo,
<br>
const struct kfd_criu_queue_priv_data *q_data,
<br>
const void *restore_mqd,
<br>
const void *restore_ctl_stack,
<br>
@@ -351,7 +372,7 @@ int pqm_create_queue(struct
process_queue_manager *pqm,
<br>
* allocate_sdma_queue() in create_queue() has the
<br>
* corresponding check logic.
<br>
*/
<br>
- retval = init_user_queue(pqm, dev, &q, properties,
f, wptr_bo, *qid);
<br>
+ retval = init_user_queue(pqm, dev, &q, properties,
f, *qid);
<br>
if (retval != 0)
<br>
goto err_create_queue;
<br>
pqn->q = q;
<br>
@@ -372,7 +393,7 @@ int pqm_create_queue(struct
process_queue_manager *pqm,
<br>
goto err_create_queue;
<br>
}
<br>
- retval = init_user_queue(pqm, dev, &q,
properties, f, wptr_bo, *qid);
<br>
+ retval = init_user_queue(pqm, dev, &q, properties,
f, *qid);
<br>
if (retval != 0)
<br>
goto err_create_queue;
<br>
pqn->q = q;
<br>
@@ -490,6 +511,10 @@ int pqm_destroy_queue(struct
process_queue_manager *pqm, unsigned int qid)
<br>
}
<br>
if (pqn->q) {
<br>
+ retval = kfd_queue_release_buffers(pdd,
&pqn->q->properties);
<br>
+ if (retval)
<br>
+ goto err_destroy_queue;
<br>
</blockquote>
</blockquote>
<p>If the destroy queue path is interrupted by a signal while waiting
for the vm lock, it returns -ERESTARTSYS here, and the user process
can then retry or exit.<br>
</p>
<p>Regards,</p>
<p>Philip<br>
</p>
<blockquote type="cite" cite="mid:478a13e5-2a72-4a43-8a68-10117c1a0f6f@amd.com">
<blockquote type="cite">+
<br>
kfd_procfs_del_queue(pqn->q);
<br>
dqm = pqn->q->device->dqm;
<br>
retval = dqm->ops.destroy_queue(dqm,
&pdd->qpd, pqn->q);
<br>
@@ -971,7 +996,7 @@ int kfd_criu_restore_queue(struct
kfd_process *p,
<br>
print_queue_properties(&qp);
<br>
- ret = pqm_create_queue(&p->pqm, pdd->dev, NULL,
&qp, &queue_id, NULL, q_data, mqd, ctl_stack,
<br>
+ ret = pqm_create_queue(&p->pqm, pdd->dev, NULL,
&qp, &queue_id, q_data, mqd, ctl_stack,
<br>
NULL);
<br>
if (ret) {
<br>
pr_err("Failed to create new queue err:%d\n", ret);
<br>
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
<br>
index 0f6992b1895c..b4529ec298a9 100644
<br>
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
<br>
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
<br>
@@ -82,3 +82,60 @@ void uninit_queue(struct queue *q)
<br>
{
<br>
kfree(q);
<br>
}
<br>
+
<br>
+int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user
*addr, struct amdgpu_bo **pbo,
<br>
+ u64 expected_size)
<br>
+{
<br>
+ struct amdgpu_bo_va_mapping *mapping;
<br>
+ u64 user_addr;
<br>
+ u64 size;
<br>
+
<br>
+ user_addr = (u64)addr >> AMDGPU_GPU_PAGE_SHIFT;
<br>
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
<br>
+
<br>
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
<br>
+ if (!mapping)
<br>
+ goto out_err;
<br>
+
<br>
+ if (user_addr != mapping->start || user_addr + size - 1
!= mapping->last) {
<br>
+ pr_debug("expected size 0x%llx not equal to mapping
addr 0x%llx size 0x%llx\n",
<br>
+ expected_size, mapping->start <<
AMDGPU_GPU_PAGE_SHIFT,
<br>
+ (mapping->last - mapping->start + 1) <<
AMDGPU_GPU_PAGE_SHIFT);
<br>
+ goto out_err;
<br>
+ }
<br>
+
<br>
+ *pbo = amdgpu_bo_ref(mapping->bo_va->base.bo);
<br>
+ return 0;
<br>
+
<br>
+out_err:
<br>
+ *pbo = NULL;
<br>
+ return -EINVAL;
<br>
+}
<br>
+
<br>
+int kfd_queue_acquire_buffers(struct kfd_process_device *pdd,
struct queue_properties *properties)
<br>
+{
<br>
+ struct amdgpu_vm *vm;
<br>
+ int err;
<br>
+
<br>
+ vm = drm_priv_to_vm(pdd->drm_priv);
<br>
+ err = amdgpu_bo_reserve(vm->root.bo, false);
<br>
+ if (err)
<br>
+ return err;
<br>
+
<br>
+ err = kfd_queue_buffer_get(vm, properties->write_ptr,
&properties->wptr_bo, PAGE_SIZE);
<br>
+ if (err)
<br>
+ goto out_unreserve;
<br>
+
<br>
+ amdgpu_bo_unreserve(vm->root.bo);
<br>
+ return 0;
<br>
+
<br>
+out_unreserve:
<br>
+ amdgpu_bo_unreserve(vm->root.bo);
<br>
+ return err;
<br>
+}
<br>
+
<br>
+int kfd_queue_release_buffers(struct kfd_process_device *pdd,
struct queue_properties *properties)
<br>
+{
<br>
+ amdgpu_bo_unref(&properties->wptr_bo);
<br>
+ return 0;
<br>
+}
<br>
</blockquote>
</blockquote>
</body>
</html>