[PATCH 15/15] drm/amdgpu: user fence proof of concept
Christian König
ckoenig.leichtzumerken at gmail.com
Mon May 2 16:37:22 UTC 2022
This is just a hack to test the functionality; it is not a real
implementation of the interface.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
drivers/dma-buf/dma-resv.c | 3 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 28 ++++++++++++++++---
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +-
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +-
.../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++++--
5 files changed, 34 insertions(+), 10 deletions(-)
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index da667c21ad55..e18efb21c452 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -286,7 +286,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
/* Drivers should not add containers here, instead add each fence
* individually.
*/
- WARN_ON(dma_fence_is_container(fence));
+ //WARN_ON(dma_fence_is_container(fence));
+
/* User fences must be added using DMA_RESV_USAGE_USER */
WARN_ON(test_bit(DMA_FENCE_FLAG_USER, &fence->flags) !=
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 95eeab527ca9..299ab8e50c42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -453,6 +453,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct drm_gem_object *obj;
+ long timeout = HZ / 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
@@ -476,6 +477,17 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
}
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_resv *resv = e->bo->tbo.base.resv;
+
+ timeout = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_USER,
+ true, timeout);
+ if (unlikely(timeout < 0))
+ return timeout;
+ if (unlikely(timeout == 0))
+ return -ETIME;
+ }
+
/* Get userptr backing pages. If pages are updated after registered
* in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
* amdgpu_ttm_backend_bind() to flush and invalidate new pages
@@ -516,7 +528,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return r;
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
- r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base, 2);
+ r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base, 3);
drm_exec_break_on_contention(&p->exec);
if (unlikely(r))
return r;
@@ -527,7 +539,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_bo) {
r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
- 2);
+ 3);
drm_exec_continue_on_contention(&p->exec);
if (unlikely(r))
return r;
@@ -1160,6 +1172,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct drm_sched_entity *entity = p->entity;
struct amdgpu_bo_list_entry *e;
struct drm_gem_object *gobj;
+ struct dma_fence *dummy;
struct amdgpu_job *job;
unsigned long index;
uint64_t seq;
@@ -1191,6 +1204,11 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
}
p->fence = dma_fence_get(&job->base.s_fence->finished);
+ dummy = dma_fence_merge(p->fence, dma_fence_get_stub(true));
+ if (!dummy) {
+ r = -ENOMEM;
+ goto error_abort;
+ }
amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
amdgpu_cs_post_dependencies(p);
@@ -1214,11 +1232,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
drm_exec_for_each_duplicate_object(&p->exec, index, gobj) {
ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
- dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(gobj->resv, dummy, DMA_RESV_USAGE_USER);
}
drm_exec_for_each_locked_object(&p->exec, index, gobj) {
ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
- dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_KERNEL);
+ dma_resv_add_fence(gobj->resv, dummy, DMA_RESV_USAGE_USER);
}
mutex_unlock(&p->adev->notifier_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b03663f42cc9..bd334f5fd64f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2655,7 +2655,7 @@ static const struct drm_driver amdgpu_kms_driver = {
DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
- DRIVER_SYNCOBJ_TIMELINE,
+ DRIVER_SYNCOBJ_TIMELINE | DRIVER_USER_FENCE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index e5c8e72a9485..6705287887e9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -628,7 +628,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
*/
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec)
{
- return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, 4);
+ return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, 5);
}
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c5b2417adcc6..2e0f059b9d12 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -7627,12 +7627,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
DRM_ERROR("%p bind failed\n", rbo);
goto error_unpin;
}
+ amdgpu_bo_unreserve(rbo);
r = drm_gem_plane_helper_prepare_fb(plane, new_state);
if (unlikely(r != 0))
- goto error_unpin;
-
- amdgpu_bo_unreserve(rbo);
+ goto error_reserve;
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -7665,6 +7664,10 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
return 0;
+error_reserve:
+ if (WARN_ON(amdgpu_bo_reserve(rbo, true)))
+ return r;
+
error_unpin:
amdgpu_bo_unpin(rbo);
--
2.25.1
More information about the dri-devel
mailing list