[PATCH 1/3] drm/radeon: wait for BO move to finish on kmap
Christian König
deathsimple at vodafone.de
Mon Apr 13 07:52:15 PDT 2015
From: Christian König <christian.koenig at amd.com>
After resume, buffers need to be moved back into VRAM. We already had this
problem with UVD, but solve it more generally to be on the safe side.
Signed-off-by: Christian König <christian.koenig at amd.com>
---
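Note (not part of the patch): a minimal usage sketch of the new behaviour,
assuming a hypothetical caller that already has the BO reserved (as the UVD
code does). With the move fence tracked in radeon_bo::last_move and waited
on inside radeon_bo_kmap(), such a caller no longer has to wait on the
exclusive reservation fence itself before touching the mapping:

static int example_cpu_access(struct radeon_bo *bo)
{
	void *ptr;
	int r;

	/* radeon_bo_kmap() now blocks until any pending BO move finished */
	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the BO (%d)!\n", r);
		return r;
	}

	/* ... CPU access through ptr is safe here ... */

	radeon_bo_kunmap(bo);
	return 0;
}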
drivers/gpu/drm/radeon/radeon.h | 2 ++
drivers/gpu/drm/radeon/radeon_object.c | 11 +++++++++++
drivers/gpu/drm/radeon/radeon_ttm.c | 3 +++
drivers/gpu/drm/radeon/radeon_uvd.c | 10 ----------
4 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 33d5a4f..6e6b49a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -506,6 +506,8 @@ struct radeon_bo {
struct radeon_mn *mn;
struct interval_tree_node mn_it;
+
+ struct radeon_fence *last_move;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 318165d..14a0f87 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -263,6 +263,17 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
bool is_iomem;
int r;
+ if (bo->last_move) {
+ r = radeon_fence_wait(bo->last_move, false);
+ if (r) {
+ radeon_bo_kunmap(bo);
+ DRM_ERROR("Failed waiting BO move (%d)!\n", r);
+ return r;
+ }
+
+ radeon_fence_unref(&bo->last_move);
+ }
+
if (bo->kptr) {
if (ptr) {
*ptr = bo->kptr;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b292aca..1fa4f2d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -253,6 +253,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
@@ -300,6 +301,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = ttm_bo_move_accel_cleanup(bo, &fence->base,
evict, no_wait_gpu, new_mem);
+ radeon_fence_unref(&rbo->last_move);
+ rbo->last_move = radeon_fence_ref(fence);
radeon_fence_unref(&fence);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2ae..60f96a3 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -401,7 +401,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
{
int32_t *msg, msg_type, handle;
unsigned img_size = 0;
- struct fence *f;
void *ptr;
int i, r;
@@ -411,15 +410,6 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- f = reservation_object_get_excl(bo->tbo.resv);
- if (f) {
- r = radeon_fence_wait((struct radeon_fence *)f, false);
- if (r) {
- DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
- return r;
- }
- }
-
r = radeon_bo_kmap(bo, &ptr);
if (r) {
DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
--
1.9.1