[PATCH 13/13] drm/radeon: rework recursive gpu reset handling

Christian König <deathsimple@vodafone.de>
Thu Apr 19 15:39:20 PDT 2012


Instead of all the juggling with a recursive mutex
(which also fixes only half of the problem), move
the actual gpu reset out of the fence code, return
-EDEADLK and then reset the gpu in the calling
ioctl function.

Signed-off-by: Christian König <deathsimple@vodafone.de>
---
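A note for reviewers: the idea is that anything waiting on a fence
now just propagates -EDEADLK when it detects a lockup, and the ioctl
entry points perform the reset under the (now plain) cs_mutex. A
minimal sketch of that calling pattern follows; it is a simplified
illustration only, and do_work_that_waits_on_fences() is a
placeholder name, not code from this patch:

	int some_ioctl(struct radeon_device *rdev)
	{
		int r;

		mutex_lock(&rdev->cs_mutex);
		/* may now return -EDEADLK instead of resetting the gpu itself */
		r = do_work_that_waits_on_fences(rdev);
		if (r == -EDEADLK)
			r = radeon_gpu_reset(rdev); /* reset outside the fence code */
		mutex_unlock(&rdev->cs_mutex);
		return r;
	}

The CS path already holds cs_mutex when it calls the handler, while
the GEM helper takes cs_mutex around the reset itself.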
 drivers/gpu/drm/radeon/radeon.h        |   44 +-------------------------------
 drivers/gpu/drm/radeon/radeon_cs.c     |   21 +++++++++++---
 drivers/gpu/drm/radeon/radeon_device.c |    7 +----
 drivers/gpu/drm/radeon/radeon_fence.c  |   10 ++-----
 drivers/gpu/drm/radeon/radeon_gart.c   |   16 ++++++------
 drivers/gpu/drm/radeon/radeon_gem.c    |   14 ++++++++++
 6 files changed, 43 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 5912cab..aadc334 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -155,48 +155,6 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
 #endif
 bool radeon_get_bios(struct radeon_device *rdev);
 
-
-/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
-	struct mutex		mutex;
-	struct task_struct	*owner;
-	int			level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
-	mutex_init(&mutex->mutex);
-	mutex->owner = NULL;
-	mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
-	if (mutex_trylock(&mutex->mutex)) {
-		/* The mutex was unlocked before, so it's ours now */
-		mutex->owner = current;
-	} else if (mutex->owner != current) {
-		/* Another process locked the mutex, take it */
-		mutex_lock(&mutex->mutex);
-		mutex->owner = current;
-	}
-	/* Otherwise the mutex was already locked by this process */
-
-	mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
-	if (--mutex->level > 0)
-		return;
-
-	mutex->owner = NULL;
-	mutex_unlock(&mutex->mutex);
-}
-
-
 /*
  * Dummy page
  */
@@ -1497,7 +1455,7 @@ struct radeon_device {
 	struct radeon_gem		gem;
 	struct radeon_pm		pm;
 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
-	struct radeon_mutex		cs_mutex;
+	struct mutex			cs_mutex;
 	struct radeon_wb		wb;
 	struct radeon_dummy_page	dummy_page;
 	bool				shutdown;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index de40d33..2486b21 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -496,15 +496,23 @@ out:
 	return r;
 }
 
+static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		r = radeon_gpu_reset(rdev);
+	}
+	return r;
+}
+
 int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_cs_parser parser;
 	int r;
 
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->cs_mutex);
 	if (!rdev->accel_working) {
-		radeon_mutex_unlock(&rdev->cs_mutex);
+		mutex_unlock(&rdev->cs_mutex);
 		return -EBUSY;
 	}
 	/* initialize parser */
@@ -517,7 +525,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
 		radeon_cs_parser_fini(&parser, r);
-		radeon_mutex_unlock(&rdev->cs_mutex);
+		r = radeon_cs_handle_lockup(rdev, r);
+		mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_cs_parser_relocs(&parser);
@@ -525,7 +534,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("Failed to parse relocation %d!\n", r);
 		radeon_cs_parser_fini(&parser, r);
-		radeon_mutex_unlock(&rdev->cs_mutex);
+		r = radeon_cs_handle_lockup(rdev, r);
+		mutex_unlock(&rdev->cs_mutex);
 		return r;
 	}
 	r = radeon_cs_ib_chunk(rdev, &parser);
@@ -538,7 +548,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	}
 out:
 	radeon_cs_parser_fini(&parser, r);
-	radeon_mutex_unlock(&rdev->cs_mutex);
+	r = radeon_cs_handle_lockup(rdev, r);
+	mutex_unlock(&rdev->cs_mutex);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 9189f8d..04f3f40 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -722,7 +722,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
 	/* mutex initialization is all done here so we
 	 * can recall functions without having locking issues */
-	radeon_mutex_init(&rdev->cs_mutex);
+	mutex_init(&rdev->cs_mutex);
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		mutex_init(&rdev->ring[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -983,9 +983,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	int r;
 	int resched;
 
-	/* Prevent CS ioctl from interfering */
-	radeon_mutex_lock(&rdev->cs_mutex);
-
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -1000,8 +997,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
 	}
 
-	radeon_mutex_unlock(&rdev->cs_mutex);
-
 	if (r) {
 		/* bad news, how to tell it to userspace ? */
 		dev_info(rdev->dev, "GPU reset failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 0e8ac35..ea7aee3 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -267,6 +267,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 			/* change sequence value on all rings, so nobody else thinks there is a lockup */
 			for (i = 0; i < RADEON_NUM_RINGS; ++i)
 				rdev->fence_drv[i].last_seq -= 0x10000;
+
+			rdev->fence_drv[fence->ring].last_activity = jiffies;
 			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 
 			if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {
@@ -277,13 +279,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 
 				/* mark the ring as not ready any more */
 				rdev->ring[fence->ring].ready = false;
-				r = radeon_gpu_reset(rdev);
-				if (r)
-					return r;
-
-				write_lock_irqsave(&rdev->fence_lock, irq_flags);
-				rdev->fence_drv[fence->ring].last_activity = jiffies;
-				write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+				return -EDEADLK;
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 0ab3277..450ae77 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -356,13 +356,13 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev)
 {
 	struct radeon_vm *vm, *tmp;
 
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->cs_mutex);
 	/* unbind all active vm */
 	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
 		radeon_vm_unbind_locked(rdev, vm);
 	}
 	rdev->vm_manager.funcs->fini(rdev);
-	radeon_mutex_unlock(&rdev->cs_mutex);
+	mutex_unlock(&rdev->cs_mutex);
 	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
 }
 
@@ -480,9 +480,9 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 	if (last_pfn > vm->last_pfn) {
 		/* grow va space 32M by 32M */
 		unsigned align = ((32 << 20) >> 12) - 1;
-		radeon_mutex_lock(&rdev->cs_mutex);
+		mutex_lock(&rdev->cs_mutex);
 		radeon_vm_unbind_locked(rdev, vm);
-		radeon_mutex_unlock(&rdev->cs_mutex);
+		mutex_unlock(&rdev->cs_mutex);
 		vm->last_pfn = (last_pfn + align) & ~align;
 	}
 	head = &vm->va;
@@ -598,9 +598,9 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		return 0;
 
 	mutex_lock(&vm->mutex);
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->cs_mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
-	radeon_mutex_unlock(&rdev->cs_mutex);
+	mutex_unlock(&rdev->cs_mutex);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&vm->mutex);
 	list_del(&bo_va->bo_list);
@@ -645,9 +645,9 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 
 	mutex_lock(&vm->mutex);
 
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->cs_mutex);
 	radeon_vm_unbind_locked(rdev, vm);
-	radeon_mutex_unlock(&rdev->cs_mutex);
+	mutex_unlock(&rdev->cs_mutex);
 
 	/* remove all bo */
 	r = radeon_bo_reserve(rdev->sa_manager.bo, false);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c7008b5..f65377f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -154,6 +154,15 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
 	radeon_bo_unreserve(rbo);
 }
 
+static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
+{
+	if (r == -EDEADLK) {
+		mutex_lock(&rdev->cs_mutex);
+		r = radeon_gpu_reset(rdev);
+		mutex_unlock(&rdev->cs_mutex);
+	}
+	return r;
+}
 
 /*
  * GEM ioctls.
@@ -210,12 +219,14 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 					args->initial_domain, false,
 					false, &gobj);
 	if (r) {
+		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(gobj);
 	if (r) {
+		r = radeon_gem_handle_lockup(rdev, r);
 		return r;
 	}
 	args->handle = handle;
@@ -245,6 +256,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
 	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
 	return r;
 }
 
@@ -301,6 +313,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
 	return r;
 }
 
@@ -322,6 +335,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	if (robj->rdev->asic->ioctl_wait_idle)
 		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
 	drm_gem_object_unreference_unlocked(gobj);
+	r = radeon_gem_handle_lockup(robj->rdev, r);
 	return r;
 }
 
-- 
1.7.5.4