[PATCH 09/24] drm/radeon: simplify semaphore handling
j.glisse at gmail.com
Wed Apr 25 12:03:14 PDT 2012
From: Christian König <deathsimple at vodafone.de>
Directly use the suballocator to get small chunks
of memory. It's equally fast and doesn't crash when
we encounter a GPU reset.
Signed-off-by: Christian König <deathsimple at vodafone.de>
---
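(Note, not part of the commit message: for readers skimming the diff, here is a minimal sketch of how the simplified API is meant to be used to synchronize two rings. The caller, ring indices and error handling are hypothetical; only the radeon_semaphore_* function names and signatures come from the patch below.)

	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);	/* 8 byte slot from the IB suballocator */
	if (r)
		return r;

	radeon_semaphore_emit_signal(rdev, ring_a, sem);	/* producer ring signals */
	radeon_semaphore_emit_wait(rdev, ring_b, sem);		/* consumer ring stalls until signaled */

	/* ... once both rings are known to be past these packets ... */
	radeon_semaphore_free(rdev, sem);
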
drivers/gpu/drm/radeon/evergreen.c | 1 -
drivers/gpu/drm/radeon/ni.c | 1 -
drivers/gpu/drm/radeon/r600.c | 1 -
drivers/gpu/drm/radeon/radeon.h | 29 +------
drivers/gpu/drm/radeon/radeon_device.c | 2 -
drivers/gpu/drm/radeon/radeon_semaphore.c | 137 +++++------------------------
drivers/gpu/drm/radeon/rv770.c | 1 -
drivers/gpu/drm/radeon/si.c | 1 -
8 files changed, 26 insertions(+), 147 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ca47f52..a76389c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3431,7 +3431,6 @@ void evergreen_fini(struct radeon_device *rdev)
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0146428..c0b0956 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1773,7 +1773,6 @@ void cayman_fini(struct radeon_device *rdev)
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index bc3a2ef..24e68fd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2667,7 +2667,6 @@ void r600_fini(struct radeon_device *rdev)
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 415a496..222939f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -427,34 +427,12 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
/*
* Semaphores.
*/
-struct radeon_ring;
-
-#define RADEON_SEMAPHORE_BO_SIZE 256
-
-struct radeon_semaphore_driver {
- rwlock_t lock;
- struct list_head bo;
-};
-
-struct radeon_semaphore_bo;
-
-/* everything here is constant */
struct radeon_semaphore {
- struct list_head list;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- struct radeon_semaphore_bo *bo;
-};
-
-struct radeon_semaphore_bo {
- struct list_head list;
- struct radeon_ib *ib;
- struct list_head free;
- struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
- unsigned nused;
+ struct radeon_sa_bo sa_bo;
+ signed waiters;
+ uint64_t gpu_addr;
};
-void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
@@ -1518,7 +1496,6 @@ struct radeon_device {
struct radeon_mman mman;
rwlock_t fence_lock;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
- struct radeon_semaphore_driver semaphore_drv;
struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index eb63a06..f314819 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -733,11 +733,9 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_lock);
- rwlock_init(&rdev->semaphore_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
- INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
/* initialize vm here */
rdev->vm_manager.use_bitmap = 1;
rdev->vm_manager.max_pfn = 1 << 20;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 61dd4e3..b67c259 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -31,108 +31,32 @@
#include "drm.h"
#include "radeon.h"
-static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore)
{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- uint64_t gpu_addr;
- uint32_t *cpu_ptr;
- int r, i;
-
+ void *cpu_ptr;
+ int r;
- bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
- if (bo == NULL) {
+ *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
+ if (*semaphore == NULL) {
return -ENOMEM;
}
- INIT_LIST_HEAD(&bo->free);
- INIT_LIST_HEAD(&bo->list);
- bo->nused = 0;
- r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+ r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+ &(*semaphore)->sa_bo, 8, 8, true);
if (r) {
- dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
- kfree(bo);
+ kfree(*semaphore);
+ *semaphore = NULL;
return r;
}
- gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
- gpu_addr += bo->ib->sa_bo.offset;
- cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
- cpu_ptr += (bo->ib->sa_bo.offset >> 2);
- for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
- bo->semaphores[i].gpu_addr = gpu_addr;
- bo->semaphores[i].cpu_ptr = cpu_ptr;
- bo->semaphores[i].bo = bo;
- list_add_tail(&bo->semaphores[i].list, &bo->free);
- gpu_addr += 8;
- cpu_ptr += 2;
- }
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
- return 0;
-}
-
-static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
- struct radeon_semaphore_bo *bo)
-{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
- radeon_fence_unref(&bo->ib->fence);
- list_del(&bo->list);
- kfree(bo);
-}
-
-void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
-
- if (list_empty(&rdev->semaphore_drv.bo)) {
- return;
- }
- /* only shrink if first bo has free semaphore */
- bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
- if (list_empty(&bo->free)) {
- return;
- }
- list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
- if (bo->nused)
- continue;
- radeon_semaphore_del_bo_locked(rdev, bo);
- }
-}
-int radeon_semaphore_create(struct radeon_device *rdev,
- struct radeon_semaphore **semaphore)
-{
- struct radeon_semaphore_bo *bo;
- unsigned long irq_flags;
- bool do_retry = true;
- int r;
-
-retry:
- *semaphore = NULL;
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
- if (list_empty(&bo->free))
- continue;
- *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
- (*semaphore)->cpu_ptr[0] = 0;
- (*semaphore)->cpu_ptr[1] = 0;
- list_del(&(*semaphore)->list);
- bo->nused++;
- break;
- }
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ (*semaphore)->waiters = 0;
+ (*semaphore)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ (*semaphore)->gpu_addr += (*semaphore)->sa_bo.offset;
- if (*semaphore == NULL) {
- if (do_retry) {
- do_retry = false;
- r = radeon_semaphore_add_bo(rdev);
- if (r)
- return r;
- goto retry;
- }
- return -ENOMEM;
- }
+ cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ cpu_ptr += (*semaphore)->sa_bo.offset;
+ *((uint64_t*)cpu_ptr) = 0;
return 0;
}
@@ -140,39 +64,24 @@ retry:
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ --semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
+ ++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore *semaphore)
{
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- semaphore->bo->nused--;
- list_add_tail(&semaphore->list, &semaphore->bo->free);
- radeon_semaphore_shrink_locked(rdev);
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
-}
-
-void radeon_semaphore_driver_fini(struct radeon_device *rdev)
-{
- struct radeon_semaphore_bo *bo, *n;
- unsigned long irq_flags;
-
- write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
- /* we force to free everything */
- list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
- if (!list_empty(&bo->free)) {
- dev_err(rdev->dev, "still in use semaphore\n");
- }
- radeon_semaphore_del_bo_locked(rdev, bo);
+ if (semaphore->waiters > 0) {
+ dev_err(rdev->dev, "Semaphore %p has more waiters than signalers,"
+ " hardware lockup imminent!\n", semaphore);
}
- write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ radeon_sa_bo_free(rdev, &semaphore->sa_bo);
+ kfree(semaphore);
}
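
(Aside, not part of the patch: the new waiters counter is purely a sanity check. emit_wait() increments it and emit_signal() decrements it, so freeing a semaphore while the count is still positive means some ring was asked to wait on a value that nothing will ever write, which is the "hardware lockup imminent" case the dev_err above warns about. A trivial illustration of the accounting, with hypothetical rings:)

	/* balanced use: counter returns to zero before free */
	radeon_semaphore_emit_wait(rdev, ring_b, sem);		/* waiters: 0 -> 1 */
	radeon_semaphore_emit_signal(rdev, ring_a, sem);	/* waiters: 1 -> 0 */
	radeon_semaphore_free(rdev, sem);			/* no warning */

	/* unbalanced use: a wait with no matching signal */
	radeon_semaphore_emit_wait(rdev, ring_b, sem);		/* waiters: 0 -> 1 */
	radeon_semaphore_free(rdev, sem);			/* waiters > 0, dev_err fires */
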
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index cacec0e..c6ee54e 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1278,7 +1278,6 @@ void rv770_fini(struct radeon_device *rdev)
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ac7a199..ebe2c9b 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -4118,7 +4118,6 @@ void si_fini(struct radeon_device *rdev)
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
- radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
--
1.7.7.6