[PATCH 1/2] drm/radeon: expand fence sequence values to 64 bit
Christian König
deathsimple at vodafone.de
Wed Feb 1 06:58:20 PST 2012
The fence sequence values are protected by a read/write lock
anyway, so we actually don't need to use the atomic type.
Signed-off-by: Christian König <deathsimple at vodafone.de>
---
drivers/gpu/drm/radeon/radeon.h | 6 +++---
drivers/gpu/drm/radeon/radeon_fence.c | 29 +++++++++++++++++------------
2 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73e05cb..2f4fb4d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -209,8 +209,8 @@ struct radeon_fence_driver {
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
- atomic_t seq;
- uint32_t last_seq;
+ uint64_t seq;
+ uint64_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
@@ -225,7 +225,7 @@ struct radeon_fence {
struct kref kref;
struct list_head list;
/* protected by radeon_fence.lock */
- uint32_t seq;
+ uint64_t seq;
bool emitted;
bool signaled;
/* RB, DMA, etc. */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 64ea3dd..ac177c5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -29,7 +29,6 @@
* Dave Airlie
*/
#include <linux/seq_file.h>
-#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
@@ -70,7 +69,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+ fence->seq = ++rdev->fence_drv[fence->ring].seq;
if (!rdev->ring[fence->ring].ready)
/* FIXME: cp is not running assume everythings is done right
* away
@@ -90,12 +89,18 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
struct radeon_fence *fence;
struct list_head *i, *n;
- uint32_t seq;
+ uint64_t seq;
bool wake = false;
unsigned long cjiffies;
seq = radeon_fence_read(rdev, ring);
+ seq |= rdev->fence_drv[ring].last_seq & 0xFFFFFFFF00000000;
if (seq != rdev->fence_drv[ring].last_seq) {
+ if (seq < rdev->fence_drv[ring].last_seq) {
+ /* sequence wrapped around */
+ seq = (seq & 0xFFFFFFFF) |
+ (rdev->fence_drv[ring].seq & 0xFFFFFFFF00000000);
+ }
rdev->fence_drv[ring].last_seq = seq;
rdev->fence_drv[ring].last_jiffies = jiffies;
rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -216,7 +221,7 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
unsigned long irq_flags, timeout;
- u32 seq;
+ uint64_t last_seq;
int r;
if (fence == NULL) {
@@ -230,8 +235,8 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv[fence->ring].last_seq;
- trace_radeon_fence_wait_begin(rdev->ddev, seq);
+ last_seq = rdev->fence_drv[fence->ring].last_seq;
+ trace_radeon_fence_wait_begin(rdev->ddev, last_seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev, fence->ring);
r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
@@ -246,7 +251,7 @@ retry:
radeon_fence_signaled(fence), timeout);
radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
- trace_radeon_fence_wait_end(rdev->ddev, seq);
+ trace_radeon_fence_wait_end(rdev->ddev, last_seq);
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and fence isn't
* isn't signaled yet, resume wait
@@ -258,11 +263,11 @@ retry:
/* don't protect read access to rdev->fence_drv[t].last_seq
* if we experiencing a lockup the value doesn't change
*/
- if (seq == rdev->fence_drv[fence->ring].last_seq &&
+ if (last_seq == rdev->fence_drv[fence->ring].last_seq &&
radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
- fence->seq, seq);
+ (uint32_t)fence->seq, (uint32_t)last_seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
*/
@@ -403,7 +408,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
- radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
rdev->fence_drv[ring].initialized = true;
DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
@@ -416,7 +421,7 @@ static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
- atomic_set(&rdev->fence_drv[ring].seq, 0);
+ rdev->fence_drv[ring].seq = 0;
INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
@@ -481,7 +486,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
fence = list_entry(rdev->fence_drv[i].emitted.prev,
struct radeon_fence, list);
seq_printf(m, "Last emitted fence %p with 0x%08X\n",
- fence, fence->seq);
+ fence, (uint32_t)fence->seq);
}
}
return 0;
--
1.7.5.4
--------------040606070707070606090300
Content-Type: text/x-patch;
name="0002-drm-radeon-interface-waiting-for-fence-values.patch"
Content-Transfer-Encoding: 8bit
Content-Disposition: attachment;
filename*0="0002-drm-radeon-interface-waiting-for-fence-values.patch"
More information about the dri-devel
mailing list