[PATCH 1/2] radeon: Don't generate new fence for page flip.
Michel Dänzer
michel at daenzer.net
Tue Jul 12 04:39:46 PDT 2011
From: Michel Dänzer <michel.daenzer at amd.com>
Use the fence of the new frontbuffer, if any.
Generating a new fence could cause us to wait for completely unrelated
rendering to finish before performing the flip.
Signed-off-by: Michel Dänzer <michel.daenzer at amd.com>
---
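Below the cut line, for illustration only: a rough standalone sketch of the pattern this patch switches to. The types and helpers here (struct fence, struct buffer, fence_ref(), queue_flip(), flip_ready()) are made up for the example; the real driver uses radeon_fence_ref() and the BO's tbo.sync_obj exactly as in the diff. The point is that queuing a flip borrows a reference to whatever fence the new buffer already carries, and the flip handler treats a missing fence as "nothing to wait for".

#include <stdbool.h>
#include <stdio.h>

struct fence {
	int refcount;
	bool signaled;
};

struct buffer {
	struct fence *last_fence;	/* fence of the last render to this buffer, may be NULL */
};

struct flip_work {
	struct fence *fence;		/* NULL means there is nothing to wait for */
};

static struct fence *fence_ref(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

/* Queue a flip: borrow the new buffer's existing fence instead of emitting a new one. */
static void queue_flip(struct flip_work *work, struct buffer *new_fb)
{
	work->fence = fence_ref(new_fb->last_fence);
}

/* Handler side: a missing fence counts as already signaled. */
static bool flip_ready(const struct flip_work *work)
{
	return !work->fence || work->fence->signaled;
}

int main(void)
{
	struct fence render = { .refcount = 1, .signaled = false };
	struct buffer fb = { .last_fence = &render };
	struct flip_work work = { 0 };

	queue_flip(&work, &fb);
	printf("ready before signal: %d\n", flip_ready(&work));	/* 0 */
	render.signaled = true;
	printf("ready after signal:  %d\n", flip_ready(&work));	/* 1 */
	return 0;
}

This mirrors the two hunks in the diff: work->fence is only taken when rbo->tbo.sync_obj is non-NULL, and radeon_crtc_handle_flip() only checks radeon_fence_signaled() when work->fence is actually set.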
drivers/gpu/drm/radeon/radeon_display.c | 33 ++++--------------------------
1 files changed, 5 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0671934..71a4840 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -280,7 +280,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
spin_lock_irqsave(&rdev->ddev->event_lock, flags);
work = radeon_crtc->unpin_work;
if (work == NULL ||
- !radeon_fence_signaled(work->fence)) {
+ (work->fence && !radeon_fence_signaled(work->fence))) {
spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
return;
}
@@ -346,7 +346,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct radeon_framebuffer *new_radeon_fb;
struct drm_gem_object *obj;
struct radeon_bo *rbo;
- struct radeon_fence *fence;
struct radeon_unpin_work *work;
unsigned long flags;
u32 tiling_flags, pitch_pixels;
@@ -357,22 +356,19 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (work == NULL)
return -ENOMEM;
- r = radeon_fence_create(rdev, &fence);
- if (unlikely(r != 0)) {
- kfree(work);
- DRM_ERROR("flip queue: failed to create fence.\n");
- return -ENOMEM;
- }
work->event = event;
work->rdev = rdev;
work->crtc_id = radeon_crtc->crtc_id;
- work->fence = radeon_fence_ref(fence);
old_radeon_fb = to_radeon_framebuffer(crtc->fb);
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
+ obj = new_radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
+ if (rbo->tbo.sync_obj)
+ work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
INIT_WORK(&work->work, radeon_unpin_work_func);
/* We borrow the event spin lock for protecting unpin_work */
@@ -380,7 +376,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (radeon_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
- radeon_fence_unref(&fence);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@@ -390,9 +385,6 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
/* pin the new buffer */
- obj = new_radeon_fb->obj;
- rbo = gem_to_radeon_bo(obj);
-
DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
work->old_rbo, rbo);
@@ -460,25 +452,11 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
goto pflip_cleanup1;
}
- /* 32 ought to cover us */
- r = radeon_ring_lock(rdev, 32);
- if (r) {
- DRM_ERROR("failed to lock the ring before flip\n");
- goto pflip_cleanup2;
- }
-
- /* emit the fence */
- radeon_fence_emit(rdev, fence);
/* set the proper interrupt */
radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
- /* fire the ring */
- radeon_ring_unlock_commit(rdev);
return 0;
-pflip_cleanup2:
- drm_vblank_put(dev, radeon_crtc->crtc_id);
-
pflip_cleanup1:
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0)) {
@@ -498,7 +476,6 @@ pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
- radeon_fence_unref(&fence);
kfree(work);
return r;
--
1.7.5.4