xf86-video-intel: src/sna/kgem.c src/sna/kgem.h

Chris Wilson ickle at kemper.freedesktop.org
Thu Jan 3 16:04:10 PST 2013


 src/sna/kgem.c |   14 ++++++++++++++
 src/sna/kgem.h |    4 ++--
 2 files changed, 16 insertions(+), 2 deletions(-)

New commits:
commit b5b3cfb0ad1cc5e66c99035f526946bf41011e13
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 3 23:33:44 2013 +0000

    sna: Flush the batch prior to referencing work from another ring
    
    When the kernel inserts semaphores to serialise work between rings, we
    want to delay only the surface coming from the other ring, and not
    interfere with work already queued.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
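
The intent of the check can be illustrated with a small, self-contained model
of the batch/ring bookkeeping. This is a sketch only: toy_kgem, toy_bo,
batch_references() and the ring enum are illustrative stand-ins, not the real
kgem API; only the needs_semaphore() test mirrors the logic added below.

#include <stdbool.h>
#include <stdio.h>

enum ring { RING_RENDER, RING_BLT };

struct toy_bo {
	int busy_on_ring;	/* ring with outstanding GPU work, or -1 if idle */
	bool exec;		/* already referenced by the current batch? */
};

struct toy_kgem {
	enum ring ring;		/* ring the current batch will run on */
	int nreloc;		/* relocations already emitted into the batch */
};

/* Mirrors the check added by the commit: only force a flush when the batch
 * already contains work (nreloc != 0) and the buffer is busy on another ring. */
static bool needs_semaphore(const struct toy_kgem *kgem, const struct toy_bo *bo)
{
	return kgem->nreloc && bo->busy_on_ring >= 0 &&
	       (enum ring)bo->busy_on_ring != kgem->ring;
}

static void flush_batch(struct toy_kgem *kgem)
{
	printf("submitting %d reloc(s) on ring %d before cross-ring reference\n",
	       kgem->nreloc, (int)kgem->ring);
	kgem->nreloc = 0;	/* the next batch starts out empty */
}

/* Hypothetical caller: before referencing bo, flush so that only the new
 * (empty) batch ends up waiting on the other ring's semaphore. */
static void batch_references(struct toy_kgem *kgem, struct toy_bo *bo)
{
	if (!bo->exec && needs_semaphore(kgem, bo))
		flush_batch(kgem);
	kgem->nreloc++;		/* pretend a relocation to bo was emitted */
	bo->exec = true;	/* further references in this batch skip the check */
}

int main(void)
{
	struct toy_kgem kgem = { .ring = RING_RENDER, .nreloc = 3 };
	struct toy_bo bo = { .busy_on_ring = RING_BLT, .exec = false };

	batch_references(&kgem, &bo);	/* flushes the three queued relocs first */
	batch_references(&kgem, &bo);	/* no flush: bo is already in this batch */
	return 0;
}

Under this model the work already queued on the current ring is submitted
untouched, and only the fresh batch carries the cross-ring dependency.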

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5f9ba9e..ac7724a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4030,6 +4030,11 @@ bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
 	return bo->needs_flush;
 }
 
+inline static bool needs_semaphore(struct kgem *kgem, struct kgem_bo *bo)
+{
+	return kgem->nreloc && bo->rq && RQ_RING(bo->rq) != kgem->ring;
+}
+
 bool kgem_check_bo(struct kgem *kgem, ...)
 {
 	va_list ap;
@@ -4045,6 +4050,9 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 		if (bo->exec)
 			continue;
 
+		if (needs_semaphore(kgem, bo))
+			return false;
+
 		num_pages += num_pages(bo);
 		num_exec++;
 
@@ -4109,6 +4117,9 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 		return true;
 	}
 
+	if (needs_semaphore(kgem, bo))
+		return false;
+
 	if (kgem_flush(kgem, bo->flush))
 		return false;
 
@@ -4165,6 +4176,9 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 			continue;
 		}
 
+		if (needs_semaphore(kgem, bo))
+			return false;
+
 		num_pages += num_pages(bo);
 		num_exec++;
 		if (kgem->gen < 040 && bo->tiling) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9152cc0..72f8cb3 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -305,7 +305,7 @@ static inline void kgem_submit(struct kgem *kgem)
 
 static inline bool kgem_flush(struct kgem *kgem, bool flush)
 {
-	if (kgem->nexec == 0)
+	if (kgem->nreloc == 0)
 		return false;
 
 	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
@@ -359,7 +359,7 @@ static inline void kgem_set_mode(struct kgem *kgem,
 	kgem_submit(kgem);
 #endif
 
-	if (kgem->nexec && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
+	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
 		_kgem_submit(kgem);
 
 	if (kgem->mode == mode)

