xf86-video-intel: 4 commits - src/sna/gen2_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_blt.c src/sna/sna_io.c

Chris Wilson ickle at kemper.freedesktop.org
Sun Oct 7 14:44:49 PDT 2012


 src/sna/gen2_render.c |    7 ++++-
 src/sna/kgem.c        |    2 -
 src/sna/kgem.h        |    5 ++++
 src/sna/sna_blt.c     |   60 +++++++++++++++++++++++++++++++++++---------------
 src/sna/sna_io.c      |    6 +----
 5 files changed, 55 insertions(+), 25 deletions(-)

New commits:
commit 2ac3776be85d857a57ce7b742e52cd6091d2befb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 7 22:41:25 2012 +0100

    sna: Check that we have sufficient space for a copy when replacing a fill
    
    Reported-by: Timo Kamph <timo at kamph.org>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=55700
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index c3c4735..7410eb1 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -435,18 +435,23 @@ static void sna_blt_copy_one(struct sna *sna,
 	    kgem->batch[kgem->nbatch-3] == ((uint32_t)(dst_y+height) << 16 | (uint16_t)(dst_x+width)) &&
 	    kgem->reloc[kgem->nreloc-1].target_handle == blt->bo[1]->handle) {
 		DBG(("%s: replacing last fill\n", __FUNCTION__));
-		b = kgem->batch + kgem->nbatch - 6;
-		b[0] = blt->cmd;
-		b[1] = blt->br13;
-		b[5] = (src_y << 16) | src_x;
-		b[6] = blt->pitch[0];
-		b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7 - 6,
-				      blt->bo[0],
-				      I915_GEM_DOMAIN_RENDER << 16 |
-				      KGEM_RELOC_FENCED,
-				      0);
-		kgem->nbatch += 8 - 6;
-		return;
+		if (kgem_check_batch(kgem, 8-6)) {
+			b = kgem->batch + kgem->nbatch - 6;
+			b[0] = blt->cmd;
+			b[1] = blt->br13;
+			b[5] = (src_y << 16) | src_x;
+			b[6] = blt->pitch[0];
+			b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7 - 6,
+					      blt->bo[0],
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      KGEM_RELOC_FENCED,
+					      0);
+			kgem->nbatch += 8 - 6;
+			assert(kgem->nbatch < kgem->surface);
+			return;
+		}
+		kgem->nbatch -= 6;
+		kgem->nreloc--;
 	}
 
 	if (!kgem_check_batch(kgem, 8) ||
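
In short: the fill being replaced occupies 6 dwords and one relocation, while the copy that overwrites it in place takes 8 dwords, so the rewrite grows the batch by 2 dwords. If that headroom is missing, the new code backs the fill out (nbatch -= 6, nreloc--), which is safe because the copy covers the same rectangle, and falls through to the ordinary copy path below, which can flush the batch first. A self-contained toy model of this grow-in-place-or-back-out pattern, with illustrative names and sizes only (not the driver's API) and ignoring KGEM_BATCH_RESERVED:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy batch: just enough state to show the space accounting from the hunk above. */
#define TOY_BATCH_SIZE 16

struct toy_batch {
	uint32_t dwords[TOY_BATCH_SIZE];
	int nbatch;   /* dwords emitted so far */
	int surface;  /* space at and above this offset is reserved */
	int nreloc;   /* relocations emitted so far */
};

static bool toy_check_batch(const struct toy_batch *b, int num_dwords)
{
	return b->nbatch + num_dwords <= b->surface;
}

static bool toy_replace_fill_with_copy(struct toy_batch *b)
{
	if (toy_check_batch(b, 8 - 6)) {
		/* enough headroom: rewrite the 6-dword fill as an 8-dword copy */
		b->nbatch += 8 - 6;
		return true;
	}
	/* no headroom: drop the now-redundant fill and its relocation, and
	 * let the caller emit a fresh copy (flushing the batch if needed) */
	b->nbatch -= 6;
	b->nreloc--;
	return false;
}

int main(void)
{
	struct toy_batch b = { .nbatch = 14, .surface = TOY_BATCH_SIZE, .nreloc = 3 };
	printf("in place? %d (nbatch=%d)\n", toy_replace_fill_with_copy(&b), b.nbatch);

	b.nbatch = 15;	/* only one spare dword left */
	printf("in place? %d (nbatch=%d)\n", toy_replace_fill_with_copy(&b), b.nbatch);
	return 0;
}
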
commit 1a5d5b9ae5771d81da16222582acedb1557386c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 7 22:41:02 2012 +0100

    sna: Check for batch overflows after advancing a BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 06f3999..e547215 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -352,16 +352,21 @@ static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
 
 static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
 {
+	assert(num_dwords > 0);
+	assert(kgem->nbatch < kgem->surface);
+	assert(kgem->surface <= kgem->batch_size);
 	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
 }
 
 static inline bool kgem_check_reloc(struct kgem *kgem, int n)
 {
+	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
 	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
 }
 
 static inline bool kgem_check_exec(struct kgem *kgem, int n)
 {
+	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
 	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
 }
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 94b5f4a..c3c4735 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -84,6 +84,7 @@ static const uint8_t fill_ROP[] = {
 
 static void nop_done(struct sna *sna, const struct sna_composite_op *op)
 {
+	assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
 	(void)sna;
 	(void)op;
 }
@@ -97,7 +98,9 @@ static void gen6_blt_copy_done(struct sna *sna, const struct sna_composite_op *o
 		b[0] = XY_SETUP_CLIP;
 		b[1] = b[2] = 0;
 		kgem->nbatch += 3;
+		assert(kgem->nbatch < kgem->surface);
 	}
+	assert(sna->kgem.nbatch <= KGEM_BATCH_SIZE(&sna->kgem));
 	(void)op;
 }
 
@@ -143,8 +146,8 @@ static bool sna_blt_fill_init(struct sna *sna,
 	blt->bpp = bpp;
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, bo) ||
-	    !kgem_check_batch(kgem, 12)) {
+	if (!kgem_check_batch(kgem, 12) ||
+	    !kgem_check_bo_fenced(kgem, bo)) {
 		_kgem_submit(kgem);
 		assert(kgem_check_bo_fenced(kgem, bo));
 		_kgem_set_mode(kgem, KGEM_BLT);
@@ -178,6 +181,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 		b[7] = 0;
 		b[8] = 0;
 		kgem->nbatch += 9;
+		assert(kgem->nbatch < kgem->surface);
 
 		sna->blt_state.fill_bo = bo->unique_id;
 		sna->blt_state.fill_pixel = pixel;
@@ -236,6 +240,7 @@ inline static void sna_blt_fill_one(struct sna *sna,
 
 	b = kgem->batch + kgem->nbatch;
 	kgem->nbatch += 3;
+	assert(kgem->nbatch < kgem->surface);
 
 	b[0] = blt->cmd;
 	b[1] = y << 16 | x;
@@ -369,7 +374,8 @@ static void sna_blt_alpha_fixup_one(struct sna *sna,
 	assert(width > 0);
 	assert(height > 0);
 
-	if (!kgem_check_batch(kgem, 12) || !kgem_check_reloc(kgem, 2)) {
+	if (!kgem_check_batch(kgem, 12) ||
+	    !kgem_check_reloc(kgem, 2)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -397,6 +403,7 @@ static void sna_blt_alpha_fixup_one(struct sna *sna,
 	b[10] = 0;
 	b[11] = 0;
 	kgem->nbatch += 12;
+	assert(kgem->nbatch < kgem->surface);
 }
 
 static void sna_blt_copy_one(struct sna *sna,
@@ -442,7 +449,8 @@ static void sna_blt_copy_one(struct sna *sna,
 		return;
 	}
 
-	if (!kgem_check_batch(kgem, 8) || !kgem_check_reloc(kgem, 2)) {
+	if (!kgem_check_batch(kgem, 8) ||
+	    !kgem_check_reloc(kgem, 2)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -466,6 +474,7 @@ static void sna_blt_copy_one(struct sna *sna,
 			      KGEM_RELOC_FENCED,
 			      0);
 	kgem->nbatch += 8;
+	assert(kgem->nbatch < kgem->surface);
 }
 
 bool
@@ -839,6 +848,7 @@ inline static void _sna_blt_fill_box(struct sna *sna,
 
 	b = kgem->batch + kgem->nbatch;
 	kgem->nbatch += 3;
+	assert(kgem->nbatch < kgem->surface);
 
 	b[0] = blt->cmd;
 	*(uint64_t *)(b+1) = *(const uint64_t *)box;
@@ -868,6 +878,7 @@ inline static void _sna_blt_fill_boxes(struct sna *sna,
 		nbox -= nbox_this_time;
 
 		kgem->nbatch += 3 * nbox_this_time;
+		assert(kgem->nbatch < kgem->surface);
 		while (nbox_this_time >= 8) {
 			b[0] = cmd; *(uint64_t *)(b+1) = *(const uint64_t *)box++;
 			b[3] = cmd; *(uint64_t *)(b+4) = *(const uint64_t *)box++;
@@ -1913,6 +1924,7 @@ static void convert_done(struct sna *sna, const struct sna_composite_op *op)
 		b[0] = XY_SETUP_CLIP;
 		b[1] = b[2] = 0;
 		kgem->nbatch += 3;
+		assert(kgem->nbatch < kgem->surface);
 	}
 
 	kgem_bo_destroy(kgem, op->src.bo);
@@ -2229,6 +2241,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 			      0);
 	b[5] = color;
 	kgem->nbatch += 6;
+	assert(kgem->nbatch < kgem->surface);
 
 	sna->blt_state.fill_bo = bo->unique_id;
 	sna->blt_state.fill_pixel = color;
@@ -2285,8 +2298,8 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, bo) ||
-	    !kgem_check_batch(kgem, 12)) {
+	if (!kgem_check_batch(kgem, 12) ||
+	    !kgem_check_bo_fenced(kgem, bo)) {
 		_kgem_submit(kgem);
 		assert(kgem_check_bo_fenced(&sna->kgem, bo));
 		_kgem_set_mode(kgem, KGEM_BLT);
@@ -2320,6 +2333,7 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 		b[7] = 0;
 		b[8] = 0;
 		kgem->nbatch += 9;
+		assert(kgem->nbatch < kgem->surface);
 
 		sna->blt_state.fill_bo = bo->unique_id;
 		sna->blt_state.fill_pixel = pixel;
@@ -2350,6 +2364,7 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 
 			b = kgem->batch + kgem->nbatch;
 			kgem->nbatch += 3;
+			assert(kgem->nbatch < kgem->surface);
 			b[0] = cmd;
 			*(uint64_t *)(b+1) = *(const uint64_t *)box;
 			box++;
@@ -2378,6 +2393,7 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 			b[7] = 0;
 			b[8] = 0;
 			kgem->nbatch += 9;
+			assert(kgem->nbatch < kgem->surface);
 		}
 	} while (nbox);
 
@@ -2510,6 +2526,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 						      KGEM_RELOC_FENCED,
 						      0);
 				kgem->nbatch += 8;
+				assert(kgem->nbatch < kgem->surface);
 				box++;
 			} while (--nbox_this_time);
 
@@ -2561,6 +2578,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 						      KGEM_RELOC_FENCED,
 						      0);
 				kgem->nbatch += 8;
+				assert(kgem->nbatch < kgem->surface);
 				box++;
 			} while (--nbox_this_time);
 
@@ -2577,6 +2595,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		b[0] = XY_SETUP_CLIP;
 		b[1] = b[2] = 0;
 		kgem->nbatch += 3;
+		assert(kgem->nbatch < kgem->surface);
 	}
 
 	sna->blt_state.fill_bo = 0;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 60ea517..367bfde 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -379,8 +379,7 @@ fallback:
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
-	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
+	if (!kgem_check_exec_and_reloc(kgem, 2) ||
 	    !kgem_check_batch(kgem, 8) ||
 	    !kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
 		_kgem_submit(kgem);
@@ -1173,8 +1172,7 @@ tile:
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
-	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
+	if (!kgem_check_exec_and_reloc(kgem, 2) ||
 	    !kgem_check_batch(kgem, 8) ||
 	    !kgem_check_bo_fenced(kgem, dst_bo)) {
 		_kgem_submit(kgem);
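
Note that these sna_io.c hunks call kgem_check_exec_and_reloc(), whose definition is not part of the diff. Assuming it simply pairs the exec and reloc checks shown in kgem.h above, it would look roughly like this sketch (the actual in-tree definition may differ):

static inline bool kgem_check_exec_and_reloc(struct kgem *kgem, int n)
{
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem) &&
		      kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

Either way, the rewrite replaces the two open-coded limit comparisons with a helper, so exec and reloc space are checked the same way as batch space and fenced bos.
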
commit b41ecda69f2e6c9f8adf79dd24ded45740bcc015
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 7 22:39:36 2012 +0100

    sna: Only reduce the maximum batch size for old kernels
    
    Be careful that we do not increase the batch size to span multiple pages on 865g!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8c9cb6c..6f33ed5 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -838,7 +838,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
 	if (gen >= 70 && gen < 80)
 		kgem->batch_size = 16*1024;
-	if (!kgem->has_relaxed_delta)
+	if (!kgem->has_relaxed_delta && kgem->batch_size > 4*1024)
 		kgem->batch_size = 4*1024;
 
 	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
commit 3ca7f8be5bcfec3c50c43bd485f0c57b931f8206
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Oct 7 22:39:10 2012 +0100

    sna/gen2: Count the number of dwords required for the invariant
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 5504e28..5012662 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -530,6 +530,8 @@ static void gen2_emit_invariant(struct sna *sna)
 	      TEXBLENDARG_MODIFY_PARMS |
 	      TEXBLENDARG_DIFFUSE);
 
+#define INVARIANT_SIZE 35
+
 	sna->render_state.gen2.need_invariant = false;
 }
 
@@ -538,9 +540,9 @@ gen2_get_batch(struct sna *sna)
 {
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 
-	if (!kgem_check_batch(&sna->kgem, 40+40)) {
+	if (!kgem_check_batch(&sna->kgem, INVARIANT_SIZE+40)) {
 		DBG(("%s: flushing batch: size %d > %d\n",
-		     __FUNCTION__, 40+40,
+		     __FUNCTION__, INVARIANT_SIZE+40,
 		     sna->kgem.surface-sna->kgem.nbatch));
 		kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
@@ -1011,6 +1013,7 @@ inline static int gen2_get_rectangles(struct sna *sna,
 	     __FUNCTION__, want, op->floats_per_vertex, rem));
 
 	assert(op->floats_per_vertex);
+	assert(op->floats_per_rect == 3 * op->floats_per_vertex);
 
 	need = 1;
 	size = op->floats_per_rect;

