xf86-video-intel: 4 commits - src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_io.c

Chris Wilson ickle at kemper.freedesktop.org
Mon Nov 4 10:37:23 CET 2013


 src/sna/kgem.c      |   48 +++---
 src/sna/sna_accel.c |    2 
 src/sna/sna_io.c    |  392 ++++++++++++++++++++++++++++------------------------
 3 files changed, 242 insertions(+), 200 deletions(-)

New commits:
commit 8d067e961920e19fda7e9990440ac2be1a2e1760
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Nov 4 09:34:25 2013 +0000

    sna: Rename the two variants of pwrite so their relative dangers are obvious
    
    Since we extend the write in the cache-aligned routine, it runs the risk
    of reading from beyond the end of the allocation. As such, callers
    should be carefully vetted to make sure that their allocations are
    already cache-aligned (typically page-aligned). To make it obvious that
    this complexity exists, rename the routine.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1217367..a5bb553 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -374,9 +374,9 @@ retry_mmap:
 	return ptr;
 }
 
-static int __gem_write(int fd, uint32_t handle,
-		       int offset, int length,
-		       const void *src)
+static int gem_write(int fd, uint32_t handle,
+		     int offset, int length,
+		     const void *src)
 {
 	struct drm_i915_gem_pwrite pwrite;
 
@@ -391,9 +391,9 @@ static int __gem_write(int fd, uint32_t handle,
 	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 }
 
-static int gem_write(int fd, uint32_t handle,
-		     int offset, int length,
-		     const void *src)
+static int gem_write__cachealigned(int fd, uint32_t handle,
+				   int offset, int length,
+				   const void *src)
 {
 	struct drm_i915_gem_pwrite pwrite;
 
@@ -482,7 +482,7 @@ bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 	assert(length <= bytes(bo));
 retry:
-	if (__gem_write(kgem->fd, bo->handle, 0, length, data)) {
+	if (gem_write(kgem->fd, bo->handle, 0, length, data)) {
 		int err = errno;
 
 		assert(err != EINVAL);
@@ -2550,8 +2550,8 @@ static void kgem_finish_buffers(struct kgem *kgem)
 				     bo->base.handle, shrink->handle));
 
 				assert(bo->used <= bytes(shrink));
-				if (gem_write(kgem->fd, shrink->handle,
-					      0, bo->used, bo->mem) == 0) {
+				if (gem_write__cachealigned(kgem->fd, shrink->handle,
+							    0, bo->used, bo->mem) == 0) {
 					shrink->target_handle =
 						kgem->has_handle_lut ? bo->base.target_handle : shrink->handle;
 					for (n = 0; n < kgem->nreloc; n++) {
@@ -2589,8 +2589,8 @@ static void kgem_finish_buffers(struct kgem *kgem)
 		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
 		ASSERT_IDLE(kgem, bo->base.handle);
 		assert(bo->used <= bytes(&bo->base));
-		gem_write(kgem->fd, bo->base.handle,
-			  0, bo->used, bo->mem);
+		gem_write__cachealigned(kgem->fd, bo->base.handle,
+					0, bo->used, bo->mem);
 		bo->need_io = 0;
 
 decouple:
@@ -2642,9 +2642,9 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 retry:
 	/* If there is no surface data, just upload the batch */
 	if (kgem->surface == kgem->batch_size) {
-		if (gem_write(kgem->fd, handle,
-			      0, sizeof(uint32_t)*kgem->nbatch,
-			      kgem->batch) == 0)
+		if (gem_write__cachealigned(kgem->fd, handle,
+					    0, sizeof(uint32_t)*kgem->nbatch,
+					    kgem->batch) == 0)
 			return 0;
 
 		goto expire;
@@ -2653,26 +2653,26 @@ retry:
 	/* Are the batch pages conjoint with the surface pages? */
 	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
 		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
-		if (gem_write(kgem->fd, handle,
-				0, kgem->batch_size*sizeof(uint32_t),
-				kgem->batch) == 0)
+		if (gem_write__cachealigned(kgem->fd, handle,
+					    0, kgem->batch_size*sizeof(uint32_t),
+					    kgem->batch) == 0)
 			return 0;
 
 		goto expire;
 	}
 
 	/* Disjoint surface/batch, upload separately */
-	if (gem_write(kgem->fd, handle,
-			0, sizeof(uint32_t)*kgem->nbatch,
-			kgem->batch))
+	if (gem_write__cachealigned(kgem->fd, handle,
+				    0, sizeof(uint32_t)*kgem->nbatch,
+				    kgem->batch))
 		goto expire;
 
 	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
 	ret -= sizeof(uint32_t) * kgem->surface;
 	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
-	if (__gem_write(kgem->fd, handle,
-			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
-			kgem->batch + kgem->surface))
+	if (gem_write(kgem->fd, handle,
+		      size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
+		      kgem->batch + kgem->surface))
 		goto expire;
 
 	return 0;
@@ -5862,8 +5862,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		    size <= bytes(&bo->base)) {
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
-			gem_write(kgem->fd, bo->base.handle,
-				  0, bo->used, bo->mem);
+			gem_write__cachealigned(kgem->fd, bo->base.handle,
+						0, bo->used, bo->mem);
 			kgem_buffer_release(kgem, bo);
 			bo->need_io = 0;
 			bo->write = 0;
commit 7050c8da56676b60a602dce9abbdb61a7c4fa61e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Nov 4 09:31:05 2013 +0000

    sna: Use the unoptimized pwrite for general buffers
    
    When we call kgem_bo_write() we have less control over the allocation of
    the buffer, and do not ensure it meets the alignment criteria required
    for the cacheline optimisation. So use the simple pwrite routine to
    avoid reading beyond the end of the allocation.
    
    Reported-and-tested-by: Mark Kettenis <mark.kettenis at xs4all.nl>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 264a379..1217367 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -482,7 +482,7 @@ bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 	assert(length <= bytes(bo));
 retry:
-	if (gem_write(kgem->fd, bo->handle, 0, length, data)) {
+	if (__gem_write(kgem->fd, bo->handle, 0, length, data)) {
 		int err = errno;
 
 		assert(err != EINVAL);
commit 10023bf8f5a95b4e9f42c05370bc2c103d6a3d3d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Nov 3 22:07:22 2013 +0000

    sna: Use fast-path target placement if we are also IGNORE_CPU
    
    If we are ignoring CPU damage, we also need only to check GPU damage
    when considering placement of the target bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1328c28..b8d89d6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3401,7 +3401,7 @@ create_gpu_bo:
 
 	if (priv->gpu_damage) {
 		assert(priv->gpu_bo);
-		if (!priv->cpu_damage) {
+		if (!priv->cpu_damage || flags & IGNORE_CPU) {
 			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
 							       &region.extents)) {
 				DBG(("%s: region wholly contained within GPU damage\n",
commit 08d8a47e7c8b9bcb5bb317be4623161e58e0e0cc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Nov 3 19:51:56 2013 +0000

    sna: Wrap staging buffer access with sigtrap handler
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index db56f56..bad6052 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -225,7 +225,6 @@ void sna_read_boxes(struct sna *sna, PixmapPtr dst, struct kgem_bo *src_bo,
 	BoxRec extents;
 	const BoxRec *tmp_box;
 	int tmp_nbox;
-	char *src;
 	void *ptr;
 	int src_pitch, cpp, offset;
 	int n, cmd, br13;
@@ -375,14 +374,17 @@ fallback:
 					kgem_bo_submit(&sna->kgem, dst_bo);
 					kgem_buffer_read_sync(kgem, dst_bo);
 
-					while (c-- != clipped) {
-						memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
-							   dst_bo->pitch, dst->devKind,
-							   c->x1 - tile.x1,
-							   c->y1 - tile.y1,
-							   c->x1, c->y1,
-							   c->x2 - c->x1,
-							   c->y2 - c->y1);
+					if (sigtrap_get() == 0) {
+						while (c-- != clipped) {
+							memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
+								   dst_bo->pitch, dst->devKind,
+								   c->x1 - tile.x1,
+								   c->y1 - tile.y1,
+								   c->x1, c->y1,
+								   c->x2 - c->x1,
+								   c->y2 - c->y1);
+						}
+						sigtrap_put();
 					}
 
 					kgem_bo_destroy(&sna->kgem, dst_bo);
@@ -412,14 +414,17 @@ fallback:
 			kgem_bo_submit(&sna->kgem, dst_bo);
 			kgem_buffer_read_sync(kgem, dst_bo);
 
-			for (n = 0; n < nbox; n++) {
-				memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
-					   dst_bo->pitch, dst->devKind,
-					   box[n].x1 - extents.x1,
-					   box[n].y1 - extents.y1,
-					   box[n].x1, box[n].y1,
-					   box[n].x2 - box[n].x1,
-					   box[n].y2 - box[n].y1);
+			if (sigtrap_get() == 0) {
+				for (n = 0; n < nbox; n++) {
+					memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
+						   dst_bo->pitch, dst->devKind,
+						   box[n].x1 - extents.x1,
+						   box[n].y1 - extents.y1,
+						   box[n].x1, box[n].y1,
+						   box[n].x2 - box[n].x1,
+						   box[n].y2 - box[n].y1);
+				}
+				sigtrap_put();
 			}
 
 			kgem_bo_destroy(&sna->kgem, dst_bo);
@@ -594,34 +599,37 @@ fallback:
 
 	kgem_buffer_read_sync(kgem, dst_bo);
 
-	src = ptr;
-	do {
-		int height = box->y2 - box->y1;
-		int width  = box->x2 - box->x1;
-		int pitch = PITCH(width, cpp);
-
-		DBG(("    copy offset %lx [%08x...%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
-		     (long)((char *)src - (char *)ptr),
-		     *(uint32_t*)src, *(uint32_t*)(src+pitch*height/2 + pitch/2 - 4), *(uint32_t*)(src+pitch*height - 4),
-		     box->x1, box->y1,
-		     width, height,
-		     pitch, dst->devKind, cpp*8));
-
-		assert(box->x1 >= 0);
-		assert(box->x2 <= dst->drawable.width);
-		assert(box->y1 >= 0);
-		assert(box->y2 <= dst->drawable.height);
-
-		memcpy_blt(src, dst->devPrivate.ptr, cpp*8,
-			   pitch, dst->devKind,
-			   0, 0,
-			   box->x1, box->y1,
-			   width, height);
-		box++;
+	if (sigtrap_get() == 0) {
+		char *src = ptr;
+		do {
+			int height = box->y2 - box->y1;
+			int width  = box->x2 - box->x1;
+			int pitch = PITCH(width, cpp);
+
+			DBG(("    copy offset %lx [%08x...%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
+			     (long)((char *)src - (char *)ptr),
+			     *(uint32_t*)src, *(uint32_t*)(src+pitch*height/2 + pitch/2 - 4), *(uint32_t*)(src+pitch*height - 4),
+			     box->x1, box->y1,
+			     width, height,
+			     pitch, dst->devKind, cpp*8));
+
+			assert(box->x1 >= 0);
+			assert(box->x2 <= dst->drawable.width);
+			assert(box->y1 >= 0);
+			assert(box->y2 <= dst->drawable.height);
+
+			memcpy_blt(src, dst->devPrivate.ptr, cpp*8,
+				   pitch, dst->devKind,
+				   0, 0,
+				   box->x1, box->y1,
+				   width, height);
+			box++;
 
-		src += pitch * height;
-	} while (--nbox);
-	assert(src - (char *)ptr == __kgem_buffer_size(dst_bo));
+			src += pitch * height;
+		} while (--nbox);
+		assert(src - (char *)ptr == __kgem_buffer_size(dst_bo));
+		sigtrap_put();
+	}
 	kgem_bo_destroy(kgem, dst_bo);
 	sna->blt_state.fill_bo = 0;
 }
@@ -831,7 +839,7 @@ bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 		     tmp.drawable.width, tmp.drawable.height,
 		     sna->render.max_3d_size, sna->render.max_3d_size));
 		if (must_tile(sna, tmp.drawable.width, tmp.drawable.height)) {
-			BoxRec tile, stack[64], *clipped, *c;
+			BoxRec tile, stack[64], *clipped;
 			int cpp, step;
 
 tile:
@@ -883,7 +891,7 @@ tile:
 					}
 
 					if (sigtrap_get() == 0) {
-						c = clipped;
+						BoxRec *c = clipped;
 						for (n = 0; n < nbox; n++) {
 							*c = box[n];
 							if (!box_intersect(c, &tile))
@@ -940,28 +948,32 @@ tile:
 			if (!src_bo)
 				goto fallback;
 
-			for (n = 0; n < nbox; n++) {
-				DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
-				     __FUNCTION__,
-				     box[n].x1, box[n].y1,
-				     box[n].x2, box[n].y2,
-				     src_dx, src_dy,
-				     box[n].x1 - extents.x1,
-				     box[n].y1 - extents.y1));
-				memcpy_blt(src, ptr, tmp.drawable.bitsPerPixel,
-					   stride, src_bo->pitch,
-					   box[n].x1 + src_dx,
-					   box[n].y1 + src_dy,
-					   box[n].x1 - extents.x1,
-					   box[n].y1 - extents.y1,
-					   box[n].x2 - box[n].x1,
-					   box[n].y2 - box[n].y1);
-			}
+			if (sigtrap_get() == 0) {
+				for (n = 0; n < nbox; n++) {
+					DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+					     __FUNCTION__,
+					     box[n].x1, box[n].y1,
+					     box[n].x2, box[n].y2,
+					     src_dx, src_dy,
+					     box[n].x1 - extents.x1,
+					     box[n].y1 - extents.y1));
+					memcpy_blt(src, ptr, tmp.drawable.bitsPerPixel,
+						   stride, src_bo->pitch,
+						   box[n].x1 + src_dx,
+						   box[n].y1 + src_dy,
+						   box[n].x1 - extents.x1,
+						   box[n].y1 - extents.y1,
+						   box[n].x2 - box[n].x1,
+						   box[n].y2 - box[n].y1);
+				}
 
-			n = sna->render.copy_boxes(sna, GXcopy,
-						   &tmp, src_bo, -extents.x1, -extents.y1,
-						   dst, dst_bo, dst_dx, dst_dy,
-						   box, nbox, 0);
+				n = sna->render.copy_boxes(sna, GXcopy,
+							   &tmp, src_bo, -extents.x1, -extents.y1,
+							   dst, dst_bo, dst_dx, dst_dy,
+							   box, nbox, 0);
+				sigtrap_put();
+			} else
+				n = 0;
 
 			kgem_bo_destroy(&sna->kgem, src_bo);
 
@@ -1026,59 +1038,62 @@ tile:
 			if (!src_bo)
 				break;
 
-			offset = 0;
-			do {
-				int height = box->y2 - box->y1;
-				int width = box->x2 - box->x1;
-				int pitch = PITCH(width, dst->drawable.bitsPerPixel >> 3);
-				uint32_t *b;
-
-				DBG(("  %s: box src=(%d, %d), dst=(%d, %d) size=(%d, %d), dst offset=%d, dst pitch=%d\n",
-				     __FUNCTION__,
-				     box->x1 + src_dx, box->y1 + src_dy,
-				     box->x1 + dst_dx, box->y1 + dst_dy,
-				     width, height,
-				     offset, pitch));
-
-				assert(box->x1 + src_dx >= 0);
-				assert((box->x2 + src_dx)*dst->drawable.bitsPerPixel <= 8*stride);
-				assert(box->y1 + src_dy >= 0);
-
-				assert(box->x1 + dst_dx >= 0);
-				assert(box->y1 + dst_dy >= 0);
-
-				memcpy_blt(src, (char *)ptr + offset,
-					   dst->drawable.bitsPerPixel,
-					   stride, pitch,
-					   box->x1 + src_dx, box->y1 + src_dy,
-					   0, 0,
-					   width, height);
-
-				assert(kgem->mode == KGEM_BLT);
-				b = kgem->batch + kgem->nbatch;
-				b[0] = cmd;
-				b[1] = br13;
-				b[2] = (box->y1 + dst_dy) << 16 | (box->x1 + dst_dx);
-				b[3] = (box->y2 + dst_dy) << 16 | (box->x2 + dst_dx);
-				*(uint64_t *)(b+4) =
-					kgem_add_reloc64(kgem, kgem->nbatch + 4, dst_bo,
-							 I915_GEM_DOMAIN_RENDER << 16 |
-							 I915_GEM_DOMAIN_RENDER |
-							 KGEM_RELOC_FENCED,
-							 0);
-				b[6] = 0;
-				b[7] = pitch;
-				*(uint64_t *)(b+8) =
-					kgem_add_reloc64(kgem, kgem->nbatch + 8, src_bo,
-							 I915_GEM_DOMAIN_RENDER << 16 |
-							 KGEM_RELOC_FENCED,
-							 offset);
-				kgem->nbatch += 10;
-
-				box++;
-				offset += pitch * height;
-			} while (--nbox_this_time);
-			assert(offset == __kgem_buffer_size(src_bo));
+			if (sigtrap_get() == 0) {
+				offset = 0;
+				do {
+					int height = box->y2 - box->y1;
+					int width = box->x2 - box->x1;
+					int pitch = PITCH(width, dst->drawable.bitsPerPixel >> 3);
+					uint32_t *b;
+
+					DBG(("  %s: box src=(%d, %d), dst=(%d, %d) size=(%d, %d), dst offset=%d, dst pitch=%d\n",
+					     __FUNCTION__,
+					     box->x1 + src_dx, box->y1 + src_dy,
+					     box->x1 + dst_dx, box->y1 + dst_dy,
+					     width, height,
+					     offset, pitch));
+
+					assert(box->x1 + src_dx >= 0);
+					assert((box->x2 + src_dx)*dst->drawable.bitsPerPixel <= 8*stride);
+					assert(box->y1 + src_dy >= 0);
+
+					assert(box->x1 + dst_dx >= 0);
+					assert(box->y1 + dst_dy >= 0);
+
+					memcpy_blt(src, (char *)ptr + offset,
+						   dst->drawable.bitsPerPixel,
+						   stride, pitch,
+						   box->x1 + src_dx, box->y1 + src_dy,
+						   0, 0,
+						   width, height);
+
+					assert(kgem->mode == KGEM_BLT);
+					b = kgem->batch + kgem->nbatch;
+					b[0] = cmd;
+					b[1] = br13;
+					b[2] = (box->y1 + dst_dy) << 16 | (box->x1 + dst_dx);
+					b[3] = (box->y2 + dst_dy) << 16 | (box->x2 + dst_dx);
+					*(uint64_t *)(b+4) =
+						kgem_add_reloc64(kgem, kgem->nbatch + 4, dst_bo,
+								 I915_GEM_DOMAIN_RENDER << 16 |
+								 I915_GEM_DOMAIN_RENDER |
+								 KGEM_RELOC_FENCED,
+								 0);
+					b[6] = 0;
+					b[7] = pitch;
+					*(uint64_t *)(b+8) =
+						kgem_add_reloc64(kgem, kgem->nbatch + 8, src_bo,
+								 I915_GEM_DOMAIN_RENDER << 16 |
+								 KGEM_RELOC_FENCED,
+								 offset);
+					kgem->nbatch += 10;
+
+					box++;
+					offset += pitch * height;
+				} while (--nbox_this_time);
+				assert(offset == __kgem_buffer_size(src_bo));
+				sigtrap_put();
+			}
 
 			if (nbox) {
 				_kgem_submit(kgem);
@@ -1116,6 +1131,11 @@ tile:
 			if (!src_bo)
 				break;
 
+			if (sigtrap_get()) {
+				kgem_bo_destroy(kgem, src_bo);
+				goto fallback;
+			}
+
 			offset = 0;
 			do {
 				int height = box->y2 - box->y1;
@@ -1167,6 +1187,7 @@ tile:
 				offset += pitch * height;
 			} while (--nbox_this_time);
 			assert(offset == __kgem_buffer_size(src_bo));
+			sigtrap_put();
 
 			if (nbox) {
 				_kgem_submit(kgem);
@@ -1322,7 +1343,7 @@ bool sna_write_boxes__xor(struct sna *sna, PixmapPtr dst,
 		     tmp.drawable.width, tmp.drawable.height,
 		     sna->render.max_3d_size, sna->render.max_3d_size));
 		if (must_tile(sna, tmp.drawable.width, tmp.drawable.height)) {
-			BoxRec tile, stack[64], *clipped, *c;
+			BoxRec tile, stack[64], *clipped;
 			int step;
 
 tile:
@@ -1369,38 +1390,43 @@ tile:
 						goto fallback;
 					}
 
-					c = clipped;
-					for (n = 0; n < nbox; n++) {
-						*c = box[n];
-						if (!box_intersect(c, &tile))
-							continue;
+					if (sigtrap_get() == 0) {
+						BoxRec *c = clipped;
+						for (n = 0; n < nbox; n++) {
+							*c = box[n];
+							if (!box_intersect(c, &tile))
+								continue;
 
-						DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
-						     __FUNCTION__,
-						     c->x1, c->y1,
-						     c->x2, c->y2,
-						     src_dx, src_dy,
-						     c->x1 - tile.x1,
-						     c->y1 - tile.y1));
-						memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
-							   stride, src_bo->pitch,
-							   c->x1 + src_dx,
-							   c->y1 + src_dy,
-							   c->x1 - tile.x1,
-							   c->y1 - tile.y1,
-							   c->x2 - c->x1,
-							   c->y2 - c->y1,
-							   and, or);
-						c++;
-					}
+							DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+							     __FUNCTION__,
+							     c->x1, c->y1,
+							     c->x2, c->y2,
+							     src_dx, src_dy,
+							     c->x1 - tile.x1,
+							     c->y1 - tile.y1));
+							memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
+								   stride, src_bo->pitch,
+								   c->x1 + src_dx,
+								   c->y1 + src_dy,
+								   c->x1 - tile.x1,
+								   c->y1 - tile.y1,
+								   c->x2 - c->x1,
+								   c->y2 - c->y1,
+								   and, or);
+							c++;
+						}
+
+						if (c != clipped)
+							n = sna->render.copy_boxes(sna, GXcopy,
+										   &tmp, src_bo, -tile.x1, -tile.y1,
+										   dst, dst_bo, dst_dx, dst_dy,
+										   clipped, c - clipped, 0);
+						else
+							n = 1;
 
-					if (c != clipped)
-						n = sna->render.copy_boxes(sna, GXcopy,
-									   &tmp, src_bo, -tile.x1, -tile.y1,
-									   dst, dst_bo, dst_dx, dst_dy,
-									   clipped, c - clipped, 0);
-					else
-						n = 1;
+						sigtrap_put();
+					} else
+						n = 0;
 
 					kgem_bo_destroy(&sna->kgem, src_bo);
 
@@ -1424,29 +1450,33 @@ tile:
 			if (!src_bo)
 				goto fallback;
 
-			for (n = 0; n < nbox; n++) {
-				DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
-				     __FUNCTION__,
-				     box[n].x1, box[n].y1,
-				     box[n].x2, box[n].y2,
-				     src_dx, src_dy,
-				     box[n].x1 - extents.x1,
-				     box[n].y1 - extents.y1));
-				memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
-					   stride, src_bo->pitch,
-					   box[n].x1 + src_dx,
-					   box[n].y1 + src_dy,
-					   box[n].x1 - extents.x1,
-					   box[n].y1 - extents.y1,
-					   box[n].x2 - box[n].x1,
-					   box[n].y2 - box[n].y1,
-					   and, or);
-			}
+			if (sigtrap_get() == 0) {
+				for (n = 0; n < nbox; n++) {
+					DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+					     __FUNCTION__,
+					     box[n].x1, box[n].y1,
+					     box[n].x2, box[n].y2,
+					     src_dx, src_dy,
+					     box[n].x1 - extents.x1,
+					     box[n].y1 - extents.y1));
+					memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
+						   stride, src_bo->pitch,
+						   box[n].x1 + src_dx,
+						   box[n].y1 + src_dy,
+						   box[n].x1 - extents.x1,
+						   box[n].y1 - extents.y1,
+						   box[n].x2 - box[n].x1,
+						   box[n].y2 - box[n].y1,
+						   and, or);
+				}
 
-			n = sna->render.copy_boxes(sna, GXcopy,
-						   &tmp, src_bo, -extents.x1, -extents.y1,
-						   dst, dst_bo, dst_dx, dst_dy,
-						   box, nbox, 0);
+				n = sna->render.copy_boxes(sna, GXcopy,
+							   &tmp, src_bo, -extents.x1, -extents.y1,
+							   dst, dst_bo, dst_dx, dst_dy,
+							   box, nbox, 0);
+				sigtrap_put();
+			} else
+				n = 0;
 
 			kgem_bo_destroy(&sna->kgem, src_bo);
 
@@ -1509,7 +1539,12 @@ tile:
 						    KGEM_BUFFER_WRITE_INPLACE | (nbox ? KGEM_BUFFER_LAST : 0),
 						    &ptr);
 			if (!src_bo)
-				break;
+				goto fallback;
+
+			if (sigtrap_get()) {
+				kgem_bo_destroy(kgem, src_bo);
+				goto fallback;
+			}
 
 			offset = 0;
 			do {
@@ -1565,6 +1600,7 @@ tile:
 				offset += pitch * height;
 			} while (--nbox_this_time);
 			assert(offset == __kgem_buffer_size(src_bo));
+			sigtrap_put();
 
 			if (nbox) {
 				_kgem_submit(kgem);
@@ -1600,7 +1636,12 @@ tile:
 						    KGEM_BUFFER_WRITE_INPLACE | (nbox ? KGEM_BUFFER_LAST : 0),
 						    &ptr);
 			if (!src_bo)
-				break;
+				goto fallback;
+
+			if (sigtrap_get()) {
+				kgem_bo_destroy(kgem, src_bo);
+				goto fallback;
+			}
 
 			offset = 0;
 			do {
@@ -1654,6 +1695,7 @@ tile:
 				offset += pitch * height;
 			} while (--nbox_this_time);
 			assert(offset == __kgem_buffer_size(src_bo));
+			sigtrap_put();
 
 			if (nbox) {
 				_kgem_submit(kgem);


More information about the xorg-commit mailing list