xf86-video-intel: 2 commits - src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_render.c

Chris Wilson ickle at kemper.freedesktop.org
Wed May 1 08:14:29 PDT 2013


 src/sna/kgem.c       |   44 ++++++++++++++++++++++++++++++++++++--------
 src/sna/sna_accel.c  |    2 ++
 src/sna/sna_render.c |   13 ++++++-------
 3 files changed, 44 insertions(+), 15 deletions(-)

New commits:
commit 5637c173f85a5bb9a77572e4c070e0d612e6f49d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 1 15:14:55 2013 +0100

    sna: Be careful not to preemptively upload portions of a SHM pixmap
    
    Only upload the portion of the pixmap being used for this rendering
    operation, as the area outside of it may remain undefined and may still
    be written by the client before a future operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 4241836..c19a283 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -427,13 +427,14 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 		return NULL;
 	}
 
+	if (priv->shm)
+		blt = true;
+
 	if (priv->gpu_bo) {
 		if (priv->cpu_damage &&
 		    sna_damage_contains_box(priv->cpu_damage,
-					    box) != PIXMAN_REGION_OUT) {
-			if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ))
-				return NULL;
-		}
+					    box) != PIXMAN_REGION_OUT)
+			goto upload;
 
 		return priv->gpu_bo;
 	}
@@ -450,9 +451,6 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 		return NULL;
 	}
 
-	if (priv->shm)
-		blt = true;
-
 	if (DBG_FORCE_UPLOAD < 0) {
 		if (!sna_pixmap_force_to_gpu(pixmap,
 					     blt ? MOVE_READ : MOVE_SOURCE_HINT | MOVE_READ))
@@ -493,6 +491,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 	if (!migrate)
 		return NULL;
 
+upload:
 	if (blt) {
 		if (!sna_pixmap_move_area_to_gpu(pixmap, box,
 						 __MOVE_FORCE | MOVE_READ))
commit ca4a32c20d4c1f91552c02b9008ae16435b92d71
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 1 10:18:23 2013 +0100

    sna: Page align requests to userptr
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 615e36f..76451ec 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -286,6 +286,7 @@ static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
 	     bo->handle, bytes(bo)));
 	assert(bo->proxy == NULL);
+	assert(!bo->snoop);
 
 retry_gtt:
 	VG_CLEAR(mmap_arg);
@@ -848,7 +849,9 @@ static bool test_has_userptr(struct kgem *kgem)
 	if (kgem->gen == 040)
 		return false;
 
-	ptr = malloc(PAGE_SIZE);
+	if (posix_memalign(&ptr, PAGE_SIZE, PAGE_SIZE))
+		return false;
+
 	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
 	gem_close(kgem->fd, handle);
 	free(ptr);
@@ -4804,6 +4807,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				bool read_only)
 {
 	struct kgem_bo *bo;
+	uintptr_t first_page, last_page;
 	uint32_t handle;
 
 	assert(MAP(ptr) == ptr);
@@ -4811,22 +4815,44 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	if (!kgem->has_userptr)
 		return NULL;
 
-	handle = gem_userptr(kgem->fd, ptr, size, read_only);
+	first_page = (uintptr_t)ptr;
+	last_page = first_page + size + PAGE_SIZE - 1;
+
+	first_page &= ~(PAGE_SIZE-1);
+	last_page &= ~(PAGE_SIZE-1);
+	assert(last_page > first_page);
+
+	handle = gem_userptr(kgem->fd,
+			     (void *)first_page, last_page-first_page,
+			     read_only);
 	if (handle == 0)
 		return NULL;
 
-	bo = __kgem_bo_alloc(handle, NUM_PAGES(size));
+	bo = __kgem_bo_alloc(handle, (last_page - first_page) / PAGE_SIZE);
 	if (bo == NULL) {
 		gem_close(kgem->fd, handle);
 		return NULL;
 	}
 
 	bo->snoop = !kgem->has_llc;
-	bo->map = MAKE_USER_MAP(ptr);
 	debug_alloc__bo(kgem, bo);
 
-	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
-	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle));
+	if (first_page != (uintptr_t)ptr) {
+		struct kgem_bo *proxy;
+
+		proxy = kgem_create_proxy(kgem, bo,
+					  (uintptr_t)ptr - first_page, size);
+		kgem_bo_destroy(kgem, bo);
+		if (proxy == NULL)
+			return NULL;
+
+		bo = proxy;
+	}
+
+	bo->map = MAKE_USER_MAP(ptr);
+
+	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d (proxy? %d)\n",
+	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle, bo->proxy != NULL));
 	return bo;
 }
 
@@ -4946,6 +4972,8 @@ struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
 	bo->dirty = target->dirty;
 	bo->tiling = target->tiling;
 	bo->pitch = target->pitch;
+	bo->flush = target->flush;
+	bo->snoop = target->snoop;
 
 	assert(!bo->scanout);
 	bo->proxy = kgem_bo_reference(target);
@@ -5154,7 +5182,7 @@ free_cacheing:
 			return NULL;
 
 		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
-		if (posix_memalign(&bo->mem, PAGE_SIZE, alloc *PAGE_SIZE)) {
+		if (posix_memalign(&bo->mem, PAGE_SIZE, alloc * PAGE_SIZE)) {
 			free(bo);
 			return NULL;
 		}
@@ -5757,7 +5785,7 @@ kgem_replace_bo(struct kgem *kgem,
 	assert(src->tiling == I915_TILING_NONE);
 
 	size = height * pitch;
-	size = PAGE_ALIGN(size) / PAGE_SIZE;
+	size = NUM_PAGES(size);
 
 	dst = search_linear_cache(kgem, size, 0);
 	if (dst == NULL)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 43a6d70..051f116 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13726,6 +13726,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			     __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber,
 			     priv->pixmap->refcnt));
+			assert(!priv->flush);
 			ret = sna_pixmap_move_to_cpu(priv->pixmap,
 						     MOVE_READ | MOVE_WRITE);
 			assert(!ret || priv->gpu_bo == NULL);
@@ -13734,6 +13735,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		} else {
 			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber));
+			assert(priv->flush);
 			if (sna_pixmap_move_to_gpu(priv->pixmap,
 						   MOVE_READ | __MOVE_FORCE))
 				kgem_bo_unclean(&sna->kgem, priv->gpu_bo);


More information about the xorg-commit mailing list