xf86-video-intel: Branch 'xwayland' - 1284 commits - autogen.sh configure.ac .gitignore m4/.gitignore Makefile.am man/intel.man NEWS src/brw_defines.h src/brw_structs.h src/common.h src/compat-api.h src/i830_render.c src/i915_render.c src/i915_video.c src/i965_3d.c src/i965_reg.h src/i965_render.c src/i965_video.c src/intel_batchbuffer.c src/intel_batchbuffer.h src/intel_display.c src/intel_dri.c src/intel_driver.c src/intel_driver.h src/intel_glamor.c src/intel_glamor.h src/intel.h src/intel_hwmc.c src/intel_list.h src/intel_memory.c src/intel_module.c src/intel_options.c src/intel_options.h src/intel_shadow.c src/intel_uxa.c src/intel_video.c src/legacy/i810 src/legacy/legacy.h src/Makefile.am src/sna/blt.c src/sna/brw src/sna/compiler.h src/sna/fb src/sna/gen2_render.c src/sna/gen3_render.c src/sna/gen4_render.c src/sna/gen4_render.h src/sna/gen5_render.c src/sna/gen5_render.h src/sna/gen6_render.c src/sna/gen6_render.h src/sna/gen7_render.c src/sna/gen7_render.h src/sna/kgem.c src/sna/kgem_debug.c src/sna/kgem_debug_gen2.c src/sna/kgem_debug_gen3.c src/sna/kgem_debug_gen4.c src/sna/kgem_debug_gen5.c src/sna/kgem_debug_gen6.c src/sna/kgem_debug_gen7.c src/sna/kgem.h src/sna/Makefile.am src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_composite.c src/sna/sna_damage.c src/sna/sna_damage.h src/sna/sna_display.c src/sna/sna_dri.c src/sna/sna_driver.c src/sna/sna_glyphs.c src/sna/sna_gradient.c src/sna/sna.h src/sna/sna_io.c src/sna/sna_module.h src/sna/sna_reg.h src/sna/sna_render.c src/sna/sna_render.h src/sna/sna_render_inline.h src/sna/sna_stream.c src/sna/sna_tiling.c src/sna/sna_transform.c src/sna/sna_trapezoids.c src/sna/sna_video.c src/sna/sna_video.h src/sna/sna_video_hwmc.c src/sna/sna_video_overlay.c src/sna/sna_video_sprite.c src/sna/sna_video_textured.c test/basic-rectangle.c test/basic-string.c test/dri2.c test/dri2.h test/dri2-swap.c test/Makefile.am uxa/uxa-accel.c uxa/uxa.c uxa/uxa-glyphs.c uxa/uxa.h uxa/uxa-priv.h uxa/ uxa-render.c

Kristian Høgsberg krh at kemper.freedesktop.org
Mon Sep 24 13:00:27 PDT 2012


Rebased ref, commits from common ancestor:
commit 860e27c1c385993ae4df3e0a95967af1b388fd16
Author: Kristian Høgsberg <krh at bitplanet.net>
Date:   Mon Sep 24 15:01:09 2012 -0400

    xwayland: Add xwayland support

diff --git a/src/intel.h b/src/intel.h
index a5603fe..b1d8b0d 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -350,6 +350,8 @@ typedef struct intel_screen_private {
 	InputHandlerProc uevent_handler;
 #endif
 	Bool has_prime_vmap_flush;
+
+	struct xwl_screen *xwl_screen;
 } intel_screen_private;
 
 #ifndef I915_PARAM_HAS_PRIME_VMAP_FLUSH
diff --git a/src/intel_dri.c b/src/intel_dri.c
index 8cab73f..038940e 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -50,6 +50,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "xf86.h"
 #include "xf86_OSproc.h"
+#include "xf86Priv.h"
 
 #include "xf86Pci.h"
 #include "xf86drm.h"
@@ -58,6 +59,10 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "shadow.h"
 #include "fb.h"
 
+#ifdef XORG_WAYLAND
+#include <xwayland.h>
+#endif
+
 #include "intel.h"
 #include "i830_reg.h"
 
@@ -1506,6 +1511,21 @@ out_complete:
 	return TRUE;
 }
 
+#ifdef XORG_WAYLAND
+static int intel_auth_magic2(ScreenPtr screen, uint32_t magic)
+{
+	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+
+	/* Not wayland, go straight to drm */
+	if (!xorgWayland)
+		return drmAuthMagic(intel->drmSubFD, magic);
+
+        /* Forward the request to our host */
+        return xwl_drm_authenticate(intel->xwl_screen, magic);
+}
+#endif
+
 static int dri2_server_generation;
 #endif
 
@@ -1589,6 +1609,13 @@ Bool I830DRI2ScreenInit(ScreenPtr screen)
 	driverNames[0] = info.driverName;
 #endif
 
+#if defined(XORG_WAYLAND) /* If we have XORG_WAYLAND, we have AuthMagic2 */
+	info.version = 4;
+	info.AuthMagic2 = intel_auth_magic2;
+	info.GetMSC = NULL;
+	info.ScheduleWaitMSC = NULL;
+#endif
+
 	return DRI2ScreenInit(screen, &info);
 }
 
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 41f0311..d1da72d 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -70,12 +70,17 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "intel_hwmc.h"
 #endif
 
+#ifdef XORG_WAYLAND
+#include <xwayland.h>
+#endif
+
 #include "legacy/legacy.h"
 #include "uxa.h"
 
 #include <sys/ioctl.h>
 #include "i915_drm.h"
 #include <xf86drmMode.h>
+#include <xf86Priv.h>
 
 #include "intel_glamor.h"
 #include "intel_options.h"
@@ -167,10 +172,16 @@ static Bool i830CreateScreenResources(ScreenPtr screen)
 	if (!(*screen->CreateScreenResources) (screen))
 		return FALSE;
 
+#ifdef XORG_WAYLAND
+	if (intel->xwl_screen)
+		xwl_screen_init(intel->xwl_screen, screen);
+#endif
+
 	if (!intel_uxa_create_screen_resources(screen))
 		return FALSE;
 
-	intel_copy_fb(scrn);
+	if (!intel->xwl_screen)
+		intel_copy_fb(scrn);
 	return TRUE;
 }
 
@@ -451,6 +462,27 @@ static void intel_setup_capabilities(ScrnInfoPtr scrn)
 #endif
 }
 
+#ifdef XORG_WAYLAND
+static int intel_create_window_buffer(struct xwl_window *xwl_window,
+				      PixmapPtr pixmap)
+{
+	uint32_t name;
+	dri_bo *bo;
+
+	bo = intel_get_pixmap_bo(pixmap);
+	if (bo == NULL || dri_bo_flink(bo, &name) != 0)
+		return BadDrawable;
+
+	return xwl_create_window_buffer_drm(xwl_window, pixmap, name);
+}
+
+static struct xwl_driver xwl_driver = {
+	.version = 1,
+	.use_drm = 1,
+	.create_window_buffer = intel_create_window_buffer
+};
+#endif
+
 /**
  * This is called before ScreenInit to do any require probing of screen
  * configuration.
@@ -505,12 +537,6 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	intel->PciInfo = xf86GetPciInfoForEntity(intel->pEnt->index);
 
-	if (!intel_open_drm_master(scrn)) {
-		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "Failed to become DRM master.\n");
-		return FALSE;
-	}
-
 	scrn->monitor = scrn->confScreen->monitor;
 	scrn->progClock = TRUE;
 	scrn->rgbBits = 8;
@@ -549,6 +575,31 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 	intel_check_chipset_option(scrn);
 	intel_check_dri_option(scrn);
 
+#ifdef XORG_WAYLAND
+	if (xorgWayland) {
+		intel->xwl_screen = xwl_screen_create();
+		if (!intel->xwl_screen) {
+			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+				   "Failed to initialize xwayland.\n");
+			return FALSE;
+		}
+
+		if (!xwl_screen_pre_init(scrn, intel->xwl_screen,
+					 0, &xwl_driver)) {
+			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+				   "Failed to pre-init xwayland screen\n");
+			xwl_screen_destroy(intel->xwl_screen);
+		}
+
+		intel->drmSubFD = xwl_screen_get_drm_fd(intel->xwl_screen);
+	}
+#endif
+
+	if (!intel->xwl_screen && !intel_open_drm_master(scrn))
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			   "Failed to become DRM master.\n");
+
+
 	if (!intel_init_bufmgr(intel)) {
 		PreInitCleanup(scrn);
 		return FALSE;
@@ -590,6 +641,9 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 	intel->swapbuffers_wait = xf86ReturnOptValBool(intel->Options,
 						       OPTION_SWAPBUFFERS_WAIT,
 						       TRUE);
+	if (!intel->xwl_screen)
+		intel->swapbuffers_wait = TRUE;
+
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Wait on SwapBuffers? %s\n",
 		   intel->swapbuffers_wait ? "enabled" : "disabled");
 
@@ -611,7 +665,8 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	I830XvInit(scrn);
 
-	if (!intel_mode_pre_init(scrn, intel->drmSubFD, intel->cpp)) {
+	if (!intel->xwl_screen &&
+	    !intel_mode_pre_init(scrn, intel->drmSubFD, intel->cpp)) {
 		PreInitCleanup(scrn);
 		return FALSE;
 	}
@@ -738,6 +793,11 @@ I830BlockHandler(BLOCKHANDLER_ARGS_DECL)
 #ifdef INTEL_PIXMAP_SHARING
 	intel_dirty_update(screen);
 #endif
+
+#ifdef XORG_WAYLAND
+	if (intel->xwl_screen)
+		xwl_screen_post_damage(intel->xwl_screen);
+#endif
 }
 
 static Bool
@@ -773,9 +833,16 @@ intel_flush_callback(CallbackListPtr *list,
 		     pointer user_data, pointer call_data)
 {
 	ScrnInfoPtr scrn = user_data;
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+
 	if (scrn->vtSema) {
 		intel_batch_submit(scrn);
 		intel_glamor_flush(intel_get_screen_private(scrn));
+
+#ifdef XORG_WAYLAND
+		if (intel->xwl_screen)
+			xwl_screen_post_damage(intel->xwl_screen);
+#endif
 	}
 }
 
@@ -1049,7 +1116,8 @@ I830ScreenInit(SCREEN_INIT_ARGS_DECL)
 	if (serverGeneration == 1)
 		xf86ShowUnusedOptions(scrn->scrnIndex, scrn->options);
 
-	intel_mode_init(intel);
+	if (!intel->xwl_screen)
+		intel_mode_init(intel);
 
 	intel->suspended = FALSE;
 
@@ -1162,7 +1230,8 @@ static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 	}
 
 	if (intel->front_buffer) {
-		intel_mode_remove_fb(intel);
+		if (!intel->xwl_screen)
+			intel_mode_remove_fb(intel);
 		drm_intel_bo_unreference(intel->front_buffer);
 		intel->front_buffer = NULL;
 	}
diff --git a/src/intel_module.c b/src/intel_module.c
index e6ca964..7b35cc4 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -33,6 +33,7 @@
 #include <xf86Parser.h>
 #include <xf86drm.h>
 #include <xf86drmMode.h>
+#include <xf86Priv.h>
 #include <i915_drm.h>
 
 #include <xorgVersion.h>
@@ -376,6 +377,11 @@ static Bool intel_driver_func(ScrnInfoPtr pScrn,
 #else
 		(*flag) = HW_IO | HW_MMIO;
 #endif
+
+#ifdef XORG_WAYLAND
+		if (xorgWayland)
+			(*flag) = HW_SKIP_CONSOLE;
+#endif
 		return TRUE;
 	default:
 		/* Unknown or deprecated function */
@@ -481,7 +487,11 @@ static Bool intel_pci_probe(DriverPtr		driver,
 	PciChipsets intel_pci_chipsets[NUM_CHIPSETS];
 	unsigned i;
 
-	if (!has_kernel_mode_setting(device)) {
+	if (!has_kernel_mode_setting(device)
+#ifdef XORG_WAYLAND
+	    && !xorgWayland
+#endif
+	    ) {
 #if KMS_ONLY
 		return FALSE;
 #else
commit c6008068372709c73034163eddc902b47bf87d24
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 21:42:31 2012 +0100

    sna: Check against op->dst.bo rather than priv->cpu_bo for composite upload
    
    In this case, we may also be handling an unattached pixmap, so avoid the
    dereferences of the sna_pixmap unless we are sure it will exist.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index b5d7ea7..94b5f4a 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1871,19 +1871,19 @@ clear:
 				goto put;
 		}
 	} else {
-		struct sna_pixmap *priv;
-
 put:
-		priv = sna_pixmap(tmp->dst.pixmap);
-		if (priv->cpu_bo && tmp->dst.bo == priv->cpu_bo) {
-			assert(kgem_bo_is_busy(tmp->dst.bo));
-			tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
-							  FORCE_GPU | PREFER_GPU,
-							  &dst_box,
-							  &tmp->damage);
+		if (tmp->dst.bo) {
+			struct sna_pixmap *priv = sna_pixmap(tmp->dst.pixmap);
 			if (tmp->dst.bo == priv->cpu_bo) {
-				DBG(("%s: forcing the stall to overwrite a busy CPU bo\n", __FUNCTION__));
-				tmp->dst.bo = NULL;
+				assert(kgem_bo_is_busy(tmp->dst.bo));
+				tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
+								  FORCE_GPU | PREFER_GPU,
+								  &dst_box,
+								  &tmp->damage);
+				if (tmp->dst.bo == priv->cpu_bo) {
+					DBG(("%s: forcing the stall to overwrite a busy CPU bo\n", __FUNCTION__));
+					tmp->dst.bo = NULL;
+				}
 			}
 		}
 
commit 5ed840881c26e90eb8e00521b6d77b0ea514de5e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 20:29:21 2012 +0100

    sna: Check that the CPU bo exists before declaring it is busy along upload
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=55251
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 6685549..b5d7ea7 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1875,7 +1875,7 @@ clear:
 
 put:
 		priv = sna_pixmap(tmp->dst.pixmap);
-		if (tmp->dst.bo == priv->cpu_bo) {
+		if (priv->cpu_bo && tmp->dst.bo == priv->cpu_bo) {
 			assert(kgem_bo_is_busy(tmp->dst.bo));
 			tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
 							  FORCE_GPU | PREFER_GPU,
commit a858afc66c1fa2eec65a7041e991f2266f82deca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 13:49:40 2012 +0100

    Silence a couple of potential compiler warnings
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index bf16049..2a3a393 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -385,7 +385,6 @@ intel_crtc_apply(xf86CrtcPtr crtc)
 	ScrnInfoPtr scrn = crtc->scrn;
 	struct intel_crtc *intel_crtc = crtc->driver_private;
 	struct intel_mode *mode = intel_crtc->mode;
-	intel_screen_private *intel = intel_get_screen_private(scrn);
 	xf86CrtcConfigPtr   xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
 	uint32_t *output_ids;
 	int output_count = 0;
diff --git a/src/intel_module.c b/src/intel_module.c
index 7de9da7..e6ca964 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -537,6 +537,8 @@ static Bool intel_pci_probe(DriverPtr		driver,
 #if USE_UXA
 	case UXA: return intel_init_scrn(scrn);
 #endif
+
+	default: break;
 	}
 #endif
 
@@ -592,6 +594,8 @@ intel_platform_probe(DriverPtr driver,
 #if USE_UXA
         case UXA: return intel_init_scrn(scrn);
 #endif
+
+	default: break;
 	}
 #endif
 
commit 9326acc2917109f06dda809107c8fa5a2273c3d2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 13:40:04 2012 +0100

    Allow compilation of a separate i810 driver
    
    Allow --enable-ums-only as a counter-option to --enable-kms-only in case
    the distribution wishes to enable a non-root KMS driver but also offer
    a separate UMS driver for i81x.
    
    On the second pass, use "--enable-ums-only --disable-uxa --disable-sna"
    to get the trimmed down unaccelerated i810 support.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 6b0b0a8..1cd5a92 100644
--- a/configure.ac
+++ b/configure.ac
@@ -125,6 +125,10 @@ AC_ARG_ENABLE(kms-only, AS_HELP_STRING([--enable-kms-only],
                                   [Assume KMS support [[default=no]]]),
               [KMS_ONLY="$enableval"],
               [KMS_ONLY=no])
+AC_ARG_ENABLE(ums-only, AS_HELP_STRING([--enable-ums-only],
+                                  [Assume only UMS (no KMS) support [[default=no]]]),
+              [UMS_ONLY="$enableval"],
+              [UMS_ONLY=no])
 
 required_xorg_xserver_version=1.6
 required_pixman_version=0.24
@@ -230,12 +234,12 @@ if test "x$accel" = xauto; then
 			accel=sna
 		fi
 	fi
-	if test "x$accel" = xauto; then
+	if test "x$accel" = xauto -a "x$UMS_ONLY" != "xyes"; then
 		AC_MSG_ERROR([No default acceleration option])
 	fi
 fi
 
-have_accel=no
+have_accel=none
 if test "x$accel" = xsna; then
 	if test "x$SNA" != "xno"; then
 		AC_DEFINE(DEFAULT_ACCEL_METHOD, SNA, [Default acceleration method])
@@ -254,7 +258,7 @@ if test "x$accel" = xuxa; then
 	fi
 fi
 AC_MSG_RESULT($accel)
-if test "x$accel" = xno; then
+if test "x$accel" = xnone -a "x$UMS_ONLY" != "xyes"; then
 	AC_MSG_ERROR([No default acceleration option])
 fi
 
@@ -361,6 +365,10 @@ AM_CONDITIONAL(KMS_ONLY, test x$KMS_ONLY = xyes)
 if test "x$KMS_ONLY" = xyes; then
 	AC_DEFINE(KMS_ONLY,1,[Assume KMS support])
 fi
+AM_CONDITIONAL(UMS_ONLY, test x$UMS_ONLY = xyes)
+if test "x$UMS_ONLY" = xyes; then
+	AC_DEFINE(UMS_ONLY,1,[Assume only UMS (no KMS) support])
+fi
 
 AM_CONDITIONAL(DEBUG, test x$DEBUG != xno)
 AM_CONDITIONAL(FULL_DEBUG, test x$FULL_DEBUG == xfull)
diff --git a/src/intel_module.c b/src/intel_module.c
index bc0b6d2..7de9da7 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -214,6 +214,7 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_I815, &intel_i81x_info ),
 #endif
 
+#if !UMS_ONLY
 	INTEL_DEVICE_MATCH (PCI_CHIP_I830_M, &intel_i830_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_845_G, &intel_i845_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I854, &intel_i855_info ),
@@ -309,6 +310,8 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
 
 	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
+#endif
+
 	{ 0, 0, 0 },
 };
 
@@ -422,6 +425,7 @@ static Bool has_kernel_mode_setting(struct pci_device *dev)
 	return ret;
 }
 
+#if !UMS_ONLY
 extern XF86ConfigPtr xf86configptr;
 
 static XF86ConfDevicePtr
@@ -459,6 +463,7 @@ static enum accel_method { UXA, SNA } get_accel_method(void)
 
 	return accel_method;
 }
+#endif
 
 /*
  * intel_pci_probe --
@@ -523,6 +528,7 @@ static Bool intel_pci_probe(DriverPtr		driver,
 	}
 #endif
 
+#if !UMS_ONLY
 	switch (get_accel_method()) {
 #if USE_SNA
 	case SNA: return sna_init_scrn(scrn, entity_num);
@@ -531,9 +537,10 @@ static Bool intel_pci_probe(DriverPtr		driver,
 #if USE_UXA
 	case UXA: return intel_init_scrn(scrn);
 #endif
-
-	default: return FALSE;
 	}
+#endif
+
+	return FALSE;
 }
 
 #ifdef XSERVER_PLATFORM_BUS
@@ -577,6 +584,7 @@ intel_platform_probe(DriverPtr driver,
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "using device path '%s'\n", path ? path : "Default device");
 
+#if !UMS_ONLY
 	switch (get_accel_method()) {
 #if USE_SNA
         case SNA: return sna_init_scrn(scrn, entity_num);
@@ -584,8 +592,10 @@ intel_platform_probe(DriverPtr driver,
 #if USE_UXA
         case UXA: return intel_init_scrn(scrn);
 #endif
-	default: return FALSE;
 	}
+#endif
+
+	return FALSE;
 }
 #endif
 
commit 0afb7efe8c48b5fc839e2137e870bea0f5fb3c9c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 09:41:59 2012 +0100

    sna: Avoid overflows when translating the box16 extents during a copy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 58029c8..a2afce0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3332,6 +3332,16 @@ static int16_t bound(int16_t a, uint16_t b)
 	return v;
 }
 
+static int16_t clamp(int16_t a, int16_t b)
+{
+	int v = (int)a + (int)b;
+	if (v > MAXSHORT)
+		return MAXSHORT;
+	if (v < MINSHORT)
+		return MINSHORT;
+	return v;
+}
+
 static inline bool box32_to_box16(const Box32Rec *b32, BoxRec *b16)
 {
 	b16->x1 = b32->x1;
@@ -4880,10 +4890,10 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		return NULL;
 	}
 
-	region.extents.x1 += sx - dx;
-	region.extents.x2 += sx - dx;
-	region.extents.y1 += sy - dy;
-	region.extents.y2 += sy - dy;
+	region.extents.x1 = clamp(region.extents.x1, sx - dx);
+	region.extents.x2 = clamp(region.extents.x2, sx - dx);
+	region.extents.y1 = clamp(region.extents.y1, sy - dy);
+	region.extents.y2 = clamp(region.extents.y2, sy - dy);
 
 	/* Compute source clip region */
 	clip = NULL;
commit 0fc6e5820e4543d52bcf8d0285ca6f69b5213831
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 09:32:49 2012 +0100

    sna: Add missing protection against int16 overflow when copying
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index fbee637..58029c8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4867,8 +4867,8 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	region.extents.x1 = dx;
 	region.extents.y1 = dy;
-	region.extents.x2 = dx + width;
-	region.extents.y2 = dy + height;
+	region.extents.x2 = bound(dx, width);
+	region.extents.y2 = bound(dy, height);
 	region.data = NULL;
 
 	DBG(("%s: dst extents (%d, %d), (%d, %d)\n", __FUNCTION__,
commit c2ca1c5abca687adb08d6e137bc3b70bdecb083d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 23 08:56:50 2012 +0100

    Reorder INTEL_XVMC define so that we pick up Option "XvMC"
    
    Otherwise it will remain disabled.
    
    Reported-by: Giacomo Comes <comes at naic.edu>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index 48ec386..a5603fe 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -68,6 +68,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "i915_drm.h"
 
 #include "intel_driver.h"
+#include "intel_options.h"
 #include "intel_list.h"
 #include "compat-api.h"
 
@@ -142,12 +143,6 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo);
 
 #include "common.h"
 
-#ifdef XvMCExtension
-#ifdef ENABLE_XVMC
-#define INTEL_XVMC 1
-#endif
-#endif
-
 #define PITCH_NONE 0
 
 /** enumeration of 3d consumers so some can maintain invariant state. */
diff --git a/src/intel_options.h b/src/intel_options.h
index 39c0b73..3b5262a 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -23,8 +23,9 @@ enum intel_options {
 	OPTION_PREFER_OVERLAY,
 	OPTION_HOTPLUG,
 	OPTION_RELAXED_FENCING,
-#ifdef INTEL_XVMC
+#if defined(XvMCExtension) && defined(ENABLE_XVMC)
 	OPTION_XVMC,
+#define INTEL_XVMC 1
 #endif
 #ifdef USE_SNA
 	OPTION_THROTTLE,
commit 92dbedc6138b923aa473935013ecb0346280c4d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 22 08:56:49 2012 +0100

    sna: Force the stall before trying to upload into a busy CPU bo
    
    Under the circumstances where we can not instead stream the write into
    the GPU bo, we need to use the busy CPU bo.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54978
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index b97df22..6685549 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1614,6 +1614,7 @@ prepare_blt_put(struct sna *sna,
 	assert(src->devPrivate.ptr);
 
 	if (op->dst.bo) {
+		assert(op->dst.bo == sna_pixmap(op->dst.pixmap)->gpu_bo);
 		if (alpha_fixup) {
 			op->u.blt.pixel = alpha_fixup;
 			op->blt   = blt_put_composite_with_alpha;
@@ -1870,25 +1871,33 @@ clear:
 				goto put;
 		}
 	} else {
+		struct sna_pixmap *priv;
+
 put:
-		if (!tmp->dst.bo) {
+		priv = sna_pixmap(tmp->dst.pixmap);
+		if (tmp->dst.bo == priv->cpu_bo) {
+			assert(kgem_bo_is_busy(tmp->dst.bo));
+			tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
+							  FORCE_GPU | PREFER_GPU,
+							  &dst_box,
+							  &tmp->damage);
+			if (tmp->dst.bo == priv->cpu_bo) {
+				DBG(("%s: forcing the stall to overwrite a busy CPU bo\n", __FUNCTION__));
+				tmp->dst.bo = NULL;
+			}
+		}
+
+		if (tmp->dst.bo == NULL) {
 			RegionRec region;
 
 			region.extents = dst_box;
 			region.data = NULL;
 
 			if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
-							MOVE_INPLACE_HINT | MOVE_WRITE))
+							MOVE_INPLACE_HINT | MOVE_READ | MOVE_WRITE))
 				return false;
-		} else {
-			if (tmp->dst.bo == sna_pixmap(tmp->dst.pixmap)->cpu_bo) {
-				assert(kgem_bo_is_busy(tmp->dst.bo));
-				tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
-								  FORCE_GPU | PREFER_GPU,
-								  &dst_box,
-								  &tmp->damage);
-			}
 		}
+
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
 	}
 
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index d3df17d..60d39cd 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -527,7 +527,7 @@ sna_composite(CARD8 op,
 	     get_drawable_dx(dst->pDrawable),
 	     get_drawable_dy(dst->pDrawable)));
 
-	if (op <= PictOpSrc) {
+	if (op <= PictOpSrc && priv->cpu_damage) {
 		int16_t x, y;
 
 		get_drawable_deltas(dst->pDrawable, pixmap, &x, &y);
@@ -535,6 +535,10 @@ sna_composite(CARD8 op,
 			pixman_region_translate(&region, x, y);
 
 		sna_damage_subtract(&priv->cpu_damage, &region);
+		if (priv->cpu_damage == NULL) {
+			list_del(&priv->list);
+			priv->cpu = false;
+		}
 
 		if (x|y)
 			pixman_region_translate(&region, -x, -y);
commit 3e1be265cf950976b5929b14a9dad0664deaa2c1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 21 17:37:58 2012 +0100

    sna: Force an inplace upload if already wedged
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index cdaadc0..60ea517 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -587,6 +587,9 @@ static bool upload_inplace(struct kgem *kgem,
 {
 	unsigned int bytes;
 
+	if (kgem->wedged)
+		return true;
+
 	if (!kgem_bo_can_map(kgem, bo) && !upload_inplace__tiled(kgem, bo))
 		return false;
 
commit b8967aff382c1b6bef2335dea51c979a3f0800c7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 21 12:16:27 2012 +0100

    sna: Do not query for the NULL edid property
    
    If the EDID blob id is set to 0, that means that it does not exist and
    so we can safely skip it.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=55193
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a0129e4..d7b131f 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1630,9 +1630,12 @@ sna_output_attach_edid(xf86OutputPtr output)
 		if (strcmp(prop.name, "EDID"))
 			continue;
 
+		if (koutput->prop_values[i] == 0)
+			continue;
+
 		VG_CLEAR(blob);
 		blob.length = 0;
-		blob.data =0;
+		blob.data = 0;
 		blob.blob_id = koutput->prop_values[i];
 
 		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
commit 0be1d964713ca407f029278a8256d02d925dc9da
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 11 21:48:24 2012 +0100

    sna: Use inplace X tiling for LLC uploads
    
    Based on a suggestion by Chad Versace (taken from a patch for mesa).
    
    This allows for a faster upload of pixel data through a ShmImage, or for
    complete replacement of a GPU bo.
    
    Using a modified version of x11perf to upload to a pixmap rather than
    scanout on an IVB i7-3720qm:
    
    Before:
    40000000 trep @   0.0007 msec (1410000.0/sec): ShmPutImage 10x10 square
     4000000 trep @   0.0110 msec (  90700.0/sec): ShmPutImage 100x100 square
      160000 trep @   0.1689 msec (   5920.0/sec): ShmPutImage 500x500 square
    
    After:
    40000000 trep @   0.0007 msec (1450000.0/sec): ShmPutImage 10x10 square
     6000000 trep @   0.0061 msec ( 164000.0/sec): ShmPutImage 100x100 square
      400000 trep @   0.1126 msec (   8880.0/sec): ShmPutImage 500x500 square
    
    However, the real takeaway from this is that the overheads for
    ShmPutImage are substantial, only hitting around 70% expected efficiency,
    and overshadowed by PutImage, which for reference is
    
    60000000 trep @   0.0006 msec (1800000.0/sec): PutImage 10x10 square
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index 853eb20..4735d14 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -214,6 +214,123 @@ memcpy_blt(const void *src, void *dst, int bpp,
 }
 
 void
+memcpy_to_tiled_x(const void *src, void *dst, int bpp, int swizzling,
+		  int32_t src_stride, int32_t dst_stride,
+		  int16_t src_x, int16_t src_y,
+		  int16_t dst_x, int16_t dst_y,
+		  uint16_t width, uint16_t height)
+{
+	const unsigned tile_width = 512;
+	const unsigned tile_height = 8;
+	const unsigned tile_size = 4096;
+
+	const unsigned cpp = bpp / 8;
+	const unsigned stride_tiles = dst_stride / tile_width;
+	const unsigned swizzle_pixels = (swizzling ? 64 : tile_width) / cpp;
+	const unsigned tile_pixels = ffs(tile_width / cpp) - 1;
+	const unsigned tile_mask = (1 << tile_pixels) - 1;
+
+	unsigned x, y;
+
+	DBG(("%s(bpp=%d, swizzling=%d): src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
+	     __FUNCTION__, bpp, swizzling, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
+
+	src = (const uint8_t *)src + src_y * src_stride + src_x * cpp;
+
+	for (y = 0; y < height; ++y) {
+		const uint32_t dy = y + dst_y;
+		const uint32_t tile_row =
+			(dy / tile_height * stride_tiles * tile_size +
+			 (dy & (tile_height-1)) * tile_width);
+		const uint8_t *src_row = (const uint8_t *)src + src_stride * y;
+		uint32_t dx = dst_x, offset;
+
+		x = width * cpp;
+		if (dx & (swizzle_pixels - 1)) {
+			const uint32_t swizzle_bound_pixels = ALIGN(dx + 1, swizzle_pixels);
+			const uint32_t length = min(dst_x + width, swizzle_bound_pixels) - dx;
+			offset = tile_row +
+				(dx >> tile_pixels) * tile_size +
+				(dx & tile_mask) * cpp;
+			switch (swizzling) {
+			case I915_BIT_6_SWIZZLE_NONE:
+				break;
+			case I915_BIT_6_SWIZZLE_9:
+				offset ^= (offset >> 3) & 64;
+				break;
+			case I915_BIT_6_SWIZZLE_9_10:
+				offset ^= ((offset ^ (offset >> 1)) >> 3) & 64;
+				break;
+			case I915_BIT_6_SWIZZLE_9_11:
+				offset ^= ((offset ^ (offset >> 2)) >> 3) & 64;
+				break;
+			}
+
+			memcpy((char *)dst + offset, src_row, length * cpp);
+
+			src_row += length * cpp;
+			x -= length * cpp;
+			dx += length;
+		}
+		if (swizzling) {
+			while (x >= 64) {
+				offset = tile_row +
+					(dx >> tile_pixels) * tile_size +
+					(dx & tile_mask) * cpp;
+				switch (swizzling) {
+				case I915_BIT_6_SWIZZLE_9:
+					offset ^= (offset >> 3) & 64;
+					break;
+				case I915_BIT_6_SWIZZLE_9_10:
+					offset ^= ((offset ^ (offset >> 1)) >> 3) & 64;
+					break;
+				case I915_BIT_6_SWIZZLE_9_11:
+					offset ^= ((offset ^ (offset >> 2)) >> 3) & 64;
+					break;
+				}
+
+				memcpy((char *)dst + offset, src_row, 64);
+
+				src_row += 64;
+				x -= 64;
+				dx += swizzle_pixels;
+			}
+		} else {
+			while (x >= 512) {
+				assert((dx & tile_mask) == 0);
+				offset = tile_row + (dx >> tile_pixels) * tile_size;
+
+				memcpy((char *)dst + offset, src_row, 512);
+
+				src_row += 512;
+				x -= 512;
+				dx += swizzle_pixels;
+			}
+		}
+		if (x) {
+			offset = tile_row +
+				(dx >> tile_pixels) * tile_size +
+				(dx & tile_mask) * cpp;
+			switch (swizzling) {
+			case I915_BIT_6_SWIZZLE_NONE:
+				break;
+			case I915_BIT_6_SWIZZLE_9:
+				offset ^= (offset >> 3) & 64;
+				break;
+			case I915_BIT_6_SWIZZLE_9_10:
+				offset ^= ((offset ^ (offset >> 1)) >> 3) & 64;
+				break;
+			case I915_BIT_6_SWIZZLE_9_11:
+				offset ^= ((offset ^ (offset >> 2)) >> 3) & 64;
+				break;
+			}
+
+			memcpy((char *)dst + offset, src_row, x);
+		}
+	}
+}
+
+void
 memmove_box(const void *src, void *dst,
 	    int bpp, int32_t stride,
 	    const BoxRec *box,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index fc7c881..0ea14f0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4082,6 +4082,56 @@ retry:
 	return (void *)(uintptr_t)mmap_arg.addr_ptr;
 }
 
+void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
+{
+	struct drm_i915_gem_mmap mmap_arg;
+
+	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
+	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
+        assert(bo->refcnt);
+	assert(!bo->purged);
+	assert(list_is_empty(&bo->list));
+	assert(bo->proxy == NULL);
+
+	if (IS_CPU_MAP(bo->map))
+		return MAP(bo->map);
+
+retry:
+	VG_CLEAR(mmap_arg);
+	mmap_arg.handle = bo->handle;
+	mmap_arg.offset = 0;
+	mmap_arg.size = bytes(bo);
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
+		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
+		       __FUNCTION__, bo->handle, bytes(bo), errno);
+		if (__kgem_throttle_retire(kgem, 0))
+			goto retry;
+
+		return NULL;
+	}
+
+	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
+	if (bo->map == NULL) {
+		DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
+		bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
+	}
+	return (void *)(uintptr_t)mmap_arg.addr_ptr;
+}
+
+void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr)
+{
+	DBG(("%s(handle=%d, size=%d)\n",
+	     __FUNCTION__, bo->handle, bytes(bo)));
+        assert(bo->refcnt);
+
+	if (IS_CPU_MAP(bo->map)) {
+                assert(ptr == MAP(bo->map));
+                return;
+        }
+
+	munmap(ptr, bytes(bo));
+}
+
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_gem_flink flink;
@@ -4961,6 +5011,19 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
 	}
 }
 
+int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo)
+{
+	struct drm_i915_gem_get_tiling tiling;
+
+	VG_CLEAR(tiling);
+	tiling.handle = bo->handle;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling))
+		return 0;
+
+	assert(bo->tiling == tiling.tiling_mode);
+	return tiling.swizzle_mode;
+}
+
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
 		struct kgem_bo *src,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 832b3f0..cdbb7cb 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -262,6 +262,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
 void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
+int kgem_bo_get_swizzling(struct kgem *kgem, struct kgem_bo *bo);
 
 void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
@@ -419,6 +420,8 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
@@ -494,7 +497,7 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
 }
 
-static inline bool kgem_bo_mapped(struct kgem_bo *bo)
+static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
 	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
@@ -502,12 +505,15 @@ static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 	if (bo->map == NULL)
 		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
 
+	if (bo->tiling == I915_TILING_X && !bo->scanout && kgem->has_llc)
+		return IS_CPU_MAP(bo->map);
+
 	return IS_CPU_MAP(bo->map) == !bo->tiling;
 }
 
 static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 {
-	if (kgem_bo_mapped(bo))
+	if (kgem_bo_mapped(kgem, bo))
 		return true;
 
 	if (!bo->tiling && kgem->has_llc)
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 382c0a5..28dff6d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -764,6 +764,12 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	   int16_t dst_x, int16_t dst_y,
 	   uint16_t width, uint16_t height);
 void
+memcpy_to_tiled_x(const void *src, void *dst, int bpp, int swizzling,
+		  int32_t src_stride, int32_t dst_stride,
+		  int16_t src_x, int16_t src_y,
+		  int16_t dst_x, int16_t dst_y,
+		  uint16_t width, uint16_t height);
+void
 memmove_box(const void *src, void *dst,
 	    int bpp, int32_t stride,
 	    const BoxRec *box,
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index a466f55..cdaadc0 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -482,6 +482,49 @@ fallback:
 	sna->blt_state.fill_bo = 0;
 }
 
+static bool upload_inplace__tiled(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (kgem->gen < 50) /* bit17 swizzling :( */
+		return false;
+
+	if (bo->tiling != I915_TILING_X)
+		return false;
+
+	if (bo->scanout)
+		return false;
+
+	return bo->domain == DOMAIN_CPU || kgem->has_llc;
+}
+
+static bool
+write_boxes_inplace__tiled(struct kgem *kgem,
+                           const uint8_t *src, int stride, int bpp, int16_t src_dx, int16_t src_dy,
+                           struct kgem_bo *bo, int16_t dst_dx, int16_t dst_dy,
+                           const BoxRec *box, int n)
+{
+	uint8_t *dst;
+	int swizzle;
+
+	assert(bo->tiling == I915_TILING_X);
+
+	dst = __kgem_bo_map__cpu(kgem, bo);
+	if (dst == NULL)
+		return false;
+
+	kgem_bo_sync__cpu(kgem, bo);
+	swizzle = kgem_bo_get_swizzling(kgem, bo);
+	do {
+		memcpy_to_tiled_x(src, dst, bpp, swizzle, stride, bo->pitch,
+				  box->x1 + src_dx, box->y1 + src_dy,
+				  box->x1 + dst_dx, box->y1 + dst_dy,
+				  box->x2 - box->x1, box->y2 - box->y1);
+		box++;
+	} while (--n);
+	__kgem_bo_unmap__cpu(kgem, bo, dst);
+
+	return true;
+}
+
 static bool write_boxes_inplace(struct kgem *kgem,
 				const void *src, int stride, int bpp, int16_t src_dx, int16_t src_dy,
 				struct kgem_bo *bo, int16_t dst_dx, int16_t dst_dy,
@@ -492,6 +535,11 @@ static bool write_boxes_inplace(struct kgem *kgem,
 	DBG(("%s x %d, handle=%d, tiling=%d\n",
 	     __FUNCTION__, n, bo->handle, bo->tiling));
 
+	if (upload_inplace__tiled(kgem, bo) &&
+	    write_boxes_inplace__tiled(kgem, src, stride, bpp, src_dx, src_dy,
+				       bo, dst_dx, dst_dy, box, n))
+		return true;
+
 	if (!kgem_bo_can_map(kgem, bo))
 		return false;
 
@@ -539,7 +587,7 @@ static bool upload_inplace(struct kgem *kgem,
 {
 	unsigned int bytes;
 
-	if (!kgem_bo_can_map(kgem, bo))
+	if (!kgem_bo_can_map(kgem, bo) && !upload_inplace__tiled(kgem, bo))
 		return false;
 
 	if (FORCE_INPLACE)
@@ -871,8 +919,6 @@ write_boxes_inplace__xor(struct kgem *kgem,
 			 const BoxRec *box, int n,
 			 uint32_t and, uint32_t or)
 {
-	int dst_pitch = bo->pitch;
-	int src_pitch = stride;
 	void *dst;
 
 	DBG(("%s x %d, tiling=%d\n", __FUNCTION__, n, bo->tiling));
@@ -888,10 +934,22 @@ write_boxes_inplace__xor(struct kgem *kgem,
 		     box->x1 + src_dx, box->y1 + src_dy,
 		     box->x1 + dst_dx, box->y1 + dst_dy,
 		     box->x2 - box->x1, box->y2 - box->y1,
-		     bpp, src_pitch, dst_pitch));
+		     bpp, stride, bo->pitch));
+
+		assert(box->x2 > box->x1);
+		assert(box->y2 > box->y1);
+
+		assert(box->x1 + dst_dx >= 0);
+		assert((box->x2 + dst_dx)*bpp <= 8*bo->pitch);
+		assert(box->y1 + dst_dy >= 0);
+		assert((box->y2 + dst_dy)*bo->pitch <= kgem_bo_size(bo));
+
+		assert(box->x1 + src_dx >= 0);
+		assert((box->x2 + src_dx)*bpp <= 8*stride);
+		assert(box->y1 + src_dy >= 0);
 
 		memcpy_xor(src, dst, bpp,
-			   src_pitch, dst_pitch,
+			   stride, bo->pitch,
 			   box->x1 + src_dx, box->y1 + src_dy,
 			   box->x1 + dst_dx, box->y1 + dst_dy,
 			   box->x2 - box->x1, box->y2 - box->y1,
@@ -1282,6 +1340,19 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling, busy));
 
+	if (!busy && upload_inplace__tiled(kgem, bo)) {
+		BoxRec box;
+
+		box.x1 = box.y1 = 0;
+		box.x2 = pixmap->drawable.width;
+		box.y2 = pixmap->drawable.height;
+
+		if (write_boxes_inplace__tiled(kgem, src,
+					       stride, pixmap->drawable.bitsPerPixel, 0, 0,
+					       bo, 0, 0, &box, 1))
+			return true;
+	}
+
 	if ((busy || !kgem_bo_can_map(kgem, bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
 		return true;
@@ -1304,6 +1375,19 @@ bool sna_replace(struct sna *sna,
 				   (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8))
 			goto err;
 	} else {
+		if (upload_inplace__tiled(kgem, bo)) {
+			BoxRec box;
+
+			box.x1 = box.y1 = 0;
+			box.x2 = pixmap->drawable.width;
+			box.y2 = pixmap->drawable.height;
+
+			if (write_boxes_inplace__tiled(kgem, src,
+						       stride, pixmap->drawable.bitsPerPixel, 0, 0,
+						       bo, 0, 0, &box, 1))
+				goto done;
+		}
+
 		if (kgem_bo_is_mappable(kgem, bo)) {
 			dst = kgem_bo_map(kgem, bo);
 			if (!dst)
@@ -1330,6 +1414,7 @@ bool sna_replace(struct sna *sna,
 		}
 	}
 
+done:
 	if (bo != *_bo)
 		kgem_bo_destroy(kgem, *_bo);
 	*_bo = bo;
commit d853064e7eebc5719645c12605782f995131a6fe
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 20 22:43:26 2012 +0100

    sna/gen3+: Trim the target extents to the CompositeClip
    
    When computing the active region of a composite operation with
    unknown extents we try to simply use the whole Drawable. However, this
    needs to be clipped otherwise it may trigger assertion failure with an
    offscreen pixmap.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=55164
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 723dd5e..4c4271e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2500,12 +2500,8 @@ gen3_composite_set_target(struct sna *sna,
 		box.y1 = y;
 		box.x2 = x + w;
 		box.y2 = y + h;
-	} else {
-		box.x1 = dst->pDrawable->x;
-		box.y1 = dst->pDrawable->y;
-		box.x2 = box.x1 + dst->pDrawable->width;
-		box.y2 = box.y1 + dst->pDrawable->height;
-	}
+	} else
+		sna_render_picture_extents(dst, &box);
 
 	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
 					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index c9a2cc0..a141766 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1983,12 +1983,8 @@ gen5_composite_set_target(struct sna *sna,
 		box.y1 = y;
 		box.x2 = x + w;
 		box.y2 = y + h;
-	} else {
-		box.x1 = dst->pDrawable->x;
-		box.y1 = dst->pDrawable->y;
-		box.x2 = box.x1 + dst->pDrawable->width;
-		box.y2 = box.y1 + dst->pDrawable->height;
-	}
+	} else
+		sna_render_picture_extents(dst, &box);
 
 	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
 					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 8e10af3..4990062 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2331,12 +2331,8 @@ gen6_composite_set_target(struct sna *sna,
 		box.y1 = y;
 		box.x2 = x + w;
 		box.y2 = y + h;
-	} else {
-		box.x1 = dst->pDrawable->x;
-		box.y1 = dst->pDrawable->y;
-		box.x2 = box.x1 + dst->pDrawable->width;
-		box.y2 = box.y1 + dst->pDrawable->height;
-	}
+	} else
+		sna_render_picture_extents(dst, &box);
 
 	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
 					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 51b002e..1f59eb3 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2438,12 +2438,8 @@ gen7_composite_set_target(struct sna *sna,
 		box.y1 = y;
 		box.x2 = x + w;
 		box.y2 = y + h;
-	} else {
-		box.x1 = dst->pDrawable->x;
-		box.y1 = dst->pDrawable->y;
-		box.x2 = box.x1 + dst->pDrawable->width;
-		box.y2 = box.y1 + dst->pDrawable->height;
-	}
+	} else
+		sna_render_picture_extents(dst, &box);
 
 	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
 					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 64f8a46..216937a 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -140,6 +140,25 @@ sna_render_get_alpha_gradient(struct sna *sna)
 }
 
 static inline void
+sna_render_picture_extents(PicturePtr p, BoxRec *box)
+{
+	box->x1 = p->pDrawable->x;
+	box->y1 = p->pDrawable->y;
+	box->x2 = p->pDrawable->x + p->pDrawable->width;
+	box->y2 = p->pDrawable->y + p->pDrawable->height;
+
+	if (box->x1 < p->pCompositeClip->extents.x1)
+		box->x1 = p->pCompositeClip->extents.x1;
+	if (box->y1 < p->pCompositeClip->extents.y1)
+		box->y1 = p->pCompositeClip->extents.y1;
+
+	if (box->x2 > p->pCompositeClip->extents.x2)
+		box->x2 = p->pCompositeClip->extents.x2;
+	if (box->y2 > p->pCompositeClip->extents.y2)
+		box->y2 = p->pCompositeClip->extents.y2;
+}
+
+static inline void
 sna_render_reduce_damage(struct sna_composite_op *op,
 			 int dst_x, int dst_y,
 			 int width, int height)
commit 853beff4306d5a168e310af82d0ddf9db319fdce
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 19 08:30:59 2012 +0100

    sna/dri: Add some DBG and assertions to validate names passed back to clients
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index b1fba20..1f65b0f 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -227,11 +227,15 @@ sna_dri_create_buffer(DrawablePtr draw,
 		pixmap = get_drawable_pixmap(draw);
 		buffer = sna_pixmap_get_buffer(pixmap);
 		if (buffer) {
-			DBG(("%s: reusing front buffer attachment\n",
-			     __FUNCTION__));
-
 			private = get_private(buffer);
+
+			DBG(("%s: reusing front buffer attachment, pixmap=%ld, handle=%d, name=%d\n",
+			     __FUNCTION__, pixmap->drawable.serialNumber,
+			     private->bo->handle, buffer->name));
+
 			assert(private->pixmap == pixmap);
+			assert(sna_pixmap(pixmap)->gpu_bo == private->bo);
+			assert(kgem_bo_flink(&sna->kgem, private->bo) == buffer->name);
 
 			private->refcnt++;
 			return buffer;
@@ -429,6 +433,8 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 		       pixmap->drawable.width,
 		       pixmap->drawable.height);
 	sna_damage_destroy(&priv->cpu_damage);
+	list_del(&priv->list);
+	priv->cpu = false;
 	priv->undamaged = false;
 
 	assert(bo->refcnt);
@@ -957,6 +963,8 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	assert(sna_pixmap_get_buffer(sna->front) == info->front);
+
 	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
 		return false;
@@ -1152,6 +1160,7 @@ sna_dri_exchange_buffers(DrawablePtr draw,
 	DBG(("%s: front_bo pitch=%d, size=%d\n",
 	     __FUNCTION__, front_bo->pitch, kgem_bo_size(front_bo)));
 
+	assert(sna_pixmap_get_buffer(pixmap) == front);
 	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
 	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));
 
@@ -1322,6 +1331,8 @@ sna_dri_flip_continue(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	assert(sna_pixmap_get_buffer(get_drawable_pixmap(draw)) == info->front);
+
 	name = info->back->name;
 	bo = get_private(info->back)->bo;
 	assert(get_drawable_pixmap(draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
commit 8c1e60a15431b193b57d94a27b8e8de7c5a7e683
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 18 15:01:52 2012 +0100

    sna: Add some asserts useful for tracking an xserver drawing bug
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f10eda9..fbee637 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5281,6 +5281,10 @@ sna_fill_spans__fill(DrawablePtr drawable,
 			b->y2 = b->y1 + 1;
 			DBG(("%s: (%d, %d), (%d, %d)\n",
 			     __FUNCTION__, b->x1, b->y1, b->x2, b->y2));
+			assert(b->x1 >= drawable->x);
+			assert(b->x2 <= drawable->x + drawable->width);
+			assert(b->y1 >= drawable->y);
+			assert(b->y2 <= drawable->y + drawable->height);
 			if (b->x2 > b->x1) {
 				if (b != box &&
 				    b->y1 == b[-1].y2 &&
commit cd23ac69ffb670468b2993242ce0d596081bbca4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 18 13:39:33 2012 +0100

    sna/gen7: Add some ring switching sanity checks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 383caa4..51b002e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1784,10 +1784,13 @@ gen7_get_batch(struct sna *sna)
 		DBG(("%s: flushing batch: %d < %d+%d\n",
 		     __FUNCTION__, sna->kgem.surface - sna->kgem.nbatch,
 		     150, 4*8));
-		kgem_submit(&sna->kgem);
+		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
+	assert(sna->kgem.mode == KGEM_RENDER);
+	assert(sna->kgem.ring == KGEM_RENDER);
+
 	if (sna->render_state.gen7.needs_invariant)
 		gen7_emit_invariant(sna);
 }
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 073635f..832b3f0 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -344,6 +344,7 @@ static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
 static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
 {
 	assert(kgem->mode == KGEM_NONE);
+	assert(kgem->nbatch == 0);
 	kgem->context_switch(kgem, mode);
 	kgem->mode = mode;
 }
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a29227b..aa4031c 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -386,7 +386,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	Gamma zeros = { 0.0, 0.0, 0.0 };
 	int fd;
 
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s flags=%x, numEntities=%d\n",
+	     __FUNCTION__, flags, scrn->numEntities));
 
 	if (scrn->numEntities != 1)
 		return FALSE;
commit 22c84d02ade83c39940d76fc616a9122e535606a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 18 12:01:49 2012 +0100

    sna: Remove special casing of drawing depth=1 glyphs
    
    It turns out that they were not so special after all, and the "fast
    path" was missing the important handling such as validating the GC. As a
    result we ended up trying to render the glyphs through a planemask of 0
    and similar garbage resulting in corruption. An example given by Kaus
    Dittrich was the use of rotated glyphs in gnuplot.
    
    Reported-by: Klaus Dittrich
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbgc.c b/src/sna/fb/fbgc.c
index 0969040..83956c4 100644
--- a/src/sna/fb/fbgc.c
+++ b/src/sna/fb/fbgc.c
@@ -159,6 +159,9 @@ fbValidateGC(GCPtr gc, unsigned long changes, DrawablePtr drawable)
 		mask = FbFullMask(drawable->bitsPerPixel);
 		depthMask = FbFullMask(drawable->depth);
 
+		DBG(("%s: computing rrop mask=%08x, depthMask=%08x, fg=%08x, bg=%08x, planemask=%08x\n",
+		     __FUNCTION__, mask, depthMask, (int)gc->fgPixel, (int)gc->bgPixel, (int)gc->planemask));
+
 		pgc->fg = gc->fgPixel & mask;
 		pgc->bg = gc->bgPixel & mask;
 
@@ -178,6 +181,9 @@ fbValidateGC(GCPtr gc, unsigned long changes, DrawablePtr drawable)
 		pgc->xor = fbXor(gc->alu, pgc->fg, pgc->pm);
 		pgc->bgand = fbAnd(gc->alu, pgc->bg, pgc->pm);
 		pgc->bgxor = fbXor(gc->alu, pgc->bg, pgc->pm);
+
+		DBG(("%s: rrop fg=%08x, bg=%08x, pm=%08x, and=%08x, xor=%08x, bgand=%08x, bgxor=%08x\n",
+		     __FUNCTION__, pgc->fg, pgc->bg, pgc->pm, pgc->and, pgc->xor, pgc->bgand, pgc->bgxor));
 	}
 
 	if (changes & GCDashList) {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 58fea22..f10eda9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12552,9 +12552,6 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	long unsigned i, n;
 	uint32_t fg;
 
-	if (drawable->depth < 8)
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -12579,22 +12576,22 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 		return x + extents.overallRight;
 
 	if (FORCE_FALLBACK)
-		goto force_fallback;
+		goto fallback;
 
 	if (!ACCEL_POLY_TEXT8)
-		goto force_fallback;
+		goto fallback;
 
 	if (sna_font_too_large(gc->font))
-		goto force_fallback;
+		goto fallback;
 
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		goto force_fallback;
+		goto fallback;
 
 	if (!gc_is_solid(gc, &fg))
-		goto force_fallback;
+		goto fallback;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1, true)) {
-force_fallback:
+fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
@@ -12615,22 +12612,6 @@ out_gc:
 out:
 	RegionUninit(&region);
 	return x + extents.overallRight;
-
-fallback:
-	DBG(("%s: fallback -- depth=%d\n", __FUNCTION__, drawable->depth));
-	gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
-			     Linear8Bit, &n, info);
-	if (n == 0)
-		return x;
-
-	extents.overallWidth = x;
-	for (i = 0; i < n; i++)
-		extents.overallWidth += info[i]->metrics.characterWidth;
-
-	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
-	fbPolyGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
-
-	return extents.overallWidth;
 }
 
 static int
@@ -12645,9 +12626,6 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	long unsigned i, n;
 	uint32_t fg;
 
-	if (drawable->depth < 8)
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -12672,22 +12650,22 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 		return x + extents.overallRight;
 
 	if (FORCE_FALLBACK)
-		goto force_fallback;
+		goto fallback;
 
 	if (!ACCEL_POLY_TEXT16)
-		goto force_fallback;
+		goto fallback;
 
 	if (sna_font_too_large(gc->font))
-		goto force_fallback;
+		goto fallback;
 
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		goto force_fallback;
+		goto fallback;
 
 	if (!gc_is_solid(gc, &fg))
-		goto force_fallback;
+		goto fallback;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1, true)) {
-force_fallback:
+fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
@@ -12709,23 +12687,6 @@ out_gc:
 out:
 	RegionUninit(&region);
 	return x + extents.overallRight;
-
-fallback:
-	DBG(("%s: fallback -- depth=%d\n", __FUNCTION__, drawable->depth));
-	gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
-			     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
-			     &n, info);
-	if (n == 0)
-		return x;
-
-	extents.overallWidth = x;
-	for (i = 0; i < n; i++)
-		extents.overallWidth += info[i]->metrics.characterWidth;
-
-	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
-	fbPolyGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
-
-	return extents.overallWidth;
 }
 
 static void
@@ -12739,9 +12700,6 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	RegionRec region;
 	long unsigned i, n;
 
-	if (drawable->depth < 8)
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -12778,20 +12736,20 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	     region.extents.x2, region.extents.y2));
 
 	if (FORCE_FALLBACK)
-		goto force_fallback;
+		goto fallback;
 
 	if (!ACCEL_IMAGE_TEXT8)
-		goto force_fallback;
+		goto fallback;
 
 	if (sna_font_too_large(gc->font))
-		goto force_fallback;
+		goto fallback;
 
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		goto force_fallback;
+		goto fallback;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region,
 			   gc->fgPixel, gc->bgPixel, false)) {
-force_fallback:
+fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
@@ -12811,16 +12769,6 @@ out_gc:
 	}
 out:
 	RegionUninit(&region);
-	return;
-
-fallback:
-	DBG(("%s: fallback, depth=%d\n", __FUNCTION__, drawable->depth));
-	gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
-			     Linear8Bit, &n, info);
-	if (n) {
-		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
-		fbImageGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
-	}
 }
 
 static void
@@ -12834,9 +12782,6 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	RegionRec region;
 	long unsigned i, n;
 
-	if (drawable->depth < 8)
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -12873,20 +12818,20 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	     region.extents.x2, region.extents.y2));
 
 	if (FORCE_FALLBACK)
-		goto force_fallback;
+		goto fallback;
 
 	if (!ACCEL_IMAGE_TEXT16)
-		goto force_fallback;
+		goto fallback;
 
 	if (sna_font_too_large(gc->font))
-		goto force_fallback;
+		goto fallback;
 
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		goto force_fallback;
+		goto fallback;
 
 	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region,
 			   gc->fgPixel, gc->bgPixel, false)) {
-force_fallback:
+fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
@@ -12907,17 +12852,6 @@ out_gc:
 	}
 out:
 	RegionUninit(&region);
-	return;
-
-fallback:
-	DBG(("%s: fallback -- depth=%d\n", __FUNCTION__, drawable->depth));
-	gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
-			     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
-			     &n, info);
-	if (n) {
-		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
-		fbImageGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
-	}
 }
 
 /* XXX Damage bypasses the Text interface and so we lose our custom gluphs */
commit 71fa350325bb06a6141f89ef14431d96f0c4956a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 17 13:40:24 2012 +0100

    sna: Check that we do not keep a GPU bo around after flushing a ShmPixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6832a7c..58fea22 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13754,6 +13754,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			     priv->pixmap->drawable.serialNumber));
 			ret = sna_pixmap_move_to_cpu(priv->pixmap,
 						     MOVE_READ | MOVE_WRITE);
+			assert(!ret || priv->gpu_bo == NULL);
 			if (priv->pixmap->refcnt == 0)
 				__sna_free_pixmap(sna, priv->pixmap, priv);
 		} else {
commit 88502b32d5d2b826fc08344392c98300341410e1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 16 09:02:46 2012 +0100

    2.20.8 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 8836a11..62ec266 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,49 @@
+Release 2.20.8 (2012-09-16)
+===========================
+Another new small feature, another new release. And a few more bugs
+fixed as well! But what is this new feature, do I hear you ask? Why, it
+is nothing less than enabling the ValleyView SDV! The lucky person to
+have their hands on one will now be able to enjoy X in full TechniColor.
+
+For the rest of us, a few more bugs were fixed with interesting
+combinations of software and rendering patterns.
+
+ * Add an extra layer of defence against trying to use a non-GEM device
+   with UXA. This should already be taken care of with the new probe,
+   but the extra sanity check already existed in the code but was doing
+   nothing.
+   https://bugs.launchpad.net/ubuntu/+source/xserver-xorg-video-intel/+bug/962892
+
+ * Fix computation of valid CRTCs bitmask for ZaphodHead
+   Some systems can only handle certain outputs on certain pipes
+   (Screens), and this information was not being propagated through to X
+   and randr correctly, breaking valid configurations and not detecting
+   invalid configurations correctly.
+
+ * Disable global glyph caching with ZaphodHeads.
+   The glyph privates need to be Screen private, but at the moment are
+   global leading to conflicts and invalid rendering with multiple heads.
+   https://bugs.freedesktop.org/show_bug.cgi?id=54707
+
+ * Prevent direct read back of unmappable buffers
+   https://bugs.freedesktop.org/show_bug.cgi?id=54808
+
+ * Tile large uploads whilst replacing the alpha channel
+   https://bugs.freedesktop.org/show_bug.cgi?id=54808
+
+ * Correct the source offset when converting a RENDER composite
+   operation into a BLT composite operation.
+   https://bugs.freedesktop.org/show_bug.cgi?id=54868
+
+ * Correct a minor typo in flattening alphamaps which caused the
+   replacement pixmap to often end up with a height of zero.
+   https://bugs.gentoo.org/show_bug.cgi?id=434860
+
+ * Don't discard the CPU damage if only part of is being replaced.
+   This is most evident when combining software renders like Opera with
+   accelerated rendering.
+   https://bugs.freedesktop.org/show_bug.cgi?id=54937
+
 Release 2.20.7 (2012-09-08)
 ===========================
 Continuing the flurry of releases, this week saw the release of
diff --git a/configure.ac b/configure.ac
index 2d651e1..6b0b0a8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.7],
+        [2.20.8],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 395d738b5086f24659b56b4b3536bffaebad8a3e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 15 21:18:35 2012 +0100

    sna: Improve handling of probe failure during initialisation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f68b054..fc7c881 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -880,8 +880,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
 
 	VG_CLEAR(aperture);
-	aperture.aper_size = 64*1024*1024;
+	aperture.aper_size = 0;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+	if (aperture.aper_size == 0)
+		aperture.aper_size = 64*1024*1024;
 
 	kgem->aperture_total = aperture.aper_size;
 	kgem->aperture_high = aperture.aper_size * 3/4;
@@ -949,11 +951,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
 
-	if (kgem->max_upload_tile_size > kgem->large_object_size)
-		kgem->max_upload_tile_size = kgem->large_object_size;
-	if (kgem->max_copy_tile_size > kgem->large_object_size)
-		kgem->max_copy_tile_size = kgem->large_object_size;
-
 	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
commit fe78904a45979d47bb092ce37d321854e2418344
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 22:15:14 2012 +0100

    Add basic support for ValleyView
    
    Bind to the ValleyView SDV for verifying the render routines.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.h b/src/intel_driver.h
index ac02cc7..b719062 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -229,6 +229,8 @@
 #define PCI_CHIP_HASWELL_CRW_S_GT2	0x0D2A
 #define PCI_CHIP_HASWELL_CRW_S_GT2_PLUS	0x0D3A
 
+#define PCI_CHIP_VALLEYVIEW_PO		0x0f30
+
 #endif
 
 #define I85X_CAPID			0x44
diff --git a/src/intel_module.c b/src/intel_module.c
index 65a91b1..bc0b6d2 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -103,6 +103,10 @@ static const struct intel_device_info intel_ivybridge_info = {
 	.gen = 70,
 };
 
+static const struct intel_device_info intel_valleyview_info = {
+	.gen = 70,
+};
+
 static const struct intel_device_info intel_haswell_info = {
 	.gen = 75,
 };
@@ -192,6 +196,7 @@ static const SymTabRec _intel_chipsets[] = {
 	{PCI_CHIP_HASWELL_CRW_S_GT1,		"Haswell CRW Server (GT1)" },
 	{PCI_CHIP_HASWELL_CRW_S_GT2,		"Haswell CRW Server (GT2)" },
 	{PCI_CHIP_HASWELL_CRW_S_GT2_PLUS,	"Haswell CRW Server (GT2+)" },
+	{PCI_CHIP_VALLEYVIEW_PO,		"ValleyView PO board" },
 	{-1,					NULL}
 };
 #define NUM_CHIPSETS (sizeof(_intel_chipsets) / sizeof(_intel_chipsets[0]))
@@ -301,6 +306,8 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),
 
+	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
+
 	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
 	{ 0, 0, 0 },
 };
commit 3b9259f7cedc179617bc24b3912d2d8d75c5a824
Author: Ian Stakenvicius <axs at gentoo.org>
Date:   Fri Sep 14 21:04:42 2012 +0100

    configure: Make udev dependency optional
    
    In order to support buildbots where the udev headers may exist on the
    build system but not the target, we need explicit control over optional
    dependencies.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54942

diff --git a/configure.ac b/configure.ac
index f94f140..2d651e1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -85,9 +85,20 @@ fi
 PKG_CHECK_MODULES(GEN4ASM, [intel-gen4asm >= 1.2], [gen4asm=yes], [gen4asm=no])
 AM_CONDITIONAL(HAVE_GEN4ASM, test x$gen4asm = xyes)
 
-PKG_CHECK_MODULES(UDEV, [libudev], [udev=yes], [udev=no])
-if test x"$udev" = xyes; then
-	AC_DEFINE(HAVE_UDEV,1,[Enable udev-based monitor hotplug detection])
+AC_ARG_ENABLE(udev,
+              AS_HELP_STRING([--disable-udev],
+                             [Disable udev-based monitor hotplug detection [default=auto]]),
+              [UDEV="$enableval"],
+              [UDEV=auto])
+
+if test x$UDEV != "xno"; then
+	PKG_CHECK_MODULES(UDEV, [libudev], [udev=yes], [udev=no])
+	if test x$UDEV == xyes -a x$udev != xyes; then
+		AC_MSG_ERROR([udev support requested but not found (libudev)])
+	fi
+	if test x$udev = xyes; then
+		AC_DEFINE(HAVE_UDEV,1,[Enable udev-based monitor hotplug detection])
+	fi
 fi
 
 PKG_CHECK_MODULES(X11, [x11 xrender xext pixman-1], [x11=yes], [x11=no])
commit d995705fb01842652a79076cbecee4392f653bfe
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 19:32:06 2012 +0100

    sna: Only discard CPU damage if we completely overwrite its extents
    
    If we are performing a clipped copy, then we must be careful not to
    completely discard the CPU damage as it may not be entirely replaced.
    
    Reported-by: Roman Jarosz <kedgedev at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54937
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7db7368..6832a7c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4410,7 +4410,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			sna_damage_destroy(&dst_priv->cpu_damage);
 			list_del(&dst_priv->list);
 		}
-		hint |= IGNORE_CPU;
+		if (region->data == NULL)
+			hint |= IGNORE_CPU;
 	}
 
 	bo = sna_drawable_use_bo(&dst_pixmap->drawable, hint,
commit deacab87e4ba7fe09f0f16568dc2848949f2707a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 17:01:13 2012 +0100

    sna/dri: flip->next_front.bo is owned by the DRI drawable not by the flip
    
    Be careful not to delete the reference we presume we hold as it is
    borrowed from the DRI drawable.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index d6cc2aa..b1fba20 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -941,9 +941,6 @@ sna_dri_frame_event_info_free(struct sna *sna,
 	if (info->old_front.bo)
 		kgem_bo_destroy(&sna->kgem, info->old_front.bo);
 
-	if (info->next_front.bo)
-		kgem_bo_destroy(&sna->kgem, info->next_front.bo);
-
 	if (info->cache.bo)
 		kgem_bo_destroy(&sna->kgem, info->cache.bo);
 
@@ -1431,7 +1428,6 @@ static void sna_dri_flip_event(struct sna *sna,
 
 			flip->cache = flip->old_front;
 			flip->old_front = flip->next_front;
-			flip->next_front.bo = NULL;
 
 			flip->count = sna_page_flip(sna,
 						    get_private(flip->front)->bo,
@@ -1457,8 +1453,6 @@ static void sna_dri_flip_event(struct sna *sna,
 			sna->dri.flip_pending = flip;
 		} else {
 finish_async_flip:
-			flip->next_front.bo = NULL;
-
 			DBG(("%s: async flip completed\n", __FUNCTION__));
 			sna_dri_frame_event_info_free(sna, draw, flip);
 		}
commit 5369408d596a0611d4f76333dac2e1c93e63e8b6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 16:12:00 2012 +0100

    sna/dri: And the hunt for the use-after-free continues...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 2296ab6..d6cc2aa 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -196,11 +196,13 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 
 constant static inline void *sna_pixmap_get_buffer(PixmapPtr pixmap)
 {
+	assert(pixmap->refcnt);
 	return ((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[2];
 }
 
 static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
 {
+	assert(pixmap->refcnt);
 	((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[2] = ptr;
 }
 
@@ -429,8 +431,11 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	sna_damage_destroy(&priv->cpu_damage);
 	priv->undamaged = false;
 
-	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
-	priv->gpu_bo = ref(bo);
+	assert(bo->refcnt);
+	if (priv->gpu_bo != bo) {
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+		priv->gpu_bo = ref(bo);
+	}
 	if (bo->domain != DOMAIN_GPU)
 		bo->domain = DOMAIN_NONE;
 
commit 8e9d64203428b344433ac8f86dc1f06d2c32529a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 14:06:38 2012 +0100

    sna: Fix typo for sna_render_picture_flatten()
    
    Pass it the correct value for the pixmap height, and not its 'y'
    coordinate!
    
    Reported-by: Eugene Rosenzweig <ugn at outlook.com>
    Bugzilla: https://bugs.gentoo.org/show_bug.cgi?id=434860
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 4493bf6..5844922 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1500,7 +1500,7 @@ sna_render_picture_fixup(struct sna *sna,
 		DBG(("%s: alphamap\n", __FUNCTION__));
 		if (is_gpu(picture->pDrawable) || is_gpu(picture->alphaMap->pDrawable)) {
 			return sna_render_picture_flatten(sna, picture, channel,
-							  x, y, w, y, dst_x, dst_y);
+							  x, y, w, h, dst_x, dst_y);
 		}
 
 		goto do_fixup;
commit 6ee9969f0e99975a24e113af9591909d9023d8af
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 13:04:50 2012 +0100

    sna/dri: Update AsyncSwap for recent changes in tracking DRI drawables
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index de0f84d..2296ab6 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1417,7 +1417,9 @@ static void sna_dri_flip_event(struct sna *sna,
 		     __FUNCTION__, flip->pipe,
 		     sna->dri.flip_pending != NULL,
 		     flip->front->name != flip->old_front.name));
-		assert(sna->dri.flip_pending == flip);
+
+		if (sna->dri.flip_pending)
+			goto finish_async_flip;
 
 		if (flip->front->name != flip->next_front.name) {
 			DBG(("%s: async flip continuing\n", __FUNCTION__));
@@ -1435,6 +1437,8 @@ static void sna_dri_flip_event(struct sna *sna,
 			flip->next_front.bo = get_private(flip->front)->bo;
 			flip->next_front.name = flip->front->name;
 			flip->off_delay = 5;
+
+			sna->dri.flip_pending = flip;
 		} else if (--flip->off_delay) {
 			DBG(("%s: queuing no-flip [delay=%d]\n",
 			     __FUNCTION__, flip->off_delay));
@@ -1444,12 +1448,13 @@ static void sna_dri_flip_event(struct sna *sna,
 						    flip, flip->pipe);
 			if (flip->count == 0)
 				goto finish_async_flip;
+
+			sna->dri.flip_pending = flip;
 		} else {
 finish_async_flip:
 			flip->next_front.bo = NULL;
 
 			DBG(("%s: async flip completed\n", __FUNCTION__));
-			sna->dri.flip_pending = NULL;
 			sna_dri_frame_event_info_free(sna, draw, flip);
 		}
 		break;
@@ -1992,6 +1997,7 @@ blit:
 			goto blit;
 
 		info->client = client;
+		info->draw = draw;
 		info->type = DRI2_ASYNC_FLIP;
 		info->pipe = pipe;
 		info->front = front;
commit d2dbb991384f2b820a714eba262691ed97a6a22a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 13:04:14 2012 +0100

    sna/dri: Add a bunch of assertions for hunting a use-after-free
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 36122e2..a0129e4 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2495,6 +2495,7 @@ sna_page_flip(struct sna *sna,
 	int count;
 
 	DBG(("%s: handle %d attached\n", __FUNCTION__, bo->handle));
+	assert(bo->refcnt);
 
 	kgem_submit(&sna->kgem);
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index ff6b587..de0f84d 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -115,6 +115,7 @@ get_private(DRI2Buffer2Ptr buffer)
 
 static inline struct kgem_bo *ref(struct kgem_bo *bo)
 {
+	assert(bo->refcnt);
 	bo->refcnt++;
 	return bo;
 }
@@ -877,6 +878,8 @@ sna_dri_remove_frame_event(WindowPtr win,
 
 	while (chain->chain != info)
 		chain = chain->chain;
+	assert(chain != info);
+	assert(info->chain != chain);
 	chain->chain = info->chain;
 }
 
@@ -912,8 +915,11 @@ sna_dri_add_frame_event(DrawablePtr draw, struct sna_dri_frame_event *info)
 		return;
 	}
 
+	assert(chain != info);
 	while (chain->chain != NULL)
 		chain = chain->chain;
+
+	assert(chain != info);
 	chain->chain = info;
 }
 
@@ -2031,6 +2037,7 @@ blit:
 				    CREATE_SCANOUT | CREATE_EXACT);
 		name = kgem_bo_flink(&sna->kgem, bo);
 	}
+	assert(bo->refcnt);
 	get_private(info->back)->bo = bo;
 	info->back->name = name;
 
commit d8756091474aeb44e5d8eed6b20b65c23b6574e7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 12:17:28 2012 +0100

    sna/dri: Version bump for prototypical AsyncSwap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 8dc2f01..ff6b587 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -50,7 +50,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #error DRI2 version supported by the Xserver is too old
 #endif
 
-#if DRI2INFOREC_VERSION < 9
+#if DRI2INFOREC_VERSION < 10
 #define USE_ASYNC_SWAP 0
 #endif
 
@@ -2250,7 +2250,7 @@ bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 #endif
 
 #if USE_ASYNC_SWAP
-	info.version = 9;
+	info.version = 10;
 	info.AsyncSwap = sna_dri_async_swap;
 #endif
 
commit 0fc3d020f3086b5c6ecef1fed1aaedef7aa52b01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 14 09:49:00 2012 +0100

    Skip adding unknown special modes
    
    In light of the discussion on how to add stereo modes it was revealed
    that we would happily add modes with unknown and unhandled strange
    flags. Adam Jackson mentioned that he has plans to fix that with some
    upcoming work, but as a first step we can simply eradicate them whilst
    pondering how to support the stereoscopic vision of the future.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 0372f9f..bf16049 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -52,6 +52,8 @@
 
 #include "intel_glamor.h"
 
+#define KNOWN_MODE_FLAGS ((1<<14)-1)
+
 struct intel_mode {
 	int fd;
 	uint32_t fb_id;
@@ -316,7 +318,7 @@ mode_from_kmode(ScrnInfoPtr scrn,
 	mode->VTotal = kmode->vtotal;
 	mode->VScan = kmode->vscan;
 
-	mode->Flags = kmode->flags; //& FLAG_BITS;
+	mode->Flags = kmode->flags;
 	mode->name = strdup(kmode->name);
 
 	if (kmode->type & DRM_MODE_TYPE_DRIVER)
@@ -324,6 +326,9 @@ mode_from_kmode(ScrnInfoPtr scrn,
 	if (kmode->type & DRM_MODE_TYPE_PREFERRED)
 		mode->type |= M_T_PREFERRED;
 
+	if (mode->status == MODE_OK && kmode->flags & ~KNOWN_MODE_FLAGS)
+		mode->status = MODE_BAD; /* unknown flags => unhandled */
+
 	xf86SetModeCrtc (mode, scrn->adjustFlags);
 }
 
@@ -347,7 +352,7 @@ mode_to_kmode(ScrnInfoPtr scrn,
 	kmode->vtotal = mode->VTotal;
 	kmode->vscan = mode->VScan;
 
-	kmode->flags = mode->Flags; //& FLAG_BITS;
+	kmode->flags = mode->Flags;
 	if (mode->name)
 		strncpy(kmode->name, mode->name, DRM_DISPLAY_MODE_LEN);
 	kmode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a9c5440..36122e2 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -51,6 +51,8 @@
 
 #include "intel_options.h"
 
+#define KNOWN_MODE_FLAGS ((1<<14)-1)
+
 #if 0
 #define __DBG DBG
 #else
@@ -549,7 +551,7 @@ mode_from_kmode(ScrnInfoPtr scrn,
 	mode->VTotal = kmode->vtotal;
 	mode->VScan = kmode->vscan;
 
-	mode->Flags = kmode->flags; //& FLAG_BITS;
+	mode->Flags = kmode->flags;
 	mode->name = strdup(kmode->name);
 
 	if (kmode->type & DRM_MODE_TYPE_DRIVER)
@@ -557,6 +559,9 @@ mode_from_kmode(ScrnInfoPtr scrn,
 	if (kmode->type & DRM_MODE_TYPE_PREFERRED)
 		mode->type |= M_T_PREFERRED;
 
+	if (mode->status == MODE_OK && kmode->flags & ~KNOWN_MODE_FLAGS)
+		mode->status = MODE_BAD; /* unknown flags => unhandled */
+
 	xf86SetModeCrtc (mode, scrn->adjustFlags);
 }
 
@@ -578,7 +583,7 @@ mode_to_kmode(struct drm_mode_modeinfo *kmode, DisplayModePtr mode)
 	kmode->vtotal = mode->VTotal;
 	kmode->vscan = mode->VScan;
 
-	kmode->flags = mode->Flags; //& FLAG_BITS;
+	kmode->flags = mode->Flags;
 	if (mode->name)
 		strncpy(kmode->name, mode->name, DRM_DISPLAY_MODE_LEN);
 	kmode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
commit 35d81d97b38d05f132a4823e57fff4012e966504
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 22:32:47 2012 +0100

    sna/gen7: Defensively program default GT values
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 53fcb2a..383caa4 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -83,6 +83,13 @@ struct gt_info {
 	} urb;
 };
 
+static const struct gt_info ivb_gt_info = {
+	.max_vs_threads = 16,
+	.max_gs_threads = 16,
+	.max_wm_threads = (16-1) << IVB_PS_MAX_THREADS_SHIFT,
+	.urb = { 128, 64, 64 },
+};
+
 static const struct gt_info ivb_gt1_info = {
 	.max_vs_threads = 36,
 	.max_gs_threads = 36,
@@ -4255,9 +4262,12 @@ static bool gen7_render_setup(struct sna *sna)
 	int i, j, k, l, m;
 
 	if (sna->kgem.gen == 70) {
-		state->info = &ivb_gt1_info;
-		if (DEVICE_ID(sna->PciInfo) & 0x20)
-			state->info = &ivb_gt2_info; /* XXX requires GT_MODE WiZ disabled */
+		state->info = &ivb_gt_info;
+		if (DEVICE_ID(sna->PciInfo) & 0xf) {
+			state->info = &ivb_gt1_info;
+			if (DEVICE_ID(sna->PciInfo) & 0x20)
+				state->info = &ivb_gt2_info; /* XXX requires GT_MODE WiZ disabled */
+		}
 	} else if (sna->kgem.gen == 75) {
 		state->info = &hsw_gt_info;
 	} else
commit 3fbdedbf9d509c5ca58ae0a01fe8e54dcc990cf8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 22:10:08 2012 +0100

    sna: Fix analysis of source extents for BLT composite
    
    After we have computed the source offset vector for the transformed
    source bo, we need to use that with respect to the destination rectangle
    to verify that the source sample is wholly within bounds.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 7d51823..ca61bd3 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1780,9 +1780,7 @@ gen2_render_composite(struct sna *sna,
 	case 1:
 		if (mask == NULL && tmp->src.bo &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 		break;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 60e197e..723dd5e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2868,9 +2868,7 @@ gen3_render_composite(struct sna *sna,
 	case 1:
 		if (mask == NULL && tmp->src.bo &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d8b76a1..ceef528 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2315,9 +2315,7 @@ gen4_render_composite(struct sna *sna,
 	case 1:
 		if (mask == NULL &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 5eff871..c9a2cc0 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2330,9 +2330,7 @@ gen5_render_composite(struct sna *sna,
 	case 1:
 		if (mask == NULL &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 363e8db..8e10af3 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2708,9 +2708,7 @@ gen6_render_composite(struct sna *sna,
 		if (mask == NULL &&
 		    prefer_blt_composite(sna, tmp) &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 705a17d..53fcb2a 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2811,9 +2811,7 @@ gen7_render_composite(struct sna *sna,
 		if (mask == NULL &&
 		    prefer_blt_composite(sna, tmp) &&
 		    sna_blt_composite__convert(sna,
-					       src_x, src_y,
-					       width, height,
-					       dst_x, dst_y,
+					       dst_x, dst_y, width, height,
 					       tmp))
 			return true;
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 757447b..b97df22 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1914,7 +1914,6 @@ bool
 sna_blt_composite__convert(struct sna *sna,
 			   int x, int y,
 			   int width, int height,
-			   int dst_x, int dst_y,
 			   struct sna_composite_op *tmp)
 {
 	uint32_t alpha_fixup;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 442c78d..03a7005 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -574,7 +574,6 @@ bool sna_blt_composite(struct sna *sna,
 bool sna_blt_composite__convert(struct sna *sna,
 				int x, int y,
 				int width, int height,
-				int dst_x, int dst_y,
 				struct sna_composite_op *tmp);
 
 bool sna_blt_fill(struct sna *sna, uint8_t alu,
commit 6c7f998f7bda4f4f1286e079412d5d3a5b2eb073
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 20:41:23 2012 +0100

    sna: Fix BLT composite offset
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54868
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 120d9a5..757447b 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1990,8 +1990,8 @@ sna_blt_composite__convert(struct sna *sna,
 	}
 
 	tmp->u.blt.src_pixmap = NULL;
-	tmp->u.blt.sx = x - dst_x;
-	tmp->u.blt.sy = y - dst_y;
+	tmp->u.blt.sx = tmp->src.offset[0];
+	tmp->u.blt.sy = tmp->src.offset[1];
 	DBG(("%s: blt dst offset (%d, %d), source offset (%d, %d), with alpha fixup? %x\n",
 	     __FUNCTION__,
 	     tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
commit 2575cd0d236b4e1694e3185a487ebfd8bfe6499f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 20:08:37 2012 +0100

    sna: Avoid corrupting the CPU bo when compositing fallback data through BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index c286918..120d9a5 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1866,10 +1866,11 @@ clear:
 			     __FUNCTION__));
 		} else {
 			ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
-			if (fallback)
-				ret = prepare_blt_put(sna, tmp, alpha_fixup);
+			if (fallback && !ret)
+				goto put;
 		}
 	} else {
+put:
 		if (!tmp->dst.bo) {
 			RegionRec region;
 
@@ -1879,6 +1880,14 @@ clear:
 			if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
 							MOVE_INPLACE_HINT | MOVE_WRITE))
 				return false;
+		} else {
+			if (tmp->dst.bo == sna_pixmap(tmp->dst.pixmap)->cpu_bo) {
+				assert(kgem_bo_is_busy(tmp->dst.bo));
+				tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
+								  FORCE_GPU | PREFER_GPU,
+								  &dst_box,
+								  &tmp->damage);
+			}
 		}
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
 	}
commit 5781de2e5ab0d7e8e62965bda714789879555ed4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 19:53:29 2012 +0100

    sna/gen3: Tidy NO_COMPOSITE debugging
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index c5ec9bc..60e197e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2811,17 +2811,6 @@ gen3_render_composite(struct sna *sna,
 		return false;
 	}
 
-#if NO_COMPOSITE
-	if (mask)
-		return false;
-
-	return sna_blt_composite(sna, op,
-				 src, dst,
-				 src_x, src_y,
-				 dst_x, dst_y,
-				 width, height, tmp);
-#endif
-
 	/* Try to use the BLT engine unless it implies a
 	 * 3D -> 2D context switch.
 	 */
@@ -4688,7 +4677,9 @@ bool gen3_render_init(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 
+#if !NO_COMPOSITE
 	render->composite = gen3_render_composite;
+#endif
 #if !NO_COMPOSITE_SPANS
 	render->check_composite_spans = gen3_check_composite_spans;
 	render->composite_spans = gen3_render_composite_spans;
commit dd1432b2c00c94ac75af4740b66b5cd1a573e261
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 19:47:29 2012 +0100

    sna: Do not attempt to change tiling if wedged
    
    The caller will just have to live with the current tiling, which should
    be fine in most cases; in the remaining cases, the GPU is wedged anyway...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5bff247..7db7368 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -564,6 +564,11 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 		return NULL;
 	}
 
+	if (wedged(sna)) {
+		DBG(("%s: can't convert bo, wedged\n", __FUNCTION__));
+		return NULL;
+	}
+
 	assert_pixmap_damage(pixmap);
 
 	bo = kgem_create_2d(&sna->kgem,
commit 8c7dd2219fa777bf6354c4e0ef38a2f09fe09675
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 17:14:23 2012 +0100

    sna: Prefer to use indirect uploads for very small updates
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index fa87133..a466f55 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -537,6 +537,8 @@ static bool upload_inplace(struct kgem *kgem,
 			   const BoxRec *box,
 			   int n, int bpp)
 {
+	unsigned int bytes;
+
 	if (!kgem_bo_can_map(kgem, bo))
 		return false;
 
@@ -547,16 +549,15 @@ static bool upload_inplace(struct kgem *kgem,
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (__kgem_bo_is_busy(kgem, bo)) {
-		unsigned int bytes = 0;
-		while (n--) {
-			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
-			box++;
-		}
-		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
+	bytes = 0;
+	while (n--) {
+		bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
+		box++;
 	}
-
-	return true;
+	if (__kgem_bo_is_busy(kgem, bo))
+		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
+	else
+		return bytes * bpp >> 12;
 }
 
 bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
commit d87a56ed1789e0c5058b302bb930d7e952ff3e5e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 13 09:57:30 2012 +0100

    sna: Teach sna_replace__xor() how to tile large uploads
    
    This path is hit using eog+cairo-1.10 and a large image, e.g.
    http://marsrovers.jpl.nasa.gov/gallery/press/opportunity/20120705a/PIA15689_Greeley_Pan_wDeck_L257F.jpg
    
    Reported-by: Michael Laß <bevan at bi-co.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54808
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 738ec8e..fa87133 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -81,7 +81,7 @@ static void read_boxes_inplace(struct kgem *kgem,
 	DBG(("%s x %d, tiling=%d\n", __FUNCTION__, n, bo->tiling));
 
 	if (!kgem_bo_can_map(kgem, bo))
-		return false;
+		return;
 
 	kgem_bo_submit(kgem, bo);
 
@@ -907,6 +907,8 @@ void sna_write_boxes__xor(struct sna *sna, PixmapPtr dst,
 {
 	struct kgem *kgem = &sna->kgem;
 	struct kgem_bo *src_bo;
+	BoxRec extents;
+	bool can_blt;
 	void *ptr;
 	int offset;
 	int n, cmd, br13;
@@ -923,65 +925,172 @@ fallback:
 		return;
 	}
 
-	/* Try to avoid switching rings... */
-	if (dst_bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) {
-		PixmapRec tmp;
-		BoxRec extents;
+	can_blt = kgem_bo_can_blt(kgem, dst_bo) &&
+		(box[0].x2 - box[0].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
+	extents = box[0];
+	for (n = 1; n < nbox; n++) {
+		if (box[n].x1 < extents.x1)
+			extents.x1 = box[n].x1;
+		if (box[n].x2 > extents.x2)
+			extents.x2 = box[n].x2;
 
-		/* XXX Composite? Not that we should ever reach here! */
+		if (can_blt)
+			can_blt = (box[n].x2 - box[n].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
 
-		extents = box[0];
-		for (n = 1; n < nbox; n++) {
-			if (box[n].x1 < extents.x1)
-				extents.x1 = box[n].x1;
-			if (box[n].x2 > extents.x2)
-				extents.x2 = box[n].x2;
+		if (box[n].y1 < extents.y1)
+			extents.y1 = box[n].y1;
+		if (box[n].y2 > extents.y2)
+			extents.y2 = box[n].y2;
+	}
 
-			if (box[n].y1 < extents.y1)
-				extents.y1 = box[n].y1;
-			if (box[n].y2 > extents.y2)
-				extents.y2 = box[n].y2;
-		}
+	/* Try to avoid switching rings... */
+	if (!can_blt || kgem->ring == KGEM_RENDER ||
+	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
+		PixmapRec tmp;
 
-		tmp.drawable.width = extents.x2 - extents.x1;
+		tmp.drawable.width  = extents.x2 - extents.x1;
 		tmp.drawable.height = extents.y2 - extents.y1;
-		tmp.drawable.depth = dst->drawable.depth;
+		tmp.drawable.depth  = dst->drawable.depth;
 		tmp.drawable.bitsPerPixel = dst->drawable.bitsPerPixel;
 		tmp.devPrivate.ptr = NULL;
 
 		assert(tmp.drawable.width);
 		assert(tmp.drawable.height);
 
-		src_bo = kgem_create_buffer_2d(kgem,
-					       tmp.drawable.width,
-					       tmp.drawable.height,
-					       tmp.drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE_INPLACE,
-					       &ptr);
-		if (!src_bo)
-			goto fallback;
+		DBG(("%s: upload (%d, %d)x(%d, %d), max %dx%d\n",
+		     __FUNCTION__,
+		     extents.x1, extents.y1,
+		     tmp.drawable.width, tmp.drawable.height,
+		     sna->render.max_3d_size, sna->render.max_3d_size));
+		if (must_tile(sna, tmp.drawable.width, tmp.drawable.height)) {
+			BoxRec tile, stack[64], *clipped, *c;
+			int step;
 
-		for (n = 0; n < nbox; n++) {
-			memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
-				   stride, src_bo->pitch,
-				   box[n].x1 + src_dx,
-				   box[n].y1 + src_dy,
-				   box[n].x1 - extents.x1,
-				   box[n].y1 - extents.y1,
-				   box[n].x2 - box[n].x1,
-				   box[n].y2 - box[n].y1,
-				   and, or);
-		}
+tile:
+			step = MIN(sna->render.max_3d_size - 4096 / dst->drawable.bitsPerPixel,
+				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
+			while (step * step * 4 > sna->kgem.max_upload_tile_size)
+				step /= 2;
 
-		n = sna->render.copy_boxes(sna, GXcopy,
-					   &tmp, src_bo, -extents.x1, -extents.y1,
-					   dst, dst_bo, dst_dx, dst_dy,
-					   box, nbox, 0);
+			DBG(("%s: tiling upload, using %dx%d tiles\n",
+			     __FUNCTION__, step, step));
+
+			if (n > ARRAY_SIZE(stack)) {
+				clipped = malloc(sizeof(BoxRec) * n);
+				if (clipped == NULL)
+					goto fallback;
+			} else
+				clipped = stack;
 
-		kgem_bo_destroy(&sna->kgem, src_bo);
+			for (tile.y1 = extents.y1; tile.y1 < extents.y2; tile.y1 = tile.y2) {
+				tile.y2 = tile.y1 + step;
+				if (tile.y2 > extents.y2)
+					tile.y2 = extents.y2;
 
-		if (!n)
-			goto fallback;
+				for (tile.x1 = extents.x1; tile.x1 < extents.x2; tile.x1 = tile.x2) {
+					tile.x2 = tile.x1 + step;
+					if (tile.x2 > extents.x2)
+						tile.x2 = extents.x2;
+
+					tmp.drawable.width  = tile.x2 - tile.x1;
+					tmp.drawable.height = tile.y2 - tile.y1;
+
+					src_bo = kgem_create_buffer_2d(kgem,
+								       tmp.drawable.width,
+								       tmp.drawable.height,
+								       tmp.drawable.bitsPerPixel,
+								       KGEM_BUFFER_WRITE_INPLACE,
+								       &ptr);
+					if (!src_bo) {
+						if (clipped != stack)
+							free(clipped);
+						goto fallback;
+					}
+
+					c = clipped;
+					for (n = 0; n < nbox; n++) {
+						*c = box[n];
+						if (!box_intersect(c, &tile))
+							continue;
+
+						DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+						     __FUNCTION__,
+						     c->x1, c->y1,
+						     c->x2, c->y2,
+						     src_dx, src_dy,
+						     c->x1 - tile.x1,
+						     c->y1 - tile.y1));
+						memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
+							   stride, src_bo->pitch,
+							   c->x1 + src_dx,
+							   c->y1 + src_dy,
+							   c->x1 - tile.x1,
+							   c->y1 - tile.y1,
+							   c->x2 - c->x1,
+							   c->y2 - c->y1,
+							   and, or);
+						c++;
+					}
+
+					if (c != clipped)
+						n = sna->render.copy_boxes(sna, GXcopy,
+									   &tmp, src_bo, -tile.x1, -tile.y1,
+									   dst, dst_bo, dst_dx, dst_dy,
+									   clipped, c - clipped, 0);
+					else
+						n = 1;
+
+					kgem_bo_destroy(&sna->kgem, src_bo);
+
+					if (!n) {
+						if (clipped != stack)
+							free(clipped);
+						goto fallback;
+					}
+				}
+			}
+
+			if (clipped != stack)
+				free(clipped);
+		} else {
+			src_bo = kgem_create_buffer_2d(kgem,
+						       tmp.drawable.width,
+						       tmp.drawable.height,
+						       tmp.drawable.bitsPerPixel,
+						       KGEM_BUFFER_WRITE_INPLACE,
+						       &ptr);
+			if (!src_bo)
+				goto fallback;
+
+			for (n = 0; n < nbox; n++) {
+				DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+				     __FUNCTION__,
+				     box[n].x1, box[n].y1,
+				     box[n].x2, box[n].y2,
+				     src_dx, src_dy,
+				     box[n].x1 - extents.x1,
+				     box[n].y1 - extents.y1));
+				memcpy_xor(src, ptr, tmp.drawable.bitsPerPixel,
+					   stride, src_bo->pitch,
+					   box[n].x1 + src_dx,
+					   box[n].y1 + src_dy,
+					   box[n].x1 - extents.x1,
+					   box[n].y1 - extents.y1,
+					   box[n].x2 - box[n].x1,
+					   box[n].y2 - box[n].y1,
+					   and, or);
+			}
+
+			n = sna->render.copy_boxes(sna, GXcopy,
+						   &tmp, src_bo, -extents.x1, -extents.y1,
+						   dst, dst_bo, dst_dx, dst_dy,
+						   box, nbox, 0);
+
+			kgem_bo_destroy(&sna->kgem, src_bo);
+
+			if (!n)
+				goto tile;
+		}
 
 		return;
 	}
commit 58a96f0f684fe0d7d1a7890c630539ef8b065d1e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 23:53:44 2012 +0100

    sna: Also remove bogus assertion from sna_replace__xor()
    
    Like the assertion in sna_replace() this is now incorrect due to the
    automatic flagging of large bo with flush.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index d17f387..738ec8e 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -1246,7 +1246,6 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
 	     pixmap->drawable.height,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
-	assert(!bo->flush);
 
 	if (kgem_bo_is_busy(bo)) {
 		struct kgem_bo *new_bo;
commit 6069e78238ca57e3433d528680fe04766a1e5307
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 16:22:49 2012 +0100

    sna: Keep a very small, short-lived cache of large buffers
    
    As we now regularly retire and so discard the temporary large buffers,
    we find them in short supply and ourselves wasting lots of time creating
    and destroying the transient buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d675ebb..f68b054 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -858,6 +858,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->active_buffers);
 	list_init(&kgem->flushing);
 	list_init(&kgem->large);
+	list_init(&kgem->large_inactive);
 	list_init(&kgem->snoop);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
@@ -1287,8 +1288,10 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(!bo->needs_flush);
 	assert(list_is_empty(&bo->vma));
 
+	kgem->need_expire = true;
+
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
-		kgem_bo_free(kgem, bo);
+		list_move(&bo->list, &kgem->large_inactive);
 		return;
 	}
 
@@ -1306,8 +1309,6 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 			kgem->vma[type].count++;
 		}
 	}
-
-	kgem->need_expire = true;
 }
 
 inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
@@ -2411,6 +2412,13 @@ bool kgem_expire_cache(struct kgem *kgem)
 		free(rq);
 	}
 
+	while (!list_is_empty(&kgem->large_inactive)) {
+		kgem_bo_free(kgem,
+			     list_first_entry(&kgem->large_inactive,
+					      struct kgem_bo, list));
+
+	}
+
 	expire = 0;
 	list_for_each_entry(bo, &kgem->snoop, list) {
 		if (bo->delta) {
@@ -3095,9 +3103,9 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		     __FUNCTION__, size, bucket));
 
 		if (flags & CREATE_INACTIVE)
-			goto create;
+			goto large_inactive;
 
-		tiled_height = kgem_aligned_height(kgem, height, I915_TILING_Y);
+		tiled_height = kgem_aligned_height(kgem, height, tiling);
 		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
 
 		list_for_each_entry(bo, &kgem->large, list) {
@@ -3105,18 +3113,28 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			assert(bo->refcnt == 0);
 			assert(bo->reusable);
 
-			if (bo->tiling) {
+			if (kgem->gen < 40) {
 				if (bo->pitch < pitch) {
 					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
 					     bo->tiling, tiling,
 					     bo->pitch, pitch));
 					continue;
 				}
-			} else
-				bo->pitch = untiled_pitch;
 
-			if (bo->pitch * tiled_height > bytes(bo))
-				continue;
+				if (bo->pitch * tiled_height > bytes(bo))
+					continue;
+			} else {
+				if (num_pages(bo) < size)
+					continue;
+
+				if (bo->pitch != pitch || bo->tiling != tiling) {
+					if (gem_set_tiling(kgem->fd, bo->handle,
+							   tiling, pitch) != tiling)
+						continue;
+
+					bo->pitch = pitch;
+				}
+			}
 
 			kgem_bo_remove_from_active(kgem, bo);
 
@@ -3129,6 +3147,39 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			return bo;
 		}
 
+large_inactive:
+		list_for_each_entry(bo, &kgem->large_inactive, list) {
+			assert(bo->refcnt == 0);
+			assert(bo->reusable);
+
+			if (size > num_pages(bo))
+				continue;
+
+			if (bo->tiling != tiling ||
+			    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
+				if (tiling != gem_set_tiling(kgem->fd,
+							     bo->handle,
+							     tiling, pitch))
+					continue;
+			}
+
+			if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
+				kgem_bo_free(kgem, bo);
+				break;
+			}
+
+			list_del(&bo->list);
+
+			bo->unique_id = kgem_get_unique_id(kgem);
+			bo->pitch = pitch;
+			bo->delta = 0;
+			DBG(("  1:from large inactive: pitch=%d, tiling=%d, handle=%d, id=%d\n",
+			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
+			bo->refcnt = 1;
+			return bo;
+		}
+
 		goto create;
 	}
 
@@ -3407,6 +3458,8 @@ search_inactive:
 	}
 
 create:
+	if (bucket >= NUM_CACHE_BUCKETS)
+		size = ALIGN(size, 1024);
 	handle = gem_create(kgem->fd, size);
 	if (handle == 0)
 		return NULL;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index fb8be3d..073635f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -123,6 +123,7 @@ struct kgem {
 
 	struct list flushing;
 	struct list large;
+	struct list large_inactive;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list snoop;
commit 1e2a03ad19664e846ece4110e53d26babb5a2159
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 16:15:22 2012 +0100

    sna: Propagate busyness when creating the proxy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0fabb01..d675ebb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4178,6 +4178,13 @@ struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
 
 	bo->proxy = kgem_bo_reference(target);
 	bo->delta = offset;
+
+	if (target->exec) {
+		list_move_tail(&bo->request, &kgem->next_request->buffers);
+		bo->exec = &_kgem_dummy_exec;
+	}
+	bo->rq = target->rq;
+
 	return bo;
 }
 
commit e28f5a2537821dc170a7ef78bf33ecbd3c032da0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 15:21:27 2012 +0100

    sna: Add some DBG as to why kgem_check_bo() flushes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d6a6d28..0fabb01 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3595,14 +3595,23 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	if (!num_pages)
 		return true;
 
-	if (kgem->aperture > kgem->aperture_low)
+	if (kgem->aperture > kgem->aperture_low && kgem_is_idle(kgem)) {
+		DBG(("%s: current aperture usage (%d) is greater than low water mark (%d)\n",
+		     __FUNCTION__, kgem->aperture, kgem->aperture_low));
 		return false;
+	}
 
-	if (num_pages + kgem->aperture > kgem->aperture_high)
+	if (num_pages + kgem->aperture > kgem->aperture_high) {
+		DBG(("%s: final aperture usage (%d) is greater than high water mark (%d)\n",
+		     __FUNCTION__, num_pages + kgem->aperture, kgem->aperture_high));
 		return false;
+	}
 
-	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
+	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) {
+		DBG(("%s: out of exec slots (%d + %d / %d)\n", __FUNCTION__,
+		     kgem->nexec, num_exec, KGEM_EXEC_SIZE(kgem)));
 		return false;
+	}
 
 	return true;
 }
commit 1af1dae1feefd6567017170bab241570fb528e5c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 15:20:49 2012 +0100

    sna: Ensure tiling upload buffers are trimmed to fit in the cache
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 727cb51..d6a6d28 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -947,6 +947,12 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->large_object_size = MAX_CACHE_SIZE;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
+
+	if (kgem->max_upload_tile_size > kgem->large_object_size)
+		kgem->max_upload_tile_size = kgem->large_object_size;
+	if (kgem->max_copy_tile_size > kgem->large_object_size)
+		kgem->max_copy_tile_size = kgem->large_object_size;
+
 	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
commit e5f137807c318588f546960668345eef34159e26
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 14:11:43 2012 +0100

    sna: Avoid fallbacks to shadow pixels if FORCE_GPU is in effect
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=54808
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e12585e..5bff247 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2613,9 +2613,12 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	}
 
 	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
-		DBG(("%s: use CPU fast path (all-damaged)\n", __FUNCTION__));
-		assert(priv->gpu_damage == NULL);
-		goto use_cpu_bo;
+		if ((flags & FORCE_GPU) == 0 || priv->cpu_bo) {
+			DBG(("%s: use CPU fast path (all-damaged), and not forced-gpu\n",
+			     __FUNCTION__));
+			assert(priv->gpu_damage == NULL);
+			goto use_cpu_bo;
+		}
 	}
 
 	DBG(("%s: gpu? %d, damaged? %d; cpu? %d, damaged? %d\n", __FUNCTION__,
@@ -2653,9 +2656,10 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 				}
 			}
 
-			if (priv->cpu_damage) {
-				if ((flags & (PREFER_GPU | FORCE_GPU)) == 0) {
-					DBG(("%s: prefer cpu", __FUNCTION__));
+			if ((flags & FORCE_GPU) == 0 && priv->cpu_damage) {
+				if ((flags & PREFER_GPU) == 0) {
+					DBG(("%s: already damaged and prefer cpu",
+					     __FUNCTION__));
 					goto use_cpu_bo;
 				}
 
commit 0d17208a66a7e54d4106f8a4034d3a928e28bb62
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 14:04:50 2012 +0100

    sna: Avoid readback inplace if the target is unmappable
    
    We have to use the tiling indirect path, or else fail.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=54808
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 0860dec..d17f387 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -80,6 +80,9 @@ static void read_boxes_inplace(struct kgem *kgem,
 
 	DBG(("%s x %d, tiling=%d\n", __FUNCTION__, n, bo->tiling));
 
+	if (!kgem_bo_can_map(kgem, bo))
+		return false;
+
 	kgem_bo_submit(kgem, bo);
 
 	src = kgem_bo_map(kgem, bo);
@@ -114,6 +117,9 @@ static void read_boxes_inplace(struct kgem *kgem,
 
 static bool download_inplace(struct kgem *kgem, struct kgem_bo *bo)
 {
+	if (!kgem_bo_can_map(kgem, bo))
+		return false;
+
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
@@ -531,12 +537,12 @@ static bool upload_inplace(struct kgem *kgem,
 			   const BoxRec *box,
 			   int n, int bpp)
 {
-	if (FORCE_INPLACE)
-		return FORCE_INPLACE > 0;
-
 	if (!kgem_bo_can_map(kgem, bo))
 		return false;
 
+	if (FORCE_INPLACE)
+		return FORCE_INPLACE > 0;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.
commit 4b4abdaae94d164d5d0b2755907e76b9cbe0c988
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 12 13:47:26 2012 +0100

    sna: Flush after operating on large buffers
    
    As we know that such operations are likely to be slow and consume
    precious GTT space, mark them as candidates for flushing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 902cba7..727cb51 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1286,6 +1286,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 		return;
 	}
 
+	assert(bo->flush == false);
 	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
 	if (bo->map) {
 		int type = IS_CPU_MAP(bo->map);
@@ -1504,7 +1505,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->snoop == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
-	assert(bo->flush == false);
 
 	if (bo->rq) {
 		struct list *cache;
@@ -3416,13 +3416,19 @@ create:
 	bo->pitch = pitch;
 	if (tiling != I915_TILING_NONE)
 		bo->tiling = gem_set_tiling(kgem->fd, handle, tiling, pitch);
+	if (bucket >= NUM_CACHE_BUCKETS) {
+		DBG(("%s: marking large bo for automatic flushing\n",
+		     __FUNCTION__));
+		bo->flush = true;
+	}
 
 	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
 
 	debug_alloc__bo(kgem, bo);
 
-	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d\n",
-	     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d, num_pages=%d [%d], bucket=%d\n",
+	     bo->pitch, bo->tiling, bo->handle, bo->unique_id,
+	     size, num_pages(bo), bucket(bo)));
 	return bo;
 }
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 1dc9c67..fb8be3d 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -532,6 +532,8 @@ static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
 	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	if (kgem_flush(kgem))
+		kgem_submit(kgem);
 	if (bo->rq && !bo->exec)
 		kgem_retire(kgem);
 	return kgem_bo_is_busy(bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4ef9019..e12585e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3524,10 +3524,9 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		}
 
 		/* And mark as having a valid GTT mapping for future uploads */
-		if (priv->stride &&
-		    !kgem_bo_is_busy(priv->gpu_bo)) {
+		if (priv->stride && kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
 			pixmap->devPrivate.ptr =
-				kgem_bo_map(&sna->kgem, priv->gpu_bo);
+				kgem_bo_map__async(&sna->kgem, priv->gpu_bo);
 			if (pixmap->devPrivate.ptr) {
 				priv->mapped = true;
 				pixmap->devKind = priv->gpu_bo->pitch;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 733e542..0860dec 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -1165,7 +1165,6 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.height,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling, busy));
-	assert(!bo->flush);
 
 	if ((busy || !kgem_bo_can_map(kgem, bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
commit 15911f533d6a7ef40e42ba5921fac7c62b290f8b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 11 14:59:53 2012 +0100

    prime: Reorder probe messages so that the driver prefix is correct
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index b4f0ad2..65a91b1 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -557,19 +557,19 @@ intel_platform_probe(DriverPtr driver,
 	if (scrn == NULL)
 		return FALSE;
 
-	xf86DrvMsg(scrn->scrnIndex, X_INFO,
-		   "using device path '%s'\n", path ? path : "Default device");
-
-	if (xf86IsEntitySharable(entity_num))
-		xf86SetEntityShared(entity_num);
-	xf86AddEntityToScreen(scrn, entity_num);
-
 	scrn->driverVersion = INTEL_VERSION;
 	scrn->driverName = INTEL_DRIVER_NAME;
 	scrn->name = INTEL_NAME;
 	scrn->driverPrivate = (void *)(match_data | 1);
 	scrn->Probe = NULL;
 
+	if (xf86IsEntitySharable(entity_num))
+		xf86SetEntityShared(entity_num);
+	xf86AddEntityToScreen(scrn, entity_num);
+
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "using device path '%s'\n", path ? path : "Default device");
+
 	switch (get_accel_method()) {
 #if USE_SNA
         case SNA: return sna_init_scrn(scrn, entity_num);
commit bc73acbd4da53bc50752c0413adcd0ce876e0a03
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 10 18:55:04 2012 +0100

    uxa: Error out after failing to become DRM master during PreInit
    
    As we set the card fd to -1 upon failure, the error here is
    non-recoverable, so make sure it gets propagated and we abort loading
    the intel ddx.
    
    Bugzilla: https://bugs.launchpad.net/ubuntu/+source/xserver-xorg-video-intel/+bug/962892
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 1b2c616..41f0311 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -505,9 +505,11 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	intel->PciInfo = xf86GetPciInfoForEntity(intel->pEnt->index);
 
-	if (!intel_open_drm_master(scrn))
+	if (!intel_open_drm_master(scrn)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "Failed to become DRM master.\n");
+		return FALSE;
+	}
 
 	scrn->monitor = scrn->confScreen->monitor;
 	scrn->progClock = TRUE;
commit b0d14071f7b60729c223af925935227393fbd3ee
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 10 13:53:45 2012 +0100

    sna: Workaround issue with global glyph privates and shared ZaphodHeads
    
    Under ZaphodHeads we end up with multiple screens accessing the common
    sna_glyph_key and so cause conflicting updates and erroneous references
    into the screen-local texture atlases.
    
    Two approaches can be tried here. Transition to a screen-specific
    private key introduced with xorg-1.13, or to move the glyph cache (and
    the rest of the gpu state tracker) down into the device private rather
    than screen private. This is neither of those, but a workaround to avoid
    reusing the incorrect entries from shared screens.
    
    Reported-by: Stephen Liang <inteldriver at angrywalls.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54707
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 44e7f6e..382c0a5 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -709,6 +709,12 @@ void sna_glyphs(CARD8 op,
 		int nlist,
 		GlyphListPtr list,
 		GlyphPtr *glyphs);
+void sna_glyphs__shared(CARD8 op,
+			PicturePtr src,
+			PicturePtr dst,
+			PictFormatPtr mask,
+			INT16 src_x, INT16 src_y,
+			int nlist, GlyphListPtr list, GlyphPtr *glyphs);
 void sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph);
 void sna_glyphs_close(struct sna *sna);
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7574847..4ef9019 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -14357,6 +14357,8 @@ static bool sna_picture_init(ScreenPtr screen)
 	ps->Composite = sna_composite;
 	ps->CompositeRects = sna_composite_rectangles;
 	ps->Glyphs = sna_glyphs;
+	if (xf86IsEntityShared(xf86ScreenToScrn(screen)->entityList[0]))
+		ps->Glyphs = sna_glyphs__shared;
 	ps->UnrealizeGlyph = sna_glyph_unrealize;
 	ps->AddTraps = sna_add_traps;
 	ps->Trapezoids = sna_composite_trapezoids;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 7a96fab..53494e3 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -194,6 +194,9 @@ bool sna_glyphs_create(struct sna *sna)
 	if (!can_render(sna))
 		return true;
 
+	if (xf86IsEntityShared(sna->scrn->entityList[0]))
+		return true;
+
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
 		struct sna_glyph_cache *cache = &sna->render.glyph[i];
 		struct sna_pixmap *priv;
@@ -409,6 +412,9 @@ glyph_cache(ScreenPtr screen,
 	assert(cache->glyphs[pos] == NULL);
 
 	priv = sna_glyph(glyph);
+	DBG(("%s(%d): adding glyph to cache %d, pos %d\n",
+	     __FUNCTION__, screen->myNum,
+	     PICT_FORMAT_RGB(glyph_picture->format) != 0, pos));
 	cache->glyphs[pos] = priv;
 	priv->atlas = cache->picture;
 	priv->size = size;
@@ -1694,11 +1700,308 @@ fallback:
 	glyphs_fallback(op, src, dst, mask, src_x, src_y, nlist, list, glyphs);
 }
 
+static bool
+glyphs_via_image(struct sna *sna,
+		 CARD8 op,
+		 PicturePtr src,
+		 PicturePtr dst,
+		 PictFormatPtr format,
+		 INT16 src_x, INT16 src_y,
+		 int nlist, GlyphListPtr list, GlyphPtr *glyphs)
+{
+	ScreenPtr screen = dst->pDrawable->pScreen;
+	CARD32 component_alpha;
+	PixmapPtr pixmap;
+	PicturePtr mask;
+	int16_t x, y, width, height;
+	pixman_image_t *mask_image;
+	int error;
+	BoxRec box;
+
+	if (NO_GLYPHS_VIA_MASK)
+		return false;
+
+	DBG(("%s(op=%d, src=(%d, %d), nlist=%d,  dst=(%d, %d)+(%d, %d))\n",
+	     __FUNCTION__, op, src_x, src_y, nlist,
+	     list->xOff, list->yOff, dst->pDrawable->x, dst->pDrawable->y));
+
+	glyph_extents(nlist, list, glyphs, &box);
+	if (box.x2 <= box.x1 || box.y2 <= box.y1)
+		return true;
+
+	DBG(("%s: bounds=((%d, %d), (%d, %d))\n", __FUNCTION__,
+	     box.x1, box.y1, box.x2, box.y2));
+
+	if (!sna_compute_composite_extents(&box,
+					   src, NULL, dst,
+					   src_x, src_y,
+					   0, 0,
+					   box.x1, box.y1,
+					   box.x2 - box.x1,
+					   box.y2 - box.y1))
+		return true;
+
+	DBG(("%s: extents=((%d, %d), (%d, %d))\n", __FUNCTION__,
+	     box.x1, box.y1, box.x2, box.y2));
+
+	width  = box.x2 - box.x1;
+	height = box.y2 - box.y1;
+	box.x1 -= dst->pDrawable->x;
+	box.y1 -= dst->pDrawable->y;
+	x = -box.x1;
+	y = -box.y1;
+	src_x += box.x1 - list->xOff;
+	src_y += box.y1 - list->yOff;
+
+	if (format->depth < 8) {
+		format = PictureMatchFormat(screen, 8, PICT_a8);
+		if (!format)
+			return false;
+	}
+
+	DBG(("%s: small mask [format=%lx, depth=%d, size=%d], rendering glyphs to upload buffer\n",
+	     __FUNCTION__, (unsigned long)format->format,
+	     format->depth, (uint32_t)width*height*format->depth));
+
+	pixmap = sna_pixmap_create_upload(screen,
+					  width, height,
+					  format->depth,
+					  KGEM_BUFFER_WRITE);
+	if (!pixmap)
+		return false;
+
+	mask_image =
+		pixman_image_create_bits(format->depth << 24 | format->format,
+					 width, height,
+					 pixmap->devPrivate.ptr,
+					 pixmap->devKind);
+	if (mask_image == NULL)
+		goto err_pixmap;
+
+	memset(pixmap->devPrivate.ptr, 0, pixmap->devKind*height);
+#if HAS_PIXMAN_GLYPHS
+	if (sna->render.glyph_cache) {
+		pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
+		pixman_glyph_t *pglyphs = stack_glyphs;
+		pixman_glyph_cache_t *cache;
+		int count, n;
+
+		cache = sna->render.glyph_cache;
+		pixman_glyph_cache_freeze(cache);
+
+		count = 0;
+		for (n = 0; n < nlist; ++n)
+			count += list[n].len;
+		if (count > N_STACK_GLYPHS) {
+			pglyphs = malloc (count * sizeof(pixman_glyph_t));
+			if (pglyphs == NULL)
+				goto err_pixmap;
+		}
+
+		count = 0;
+		do {
+			n = list->len;
+			x += list->xOff;
+			y += list->yOff;
+			while (n--) {
+				GlyphPtr g = *glyphs++;
+				const void *ptr;
+
+				if (g->info.width == 0 || g->info.height == 0)
+					goto next_pglyph;
+
+				ptr = pixman_glyph_cache_lookup(cache, g, NULL);
+				if (ptr == NULL) {
+					pixman_image_t *glyph_image;
+
+					glyph_image = sna_glyph_get_image(g, screen);
+					if (glyph_image == NULL)
+						goto next_pglyph;
+
+					ptr = pixman_glyph_cache_insert(cache, g, NULL,
+									g->info.x,
+									g->info.y,
+									glyph_image);
+					if (ptr == NULL)
+						goto next_pglyph;
+				}
+
+				pglyphs[count].x = x;
+				pglyphs[count].y = y;
+				pglyphs[count].glyph = ptr;
+				count++;
+
+next_pglyph:
+				x += g->info.xOff;
+				y += g->info.yOff;
+			}
+			list++;
+		} while (--nlist);
+
+		pixman_composite_glyphs_no_mask(PIXMAN_OP_ADD,
+						sna->render.white_image,
+						mask_image,
+						0, 0,
+						0, 0,
+						cache, count, pglyphs);
+		pixman_glyph_cache_thaw(cache);
+		if (pglyphs != stack_glyphs)
+			free(pglyphs);
+	} else
+#endif
+		do {
+			int n = list->len;
+			x += list->xOff;
+			y += list->yOff;
+			while (n--) {
+				GlyphPtr g = *glyphs++;
+				pixman_image_t *glyph_image;
+				int16_t xi, yi;
+
+				if (g->info.width == 0 || g->info.height == 0)
+					goto next_image;
+
+				/* If the mask has been cropped, it is likely
+				 * that some of the glyphs fall outside.
+				 */
+				xi = x - g->info.x;
+				yi = y - g->info.y;
+				if (xi >= width || yi >= height)
+					goto next_image;
+				if (xi + g->info.width  <= 0 ||
+				    yi + g->info.height <= 0)
+					goto next_image;
+
+				glyph_image =
+					sna_glyph_get_image(g, dst->pDrawable->pScreen);
+
+				DBG(("%s: glyph to mask (%d, %d)x(%d, %d)\n",
+				     __FUNCTION__,
+				     xi, yi,
+				     g->info.width,
+				     g->info.height));
+
+				if (list->format == format) {
+					assert(pixman_image_get_format(glyph_image) == pixman_image_get_format(mask_image));
+					pixman_image_composite(PictOpAdd,
+							       glyph_image,
+							       NULL,
+							       mask_image,
+							       0, 0,
+							       0, 0,
+							       xi, yi,
+							       g->info.width,
+							       g->info.height);
+				} else {
+					pixman_image_composite(PictOpAdd,
+							       sna->render.white_image,
+							       glyph_image,
+							       mask_image,
+							       0, 0,
+							       0, 0,
+							       xi, yi,
+							       g->info.width,
+							       g->info.height);
+				}
+
+next_image:
+				x += g->info.xOff;
+				y += g->info.yOff;
+			}
+			list++;
+		} while (--nlist);
+	pixman_image_unref(mask_image);
+
+	component_alpha = NeedsComponent(format->format);
+
+	mask = CreatePicture(0, &pixmap->drawable,
+			     format, CPComponentAlpha,
+			     &component_alpha, serverClient, &error);
+	if (!mask)
+		goto err_pixmap;
+
+	ValidatePicture(mask);
+
+	sna_composite(op,
+		      src, mask, dst,
+		      src_x, src_y,
+		      0, 0,
+		      box.x1, box.y1,
+		      width, height);
+	FreePicture(mask, 0);
+err_pixmap:
+	sna_pixmap_destroy(pixmap);
+	return TRUE;
+}
+
+void
+sna_glyphs__shared(CARD8 op,
+		   PicturePtr src,
+		   PicturePtr dst,
+		   PictFormatPtr mask,
+		   INT16 src_x, INT16 src_y,
+		   int nlist, GlyphListPtr list, GlyphPtr *glyphs)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
+
+	DBG(("%s(op=%d, nlist=%d, src=(%d, %d))\n",
+	     __FUNCTION__, op, nlist, src_x, src_y));
+
+	if (REGION_NUM_RECTS(dst->pCompositeClip) == 0)
+		return;
+
+	if (FALLBACK)
+		goto fallback;
+
+	if (!can_render(sna)) {
+		DBG(("%s: wedged\n", __FUNCTION__));
+		goto fallback;
+	}
+
+	if (dst->alphaMap) {
+		DBG(("%s: fallback -- dst alpha map\n", __FUNCTION__));
+		goto fallback;
+	}
+
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback -- destination unattached\n", __FUNCTION__));
+		goto fallback;
+	}
+
+	if ((too_small(priv) || DAMAGE_IS_ALL(priv->cpu_damage)) &&
+	    !picture_is_gpu(src)) {
+		DBG(("%s: fallback -- too small (%dx%d)\n",
+		     __FUNCTION__, dst->pDrawable->width, dst->pDrawable->height));
+		goto fallback;
+	}
+
+	if (!mask) {
+		mask = glyphs_format(nlist, list, glyphs);
+		DBG(("%s: substituting mask? %d\n", __FUNCTION__, mask!=NULL));
+	}
+	if (mask) {
+		if (glyphs_via_image(sna, op,
+				     src, dst, mask,
+				     src_x, src_y,
+				     nlist, list, glyphs))
+			return;
+	}
+
+fallback:
+	glyphs_fallback(op, src, dst, mask, src_x, src_y, nlist, list, glyphs);
+}
+
 void
 sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph)
 {
 	struct sna_glyph *priv = sna_glyph(glyph);
 
+	DBG(("%s: screen=%d, glyph(image?=%d, atlas?=%d)\n",
+	     __FUNCTION__, screen->myNum, !!priv->image, !!priv->atlas));
+
 	if (priv->image) {
 #if HAS_PIXMAN_GLYPHS
 		struct sna *sna = to_sna_from_screen(screen);
@@ -1713,6 +2016,8 @@ sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph)
 	if (priv->atlas) {
 		struct sna *sna = to_sna_from_screen(screen);
 		struct sna_glyph_cache *cache = &sna->render.glyph[priv->pos&1];
+		DBG(("%s: releasing glyph pos %d from cache %d\n",
+		     __FUNCTION__, priv->pos >> 1, priv->pos & 1));
 		assert(cache->glyphs[priv->pos >> 1] == priv);
 		cache->glyphs[priv->pos >> 1] = NULL;
 		priv->atlas = NULL;
commit cfa0c6162bbd2f6edab1301b6aed886af7e8ab38
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 10 10:50:03 2012 +0100

    sna: Fixup possible_crtcs for ZaphodHeads
    
    As the possible_crtcs is a bitmask of the available crtcs exposed to the
    Xserver, we need to adjust it for the limited view given by Zaphod mode.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index be5d983..a9c5440 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -804,8 +804,8 @@ static void update_flush_interval(struct sna *sna)
 			continue;
 		}
 
-		DBG(("%s: CRTC:%d (pipe %d) vrefresh=%d\n",
-		     __FUNCTION__,i, to_sna_crtc(crtc)->pipe,
+		DBG(("%s: CRTC:%d (pipe %d) vrefresh=%f\n",
+		     __FUNCTION__, i, to_sna_crtc(crtc)->pipe,
 		     xf86ModeVRefresh(&crtc->mode)));
 		max_vrefresh = max(max_vrefresh, xf86ModeVRefresh(&crtc->mode));
 	}
@@ -2192,7 +2192,6 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	struct drm_mode_get_encoder enc;
 	struct sna_output *sna_output;
 	const char *output_name;
-	const char *s;
 	char name[32];
 
 	koutput = drmModeGetConnector(sna->kgem.fd,
@@ -2212,9 +2211,23 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	snprintf(name, 32, "%s%d", output_name, koutput->connector_type_id);
 
 	if (xf86IsEntityShared(scrn->entityList[0])) {
-		s = xf86GetOptValString(sna->Options, OPTION_ZAPHOD);
-		if (s && !sna_zaphod_match(s, name))
+		const char *str;
+
+		str = xf86GetOptValString(sna->Options, OPTION_ZAPHOD);
+		if (str && !sna_zaphod_match(str, name))
+			goto cleanup_connector;
+
+		if ((enc.possible_crtcs & (1 << scrn->confScreen->device->screen)) == 0) {
+			if (str) {
+				xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+					   "%s is an invalid output for screen (pipe) %d\n",
+					   name, scrn->confScreen->device->screen);
+			}
 			goto cleanup_connector;
+		}
+
+		enc.possible_crtcs = 1;
+		enc.possible_clones = 0;
 	}
 
 	output = xf86OutputCreate(scrn, &sna_output_funcs, name);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a5c106e..a29227b 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -256,12 +256,15 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	dev = sna_device(scrn);
 	if (dev) {
 		dev->open_count++;
+		DBG(("%s: reusing device, count=%d\n",
+		     __FUNCTION__, dev->open_count));
 		return dev->fd;
 	}
 
 	snprintf(busid, sizeof(busid), "pci:%04x:%02x:%02x.%d",
 		 pci->domain, pci->bus, pci->dev, pci->func);
 
+	DBG(("%s: opening device '%s'\n",  __FUNCTION__, busid));
 	fd = drmOpen(NULL, busid);
 	if (fd == -1) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
@@ -1080,6 +1083,7 @@ static Bool sna_pm_event(SCRN_ARG_TYPE arg, pmEvent event, Bool undo)
 
 Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 {
+	DBG(("%s: entity_num=%d\n", __FUNCTION__, entity_num));
 #if defined(USE_GIT_DESCRIBE)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled from %s\n", git_version);
@@ -1099,8 +1103,6 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled with extra pixmap/damage validation\n");
 #endif
-
-	DBG(("%s\n", __FUNCTION__));
 	DBG(("pixman version: %s\n", pixman_version_string()));
 
 	if (sna_device_key == -1)
commit 95c71d1626dfbd97733e7e7e03fdde3af833446e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 8 20:12:32 2012 +0100

    NEWS: Fix version reference 'bugs since 2.20.6'!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 82cf4a4..8836a11 100644
--- a/NEWS
+++ b/NEWS
@@ -7,7 +7,7 @@ light of that advance, this release includes the support code by Dave
 Airlie to integrate PRIME into xf86-video-intel and make those new
 features available.
 
-Aside from landing PRIME, a few other bugs were fixed since 2.20.5:
+Aside from landing PRIME, a few other bugs were fixed since 2.20.6:
 
  * Fix framebuffer leak on server regeneration
 
commit 0659711dfa9c9fc52adeaba766049887d5b6f040
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 8 20:01:32 2012 +0100

    2.20.7 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 1291b51..82cf4a4 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,29 @@
+Release 2.20.7 (2012-09-08)
+===========================
+Continuing the flurry of releases, this week saw the release of
+xorg-1.13 bringing with it the first steps towards hotpluggable gpu
+support and the ability to offload DRI rendering onto other GPUs. In
+light of that advance, this release includes the support code by Dave
+Airlie to integrate PRIME into xf86-video-intel and make those new
+features available.
+
+Aside from landing PRIME, a few other bugs were fixed since 2.20.5:
+
+ * Fix framebuffer leak on server regeneration
+
+ * Fix texture cache flushing on IvyBridge and Kwin with plastique
+   https://bugs.freedesktop.org/show_bug.cgi?id=54488
+
+ * Redirect large solid fills on SandyBridge+. By large I mean greater
+   than 8192 or 16384 pixels on SandyBridge and IvyBridge respectively.
+   https://bugs.freedesktop.org/show_bug.cgi?id=54134
+
+ * Fix up backlight option handling in the manpage and uxa.
+   https://bugs.freedesktop.org/show_bug.cgi?id=54397
+
+ * Unbreak ZaphodHeads.
+   https://bugs.freedesktop.org/show_bug.cgi?id=52438
+
 Release 2.20.6 (2012-09-02)
 ===========================
 A serious bug that caused a crash on SandyBridge and IvyBridge when
diff --git a/configure.ac b/configure.ac
index c401a61..f94f140 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.6],
+        [2.20.7],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 2c8e48f8149499040342836491333402fd05b762
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 23:00:47 2012 +0100

    sna: Remember that LineDoubleDash involves updating the GC between segments
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9881050..7574847 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5088,6 +5088,13 @@ struct sna_fill_spans {
 };
 
 static void
+sna_poly_point__cpu(DrawablePtr drawable, GCPtr gc,
+		    int mode, int n, DDXPointPtr pt)
+{
+	fbPolyPoint(drawable, gc, mode, n, pt, -1);
+}
+
+static void
 sna_poly_point__fill(DrawablePtr drawable, GCPtr gc,
 		     int mode, int n, DDXPointPtr pt)
 {
@@ -5123,6 +5130,52 @@ sna_poly_point__fill(DrawablePtr drawable, GCPtr gc,
 }
 
 static void
+sna_poly_point__gpu(DrawablePtr drawable, GCPtr gc,
+		     int mode, int n, DDXPointPtr pt)
+{
+	struct sna_fill_spans *data = sna_gc(gc)->priv;
+	struct sna_fill_op fill;
+	BoxRec box[512];
+	DDXPointRec last;
+
+	if (!sna_fill_init_blt(&fill,
+			       data->sna, data->pixmap,
+			       data->bo, gc->alu, gc->fgPixel))
+		return;
+
+	DBG(("%s: count=%d\n", __FUNCTION__, n));
+
+	last.x = drawable->x;
+	last.y = drawable->y;
+	while (n) {
+		BoxRec *b = box;
+		unsigned nbox = n;
+		if (nbox > ARRAY_SIZE(box))
+			nbox = ARRAY_SIZE(box);
+		n -= nbox;
+		do {
+			*(DDXPointRec *)b = *pt++;
+
+			b->x1 += last.x;
+			b->y1 += last.y;
+			if (mode == CoordModePrevious)
+				last = *(DDXPointRec *)b;
+
+			if (RegionContainsPoint(&data->region,
+						b->x1, b->y1, NULL)) {
+				b->x1 += data->dx;
+				b->y1 += data->dy;
+				b->x2 = b->x1 + 1;
+				b->y2 = b->y1 + 1;
+				b++;
+			}
+		} while (--nbox);
+		fill.boxes(data->sna, &fill, box, b - box);
+	}
+	fill.done(data->sna, &fill);
+}
+
+static void
 sna_poly_point__fill_clip_extents(DrawablePtr drawable, GCPtr gc,
 				  int mode, int n, DDXPointPtr pt)
 {
@@ -9275,45 +9328,64 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 		get_drawable_deltas(drawable, data.pixmap, &data.dx, &data.dy);
 
 		if (gc_is_solid(gc, &color)) {
-			struct sna_fill_op fill;
+			sna_gc(gc)->priv = &data;
 
-			if (!sna_fill_init_blt(&fill,
-					       data.sna, data.pixmap,
-					       data.bo, gc->alu, color))
-				goto fallback;
+			assert(gc->miTranslate);
+			if (gc->lineStyle == LineSolid) {
+				struct sna_fill_op fill;
 
-			data.op = &fill;
-			sna_gc(gc)->priv = &data;
+				if (!sna_fill_init_blt(&fill,
+						       data.sna, data.pixmap,
+						       data.bo, gc->alu, color))
+					goto fallback;
 
-			if ((data.flags & 2) == 0) {
-				if (data.dx | data.dy)
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_offset;
+				if ((data.flags & 2) == 0) {
+					if (data.dx | data.dy)
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_offset;
+					else
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill;
+					sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill;
+				} else {
+					region_maybe_clip(&data.region,
+							  gc->pCompositeClip);
+					if (!RegionNotEmpty(&data.region))
+						return;
+
+					if (region_is_singular(&data.region)) {
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_extents;
+						sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill_clip_extents;
+					} else {
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_boxes;
+						sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill_clip_boxes;
+					}
+				}
+
+				data.op = &fill;
+				gc->ops = &sna_gc_ops__tmp;
+				if (gc->lineWidth == 0)
+					miZeroPolyArc(drawable, gc, n, arc);
 				else
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill;
-				sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill;
+					miPolyArc(drawable, gc, n, arc);
+				gc->ops = (GCOps *)&sna_gc_ops;
+
+				fill.done(data.sna, &fill);
 			} else {
 				region_maybe_clip(&data.region,
 						  gc->pCompositeClip);
 				if (!RegionNotEmpty(&data.region))
 					return;
 
-				if (region_is_singular(&data.region)) {
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_extents;
-					sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill_clip_extents;
-				} else {
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_boxes;
-					sna_gc_ops__tmp.PolyPoint = sna_poly_point__fill_clip_boxes;
-				}
+				sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
+				sna_gc_ops__tmp.PolyPoint = sna_poly_point__gpu;
+
+				gc->ops = &sna_gc_ops__tmp;
+				if (gc->lineWidth == 0)
+					miZeroPolyArc(drawable, gc, n, arc);
+				else
+					miPolyArc(drawable, gc, n, arc);
+				gc->ops = (GCOps *)&sna_gc_ops;
 			}
-			assert(gc->miTranslate);
-			gc->ops = &sna_gc_ops__tmp;
-			if (gc->lineWidth == 0)
-				miZeroPolyArc(drawable, gc, n, arc);
-			else
-				miPolyArc(drawable, gc, n, arc);
-			gc->ops = (GCOps *)&sna_gc_ops;
 
-			fill.done(data.sna, &fill);
 			if (data.damage) {
 				if (data.dx | data.dy)
 					pixman_region_translate(&data.region, data.dx, data.dy);
@@ -13431,7 +13503,7 @@ static const GCOps sna_gc_ops__cpu = {
 	fbPutImage,
 	fbCopyArea,
 	fbCopyPlane,
-	fbPolyPoint,
+	sna_poly_point__cpu,
 	fbPolyLine,
 	fbPolySegment,
 	miPolyRectangle,
commit ed7b8db24921dc0bb6ea59dacf35ea41a61d59bf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 22:12:17 2012 +0100

    sna: Pass the hints down to the PolyPoint fallback
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fb.h b/src/sna/fb/fb.h
index 215aec9..adefaa1 100644
--- a/src/sna/fb/fb.h
+++ b/src/sna/fb/fb.h
@@ -515,7 +515,9 @@ extern RegionPtr
 fbBitmapToRegion(PixmapPtr pixmap);
 
 extern void
-fbPolyPoint(DrawablePtr drawable, GCPtr gc, int mode, int n, xPoint *pt);
+fbPolyPoint(DrawablePtr drawable, GCPtr gc,
+	    int mode, int n, xPoint *pt,
+	    unsigned flags);
 
 extern void
 fbPushImage(DrawablePtr drawable, GCPtr gc,
diff --git a/src/sna/fb/fbpoint.c b/src/sna/fb/fbpoint.c
index e3974fc..3df79a2 100644
--- a/src/sna/fb/fbpoint.c
+++ b/src/sna/fb/fbpoint.c
@@ -25,21 +25,27 @@
 #include <micoord.h>
 
 #define DOTS	    fbDots8
+#define DOTS__SIMPLE fbDots8__simple
 #define BITS	    BYTE
 #include "fbpointbits.h"
 #undef BITS
+#undef DOTS__SIMPLE
 #undef DOTS
 
 #define DOTS	    fbDots16
+#define DOTS__SIMPLE fbDots16__simple
 #define BITS	    CARD16
 #include "fbpointbits.h"
 #undef BITS
+#undef DOTS__SIMPLE
 #undef DOTS
 
 #define DOTS	    fbDots32
+#define DOTS__SIMPLE fbDots32__simple
 #define BITS	    CARD32
 #include "fbpointbits.h"
 #undef BITS
+#undef DOTS__SIMPLE
 #undef DOTS
 
 static void
@@ -74,7 +80,7 @@ fbDots(FbBits *dstOrig, FbStride dstStride, int dstBpp,
 
 void
 fbPolyPoint(DrawablePtr drawable, GCPtr gc,
-	    int mode, int n, xPoint *pt)
+	    int mode, int n, xPoint *pt, unsigned flags)
 {
 	FbBits *dst;
 	FbStride dstStride;
@@ -97,16 +103,30 @@ fbPolyPoint(DrawablePtr drawable, GCPtr gc,
 
 	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
 	dots = fbDots;
-	switch (dstBpp) {
-	case 8:
-		dots = fbDots8;
-		break;
-	case 16:
-		dots = fbDots16;
-		break;
-	case 32:
-		dots = fbDots32;
-		break;
+	if ((flags & 2) == 0 && fb_gc(gc)->and == 0) {
+		switch (dstBpp) {
+		case 8:
+			dots = fbDots8__simple;
+			break;
+		case 16:
+			dots = fbDots16__simple;
+			break;
+		case 32:
+			dots = fbDots32__simple;
+			break;
+		}
+	} else {
+		switch (dstBpp) {
+		case 8:
+			dots = fbDots8;
+			break;
+		case 16:
+			dots = fbDots16;
+			break;
+		case 32:
+			dots = fbDots32;
+			break;
+		}
 	}
 	dots(dst, dstStride, dstBpp, gc->pCompositeClip, pt, n,
 	     drawable->x, drawable->y, dstXoff, dstYoff,
diff --git a/src/sna/fb/fbpointbits.h b/src/sna/fb/fbpointbits.h
index 2d71c1d..60bf488 100644
--- a/src/sna/fb/fbpointbits.h
+++ b/src/sna/fb/fbpointbits.h
@@ -106,5 +106,43 @@ DOTS(FbBits * dst,
 	}
 }
 
+static void
+DOTS__SIMPLE(FbBits * dst,
+	     FbStride dstStride,
+	     int dstBpp,
+	     RegionPtr region,
+	     xPoint * ptsOrig,
+	     int npt, int xorg, int yorg, int xoff, int yoff,
+	     FbBits and, FbBits xor)
+{
+	uint32_t *pts = (uint32_t *) ptsOrig;
+	BITS *bits = (BITS *) dst, *p;
+	BITS bxor = (BITS) xor;
+	FbStride bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
+
+	bits += bitsStride * (yorg + yoff) + (xorg + xoff);
+	while (npt >= 2) {
+		union {
+			uint32_t pt32[2];
+			uint64_t pt64;
+		} pt;
+		pt.pt64 = *(uint64_t *)pts;
+
+		p = bits + intToY(pt.pt32[0]) * bitsStride + intToX(pt.pt32[0]);
+		WRITE(p, bxor);
+
+		p = bits + intToY(pt.pt32[1]) * bitsStride + intToX(pt.pt32[1]);
+		WRITE(p, bxor);
+
+		pts += 2;
+		npt -= 2;
+	}
+	if (npt) {
+		uint32_t pt = *pts;
+		p = bits + intToY(pt) * bitsStride + intToX(pt);
+		WRITE(p, bxor);
+	}
+}
+
 #undef RROP
 #undef isClipped
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 792d007..9881050 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6696,7 +6696,7 @@ fallback:
 		goto out_gc;
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
-	fbPolyPoint(drawable, gc, mode, n, pt);
+	fbPolyPoint(drawable, gc, mode, n, pt, flags);
 	FALLBACK_FLUSH(drawable);
 out_gc:
 	sna_gc_move_to_gpu(gc);
commit 6c9df48fa8f1ac93103ce771ae28f47faa321619
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 22:05:19 2012 +0100

    sna: Fix PolyPoint fallback through a complex clip
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbpoint.c b/src/sna/fb/fbpoint.c
index d3f796e..e3974fc 100644
--- a/src/sna/fb/fbpoint.c
+++ b/src/sna/fb/fbpoint.c
@@ -26,26 +26,19 @@
 
 #define DOTS	    fbDots8
 #define BITS	    BYTE
-#define BITS2	    CARD16
-#define BITS4	    CARD32
 #include "fbpointbits.h"
 #undef BITS
-#undef BITS2
-#undef BITS4
 #undef DOTS
 
 #define DOTS	    fbDots16
 #define BITS	    CARD16
-#define BITS2	    CARD32
 #include "fbpointbits.h"
 #undef BITS
-#undef BITS2
 #undef DOTS
 
 #define DOTS	    fbDots32
 #define BITS	    CARD32
 #include "fbpointbits.h"
-#undef ARC
 #undef BITS
 #undef DOTS
 
@@ -93,16 +86,16 @@ fbPolyPoint(DrawablePtr drawable, GCPtr gc,
 		     int xorg, int yorg,
 		     int xoff, int yoff,
 		     FbBits and, FbBits xor);
-	FbBits and, xor;
 
-	DBG(("%s x %d\n", __FUNCTION__, n));
+	DBG(("%s x %d, clip=[(%d, %d), (%d, %d)]x%d\n", __FUNCTION__, n,
+	     gc->pCompositeClip->extents.x1, gc->pCompositeClip->extents.y1,
+	     gc->pCompositeClip->extents.x2, gc->pCompositeClip->extents.y2,
+	     RegionNumRects(gc->pCompositeClip)));
 
 	if (mode == CoordModePrevious)
 		fbFixCoordModePrevious(n, pt);
 
 	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
-	and = fb_gc(gc)->and;
-	xor = fb_gc(gc)->xor;
 	dots = fbDots;
 	switch (dstBpp) {
 	case 8:
@@ -116,5 +109,6 @@ fbPolyPoint(DrawablePtr drawable, GCPtr gc,
 		break;
 	}
 	dots(dst, dstStride, dstBpp, gc->pCompositeClip, pt, n,
-	     drawable->x, drawable->y, dstXoff, dstYoff, and, xor);
+	     drawable->x, drawable->y, dstXoff, dstYoff,
+	     fb_gc(gc)->and, fb_gc(gc)->xor);
 }
diff --git a/src/sna/fb/fbpointbits.h b/src/sna/fb/fbpointbits.h
index 40a25c6..2d71c1d 100644
--- a/src/sna/fb/fbpointbits.h
+++ b/src/sna/fb/fbpointbits.h
@@ -38,14 +38,13 @@ DOTS(FbBits * dst,
 	BITS band = (BITS) and;
 	FbStride bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
 
-	bits += bitsStride * (yorg + yoff) + (xorg + xoff);
-
 	if (region->data == NULL) {
 		INT32 ul = coordToInt(region->extents.x1 - xorg,
 				      region->extents.y1 - yorg);
 		INT32 lr = coordToInt(region->extents.x2 - xorg - 1,
 				      region->extents.y2 - yorg - 1);
 
+		bits += bitsStride * (yorg + yoff) + (xorg + xoff);
 		if (and == 0) {
 			while (npt >= 2) {
 				union {
@@ -82,23 +81,24 @@ DOTS(FbBits * dst,
 			}
 		}
 	} else {
+		bits += bitsStride * yoff + xoff;
 		if (and == 0) {
 			while (npt--) {
 				uint32_t pt = *pts++;
-				if (RegionContainsPoint(region,
-							intToX(pt), intToY(pt),
-							NULL)) {
-					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+				int x = intToX(pt) + xorg;
+				int y = intToY(pt) + yorg;
+				if (RegionContainsPoint(region, x, y, NULL)) {
+					BITS *point = bits + y * bitsStride + x;
 					WRITE(point, bxor);
 				}
 			}
 		} else {
 			while (npt--) {
 				uint32_t pt = *pts++;
-				if (RegionContainsPoint(region,
-							intToX(pt), intToY(pt),
-							NULL)) {
-					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+				int x = intToX(pt) + xorg;
+				int y = intToY(pt) + yorg;
+				if (RegionContainsPoint(region, x, y, NULL)) {
+					BITS *point = bits + y * bitsStride + x;
 					RROP(point, band, bxor);
 				}
 			}
commit 7d76eb155ed78343abfa15b7c8af87fc3d4211e4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 18:06:25 2012 +0100

    sna: First scan for a backlight associated with the device
    
    The goal is to find the right interface in a multi-GPU system.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index beafb3b..be5d983 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -318,8 +318,84 @@ has_user_backlight_override(xf86OutputPtr output)
 	return str;
 }
 
-static void
-sna_output_backlight_init(xf86OutputPtr output)
+static char *
+has_device_backlight(xf86OutputPtr output, int *best_type)
+{
+	struct sna_output *sna_output = output->driver_private;
+	struct sna *sna = to_sna(output->scrn);
+	struct pci_device *pci = sna->PciInfo;
+	char path[1024];
+	char *best_iface = NULL;
+	DIR *dir;
+	struct dirent *de;
+
+	snprintf(path, sizeof(path),
+		 "/sys/bus/pci/devices/%04x:%02x:%02x.%d/backlight",
+		 pci->domain, pci->bus, pci->dev, pci->func);
+
+	DBG(("%s: scanning %s\n", __FUNCTION__, path));
+	dir = opendir(path);
+	if (dir == NULL)
+		return NULL;
+
+	while ((de = readdir(dir))) {
+		char buf[100];
+		int fd, v;
+
+		if (*de->d_name == '.')
+			continue;
+
+		DBG(("%s: %s\n", __FUNCTION__, de->d_name));
+		snprintf(path, sizeof(path), "%s/%s/type",
+			 BACKLIGHT_CLASS, de->d_name);
+
+		v = -1;
+		fd = open(path, O_RDONLY);
+		if (fd >= 0) {
+			v = read(fd, buf, sizeof(buf)-1);
+			close(fd);
+		}
+		if (v > 0) {
+			while (v > 0 && isspace(buf[v-1]))
+				v--;
+			buf[v] = '\0';
+
+			if (strcmp(buf, "raw") == 0)
+				v = RAW;
+			else if (strcmp(buf, "platform") == 0)
+				v = PLATFORM;
+			else if (strcmp(buf, "firmware") == 0)
+				v = FIRMWARE;
+			else
+				v = INT_MAX;
+		} else
+			v = INT_MAX;
+
+		if (v < *best_type) {
+			char *copy;
+			int max;
+
+			sna_output->backlight_iface = de->d_name;
+			max = sna_output_backlight_get_max(output);
+			sna_output->backlight_iface = NULL;
+			if (max <= 0)
+				continue;
+
+			copy = strdup(de->d_name);
+			if (copy) {
+				free(best_iface);
+				best_iface = copy;
+				*best_type = v;
+			}
+		}
+	}
+	closedir(dir);
+
+	return best_iface;
+}
+
+static char *
+has_backlight(xf86OutputPtr output, int *best_type)
 {
 	static const char *known_interfaces[] = {
 		"gmux_backlight",
@@ -335,21 +411,14 @@ sna_output_backlight_init(xf86OutputPtr output)
 		"acpi_video0",
 		"intel_backlight",
 	};
-	MessageType from = X_PROBED;
 	struct sna_output *sna_output = output->driver_private;
-	char *best_iface;
-	int best_type;
+	char *best_iface = NULL;
 	DIR *dir;
 	struct dirent *de;
 
-	best_type = INT_MAX;
-	best_iface = has_user_backlight_override(output);
-	if (best_iface)
-		goto skip;
-
 	dir = opendir(BACKLIGHT_CLASS);
 	if (dir == NULL)
-		return;
+		return NULL;
 
 	while ((de = readdir(dir))) {
 		char path[1024];
@@ -394,7 +463,7 @@ sna_output_backlight_init(xf86OutputPtr output)
 			v += i;
 		}
 
-		if (v < best_type) {
+		if (v < *best_type) {
 			char *copy;
 			int max;
 
@@ -410,16 +479,39 @@ sna_output_backlight_init(xf86OutputPtr output)
 			if (copy) {
 				free(best_iface);
 				best_iface = copy;
-				best_type = v;
+				*best_type = v;
 			}
 		}
 	}
 	closedir(dir);
 
-	if (!best_iface)
-		return;
+	return best_iface;
+}
+
+static void
+sna_output_backlight_init(xf86OutputPtr output)
+{
+	struct sna_output *sna_output = output->driver_private;
+	MessageType from = X_PROBED;
+	char *best_iface;
+	int best_type;
 
-skip:
+	best_type = INT_MAX;
+	best_iface = has_user_backlight_override(output);
+	if (best_iface)
+		goto done;
+
+	best_iface = has_device_backlight(output, &best_type);
+	if (best_iface)
+		goto done;
+
+	best_iface = has_backlight(output, &best_type);
+	if (best_iface)
+		goto done;
+
+	return;
+
+done:
 	sna_output->backlight_iface = best_iface;
 	sna_output->backlight_max = sna_output_backlight_get_max(output);
 	sna_output->backlight_active_level = sna_output_backlight_get(output);
@@ -435,7 +527,6 @@ skip:
 		   sna_output->backlight_iface, best_iface);
 }
 
-
 static void
 mode_from_kmode(ScrnInfoPtr scrn,
 		drmModeModeInfoPtr kmode,
commit 2de7d2d8150d648815feb74a3ae2239b908b971e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 16:56:33 2012 +0100

    sna/prime: Fixup attaching the randr14 scanout pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6baa37c..792d007 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1020,7 +1020,11 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
 
-	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
+	DBG(("%s: pixmap=%ld, size=%dx%d, depth=%d/%d, stride=%d\n",
+	     __FUNCTION__, pixmap->drawable.serialNumber,
+	     pixmap->drawable.width, pixmap->drawable.height,
+	     pixmap->drawable.depth, pixmap->drawable.bitsPerPixel,
+	     pixmap->devKind));
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL)
@@ -13691,6 +13695,9 @@ static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 	if (sna->vblank_interval == 0)
 		return NULL;
 
+	if (sna->front == NULL)
+		return NULL;
+
 	priv = sna_pixmap(sna->front);
 	return priv && priv->gpu_bo ? priv : NULL;
 }
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ff24ed3..beafb3b 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -569,7 +569,6 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	arg.crtc_id = sna_crtc->id;
 	arg.fb_id = fb_id(sna_crtc->bo);
 	if (sna_crtc->transform) {
-		assert(sna_crtc->shadow);
 		arg.x = 0;
 		arg.y = 0;
 	} else {
@@ -714,6 +713,9 @@ static void update_flush_interval(struct sna *sna)
 			continue;
 		}
 
+		DBG(("%s: CRTC:%d (pipe %d) vrefresh=%d\n",
+		     __FUNCTION__,i, to_sna_crtc(crtc)->pipe,
+		     xf86ModeVRefresh(&crtc->mode)));
 		max_vrefresh = max(max_vrefresh, xf86ModeVRefresh(&crtc->mode));
 	}
 
@@ -942,7 +944,21 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 	struct kgem_bo *bo;
 
 	sna_crtc->transform = false;
-	if (use_shadow(sna, crtc)) {
+	if (sna_crtc->scanout_pixmap) {
+		DBG(("%s: attaching to scanout pixmap\n", __FUNCTION__));
+
+		bo = sna_pixmap_pin(sna_crtc->scanout_pixmap, PIN_SCANOUT);
+		if (bo == NULL)
+			return NULL;
+
+		if (!get_fb(sna, bo,
+			    sna_crtc->scanout_pixmap->drawable.width,
+			    sna_crtc->scanout_pixmap->drawable.height))
+			return NULL;
+
+		sna_crtc->transform = true;
+		return kgem_bo_reference(bo);
+	} else if (use_shadow(sna, crtc)) {
 		if (!sna_crtc_enable_shadow(sna, sna_crtc))
 			return NULL;
 
@@ -963,22 +979,6 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 
 		sna_crtc->transform = true;
 		return bo;
-	} else if (sna_crtc->scanout_pixmap) {
-		DBG(("%s: attaching to scanout pixmap\n", __FUNCTION__));
-		if (!sna_crtc_enable_shadow(sna, sna_crtc))
-			return NULL;
-
-		bo = sna_pixmap_pin(sna_crtc->scanout_pixmap, PIN_SCANOUT);
-		if (bo == NULL)
-			return NULL;
-
-		if (!get_fb(sna, bo,
-			    sna_crtc->scanout_pixmap->drawable.width,
-			    sna_crtc->scanout_pixmap->drawable.height))
-			return NULL;
-
-		sna_crtc->transform = true;
-		return kgem_bo_reference(bo);
 	} else if (sna->flags & SNA_TEAR_FREE) {
 		DBG(("%s: tear-free updates requested\n", __FUNCTION__));
 
commit 448808d8a4e78afa7fe90e318d6d7afdbff66190
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 14:31:52 2012 +0100

    prime: Allocate a GPU_SCREEN when not primary to fulfil the output sink role
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index a5d071b..b4f0ad2 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -538,14 +538,22 @@ intel_platform_probe(DriverPtr driver,
 {
 	ScrnInfoPtr scrn = NULL;
 	char *path = xf86_get_platform_device_attrib(dev, ODEV_ATTRIB_PATH);
+	unsigned scrn_flags = 0;
 
 	if (!dev->pdev)
 		return FALSE;
+
+	/* Allow ourselves to act as a slaved output if not primary */
+	if (flags & PLATFORM_PROBE_GPU_SCREEN) {
+		flags &= ~PLATFORM_PROBE_GPU_SCREEN;
+		scrn_flags |= XF86_ALLOCATE_GPU_SCREEN;
+	}
+
 	/* if we get any flags we don't understand fail to probe for now */
 	if (flags)
 		return FALSE;
 
-	scrn = xf86AllocateScreen(driver, 0);
+	scrn = xf86AllocateScreen(driver, scrn_flags);
 	if (scrn == NULL)
 		return FALSE;
 
commit 3dce661ef3df5558fe9c9d85324ab3395a37b7ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 13:48:12 2012 +0100

    sna: And remember to update the ScreenPixmap after resize
    
    Fixes the regression from the previous commit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f69ca97..ff24ed3 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2178,19 +2178,6 @@ sna_visit_set_window_pixmap(WindowPtr window, pointer data)
     return WT_DONTWALKCHILDREN;
 }
 
-static void
-sna_redirect_screen_pixmap(ScrnInfoPtr scrn, PixmapPtr old, PixmapPtr new)
-{
-	ScreenPtr screen = scrn->pScreen;
-	struct sna_visit_set_pixmap_window visit;
-
-	visit.old = old;
-	visit.new = new;
-	TraverseTree(screen->root, sna_visit_set_window_pixmap, &visit);
-
-	screen->SetScreenPixmap(new);
-}
-
 static void copy_front(struct sna *sna, PixmapPtr old, PixmapPtr new)
 {
 	struct sna_pixmap *old_priv, *new_priv;
@@ -2263,6 +2250,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 {
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
 	struct sna *sna = to_sna(scrn);
+	ScreenPtr screen = scrn->pScreen;
 	PixmapPtr old_front, new_front;
 	int i;
 
@@ -2274,16 +2262,15 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		return TRUE;
 
 	assert(sna->front);
-	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
+	assert(screen->GetScreenPixmap(screen) == sna->front);
 
 	DBG(("%s: creating new framebuffer %dx%d\n",
 	     __FUNCTION__, width, height));
 
 	old_front = sna->front;
-	new_front = scrn->pScreen->CreatePixmap(scrn->pScreen,
-						width, height,
-						scrn->depth,
-						SNA_CREATE_FB);
+	new_front = screen->CreatePixmap(screen,
+					 width, height, scrn->depth,
+					 SNA_CREATE_FB);
 	if (!new_front)
 		return FALSE;
 
@@ -2312,13 +2299,18 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 			sna_crtc_disable(crtc);
 	}
 
-	if (scrn->pScreen->root) {
-		sna_redirect_screen_pixmap(scrn, old_front, sna->front);
-		assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
-		assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
+	if (screen->root) {
+		struct sna_visit_set_pixmap_window visit;
+
+		visit.old = old_front;
+		visit.new = sna->front;
+		TraverseTree(screen->root, sna_visit_set_window_pixmap, &visit);
+		assert(screen->GetWindowPixmap(screen->root) == sna->front);
 	}
+	screen->SetScreenPixmap(sna->front);
+	assert(screen->GetScreenPixmap(screen) == sna->front);
 
-	scrn->pScreen->DestroyPixmap(old_front);
+	screen->DestroyPixmap(old_front);
 
 	return TRUE;
 }
commit 0c39c363d7235d03f27337c731c0ffe3369bf713
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 13:07:52 2012 +0100

    sna: Protect sna_crtc_resize() against early invocation with no RootWindow
    
    If the outputs are resized very early on, in CreateScreenResources, then
    we will not yet have created a RootWindow and so trying to change its
    pixmap is a futile effort.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index bde296d..f69ca97 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2266,24 +2266,24 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	PixmapPtr old_front, new_front;
 	int i;
 
-	DBG(("%s (%d, %d) -> (%d, %d)\n",
-	     __FUNCTION__,
+	DBG(("%s (%d, %d) -> (%d, %d)\n", __FUNCTION__,
 	     scrn->virtualX, scrn->virtualY,
 	     width, height));
 
 	if (scrn->virtualX == width && scrn->virtualY == height)
 		return TRUE;
 
+	assert(sna->front);
 	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
-	assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
+
 	DBG(("%s: creating new framebuffer %dx%d\n",
 	     __FUNCTION__, width, height));
 
 	old_front = sna->front;
 	new_front = scrn->pScreen->CreatePixmap(scrn->pScreen,
-						 width, height,
-						 scrn->depth,
-						 SNA_CREATE_FB);
+						width, height,
+						scrn->depth,
+						SNA_CREATE_FB);
 	if (!new_front)
 		return FALSE;
 
@@ -2312,9 +2312,11 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 			sna_crtc_disable(crtc);
 	}
 
-	sna_redirect_screen_pixmap(scrn, old_front, sna->front);
-	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
-	assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
+	if (scrn->pScreen->root) {
+		sna_redirect_screen_pixmap(scrn, old_front, sna->front);
+		assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
+		assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
+	}
 
 	scrn->pScreen->DestroyPixmap(old_front);
 
commit d7879a5939cabcd8b804e19fc422d2022ab7e3a4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 12:18:08 2012 +0100

    sna/dri: Fix the double-buffer pageflipping path
    
    Notably, we need to remember to exchange the front/back buffers after
    flipping!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 5d90653..bde296d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -132,6 +132,7 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	ScrnInfoPtr scrn = sna->scrn;
 	struct drm_mode_fb_cmd arg;
 
+	assert(bo->refcnt);
 	assert(bo->proxy == NULL);
 	if (bo->delta) {
 		DBG(("%s: reusing fb=%d for handle=%d\n",
@@ -2371,8 +2372,10 @@ disable:
 			continue;
 		}
 
-		kgem_bo_destroy(&sna->kgem, crtc->bo);
-		crtc->bo = kgem_bo_reference(bo);
+		if (crtc->bo != bo) {
+			kgem_bo_destroy(&sna->kgem, crtc->bo);
+			crtc->bo = kgem_bo_reference(bo);
+		}
 
 		count++;
 	}
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index de84ac7..8dc2f01 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1567,6 +1567,10 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 					 DRI2_EXCHANGE_COMPLETE,
 					 info->event_complete,
 					 info->event_data);
+		} else {
+			info->back->name = info->old_front.name;
+			get_private(info->back)->bo = info->old_front.bo;
+			info->old_front.bo = NULL;
 		}
 	} else {
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
commit 2630c81937115602faa352fca369d89fbb926b33
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Sep 7 11:23:02 2012 +0100

    sna/dri: Make sure we set the SCANOUT flag when creating bo for pageflips
    
    In case we should be creating an untiled framebuffer, we need to make
    sure we honour the scanout alignment.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6329246..de84ac7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -257,7 +257,7 @@ sna_dri_create_buffer(DrawablePtr draw,
 				    draw->height,
 				    draw->bitsPerPixel,
 				    color_tiling(sna, draw),
-				    CREATE_EXACT);
+				    CREATE_SCANOUT | CREATE_EXACT);
 		break;
 
 	case DRI2BufferStencil:
@@ -1558,7 +1558,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 					       draw->height,
 					       draw->bitsPerPixel,
 					       get_private(info->front)->bo->tiling,
-					       CREATE_EXACT);
+					       CREATE_SCANOUT | CREATE_EXACT);
 			info->back->name = kgem_bo_flink(&sna->kgem,
 							 get_private(info->back)->bo);
 			sna->dri.flip_pending = info;
@@ -2023,7 +2023,8 @@ blit:
 				    draw->width,
 				    draw->height,
 				    draw->bitsPerPixel,
-				    I915_TILING_X, CREATE_EXACT);
+				    get_private(info->front)->bo->tiling,
+				    CREATE_SCANOUT | CREATE_EXACT);
 		name = kgem_bo_flink(&sna->kgem, bo);
 	}
 	get_private(info->back)->bo = bo;
commit aeef03520f1b2a4b8638ff1ed4fbf19c9b35e7c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 11:46:53 2012 +0100

    sna/prime: Correct the pinning flag when importing the prime bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 678f687..6baa37c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1046,7 +1046,7 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 	priv->stride = pixmap->devKind;
 
 	priv->gpu_bo = bo;
-	priv->pinned |= PIN_DRI;
+	priv->pinned |= PIN_PRIME;
 
 	close((intptr_t)fd_handle);
 	return TRUE;
commit 9bdf46d6c1a29b7b9e3d35e39a2e789334c0e1be
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 11:38:08 2012 +0100

    sna/prime: Distinguish forms of pinned pixmap
    
    So that we can allow prime to replace the backing bo of currently
    exported pixmaps through DRI2.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index b876061..44e7f6e 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -123,7 +123,10 @@ struct sna_pixmap {
 
 #define SOURCE_BIAS 4
 	uint16_t source_count;
-	uint8_t pinned :1;
+	uint8_t pinned :3;
+#define PIN_SCANOUT 0x1
+#define PIN_DRI 0x2
+#define PIN_PRIME 0x4
 	uint8_t mapped :1;
 	uint8_t shm :1;
 	uint8_t clear :1;
@@ -542,7 +545,7 @@ static inline struct kgem_bo *sna_pixmap_get_bo(PixmapPtr pixmap)
 	return sna_pixmap(pixmap)->gpu_bo;
 }
 
-static inline struct kgem_bo *sna_pixmap_pin(PixmapPtr pixmap)
+static inline struct kgem_bo *sna_pixmap_pin(PixmapPtr pixmap, unsigned flags)
 {
 	struct sna_pixmap *priv;
 
@@ -550,7 +553,7 @@ static inline struct kgem_bo *sna_pixmap_pin(PixmapPtr pixmap)
 	if (!priv)
 		return NULL;
 
-	priv->pinned = 1;
+	priv->pinned |= flags;
 	return priv->gpu_bo;
 }
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bb0bc14..678f687 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -937,39 +937,63 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	if (priv == NULL)
 		return FALSE;
 
+	assert(!priv->shm);
 	assert(priv->gpu_bo);
 	assert(priv->stride);
 
 	/* XXX negotiate format and stride restrictions */
-	if (priv->gpu_bo->tiling &&
-	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE)) {
-		DBG(("%s: failed to change tiling for export\n", __FUNCTION__));
-		return FALSE;
-	}
-
-	/* nvidia requires a minimum pitch alignment of 256 */
-	if (priv->gpu_bo->pitch & 255) {
+	if (priv->gpu_bo->tiling != I915_TILING_NONE ||
+	    priv->gpu_bo->pitch & 255) {
 		struct kgem_bo *bo;
+		BoxRec box;
 
-		if (priv->pinned) {
-			DBG(("%s: failed to change pitch for export, pinned!\n", __FUNCTION__));
+		DBG(("%s: removing tiling %d, and aligning pitch  for %dx%d pixmap=%ld\n",
+		     __FUNCTION__, priv->gpu_bo->tiling,
+		     pixmap->drawable.width, pixmap->drawable.height,
+		     pixmap->drawable.serialNumber));
+
+		if (priv->pinned & ~(PIN_DRI | PIN_PRIME)) {
+			DBG(("%s: can't convert pinned bo\n", __FUNCTION__));
 			return FALSE;
 		}
 
-		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
-				     pixmap->drawable.width,
-				     pixmap->drawable.height,
-				     ALIGN(priv->gpu_bo->pitch, 256),
-				     pixmap->drawable.bitsPerPixel);
+		assert_pixmap_damage(pixmap);
+
+		bo = kgem_create_2d(&sna->kgem,
+				    pixmap->drawable.width,
+				    pixmap->drawable.height,
+				    pixmap->drawable.bitsPerPixel,
+				    I915_TILING_NONE,
+				    CREATE_GTT_MAP | CREATE_PRIME);
 		if (bo == NULL) {
-			DBG(("%s: failed to change pitch for export\n", __FUNCTION__));
+			DBG(("%s: allocation failed\n", __FUNCTION__));
+			return FALSE;
+		}
+
+		box.x1 = box.y1 = 0;
+		box.x2 = pixmap->drawable.width;
+		box.y2 = pixmap->drawable.height;
+
+		assert(!wedged(sna)); /* XXX */
+		if (!sna->render.copy_boxes(sna, GXcopy,
+					    pixmap, priv->gpu_bo, 0, 0,
+					    pixmap, bo, 0, 0,
+					    &box, 1, 0)) {
+			DBG(("%s: copy failed\n", __FUNCTION__));
+			kgem_bo_destroy(&sna->kgem, bo);
 			return FALSE;
 		}
 
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = bo;
+
+		if (priv->mapped) {
+			pixmap->devPrivate.ptr = NULL;
+			priv->mapped = false;
+		}
 	}
 	assert(priv->gpu_bo->tiling == I915_TILING_NONE);
+	assert((priv->gpu_bo->pitch & 255) == 0);
 
 	/* And export the bo->pitch via pixmap->devKind */
 	pixmap->devPrivate.ptr = kgem_bo_map__async(&sna->kgem, priv->gpu_bo);
@@ -983,7 +1007,7 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	if (fd == -1)
 		return FALSE;
 
-	priv->pinned = true;
+	priv->pinned |= PIN_PRIME;
 
 	*fd_handle = (void *)(intptr_t)fd;
 	return TRUE;
@@ -1022,7 +1046,7 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 	priv->stride = pixmap->devKind;
 
 	priv->gpu_bo = bo;
-	priv->pinned = true;
+	priv->pinned |= PIN_DRI;
 
 	close((intptr_t)fd_handle);
 	return TRUE;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 59e128b..5d90653 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -967,7 +967,7 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 		if (!sna_crtc_enable_shadow(sna, sna_crtc))
 			return NULL;
 
-		bo = sna_pixmap_pin(sna_crtc->scanout_pixmap);
+		bo = sna_pixmap_pin(sna_crtc->scanout_pixmap, PIN_SCANOUT);
 		if (bo == NULL)
 			return NULL;
 
@@ -1009,7 +1009,7 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 	} else {
 		DBG(("%s: attaching to framebuffer\n", __FUNCTION__));
 		sna_crtc_disable_shadow(sna, sna_crtc);
-		bo = sna_pixmap_pin(sna->front);
+		bo = sna_pixmap_pin(sna->front, PIN_SCANOUT);
 		if (bo == NULL)
 			return NULL;
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index e28ea4a..6329246 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -188,7 +188,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	sna_accel_watch_flush(sna, 1);
 
 	/* Don't allow this named buffer to be replaced */
-	priv->pinned = 1;
+	priv->pinned |= PIN_DRI;
 
 	return priv->gpu_bo;
 }
@@ -368,7 +368,7 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 				     pixmap == sna->front));
 				list_del(&priv->list);
 				sna_accel_watch_flush(sna, -1);
-				priv->pinned = pixmap == sna->front;
+				priv->pinned &= ~PIN_DRI;
 			}
 
 			sna_pixmap_set_buffer(pixmap, NULL);
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index f7331a5..7a96fab 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -219,7 +219,7 @@ bool sna_glyphs_create(struct sna *sna)
 		priv = sna_pixmap(pixmap);
 		if (priv != NULL) {
 			/* Prevent the cache from ever being paged out */
-			priv->pinned = true;
+			priv->pinned = PIN_SCANOUT;
 
 			component_alpha = NeedsComponent(pPictFormat->format);
 			picture = CreatePicture(0, &pixmap->drawable, pPictFormat,
commit 39c4be2b0be66ed83bf059d3007f4143325e340d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 11:28:49 2012 +0100

    uxa: Distinguish forms of pinning
    
    This allows us to replace the prime bo to meet sharing requirements, but
    still maintain the integrity with other users.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index 5b0c5df..48ec386 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -94,7 +94,10 @@ struct intel_pixmap {
 	int8_t busy :2;
 	uint8_t dirty :1;
 	uint8_t offscreen :1;
-	uint8_t pinned :1;
+	uint8_t pinned :3;
+#define PIN_SCANOUT 0x1
+#define PIN_DRI 0x2
+#define PIN_GLAMOR 0x4
 };
 
 #if HAS_DEVPRIVATEKEYREC
diff --git a/src/intel_dri.c b/src/intel_dri.c
index a1bac9a..8cab73f 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -90,7 +90,7 @@ static uint32_t pixmap_flink(PixmapPtr pixmap)
 	if (dri_bo_flink(priv->bo, &name) != 0)
 		return 0;
 
-	priv->pinned = 1;
+	priv->pinned |= PIN_DRI;
 	return name;
 }
 
diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index 53043dd..55eb2de 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -128,7 +128,7 @@ intel_glamor_create_textured_pixmap(PixmapPtr pixmap)
 	if (glamor_egl_create_textured_pixmap(pixmap, priv->bo->handle,
 					      priv->stride)) {
 		drm_intel_bo_disable_reuse(priv->bo);
-		priv->pinned = 1;
+		priv->pinned |= PIN_GLAMOR;
 		return TRUE;
 	} else
 		return FALSE;
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 2148102..6d202c7 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -1158,7 +1158,7 @@ Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 
 	pixmap = screen->GetScreenPixmap(screen);
 	intel_set_pixmap_bo(pixmap, bo);
-	intel_get_pixmap_private(pixmap)->pinned = 1;
+	intel_get_pixmap_private(pixmap)->pinned |= PIN_SCANOUT;
 	screen->ModifyPixmapHeader(pixmap,
 				   scrn->virtualX,
 				   scrn->virtualY,
@@ -1191,7 +1191,7 @@ intel_uxa_share_pixmap_backing(PixmapPtr ppix, ScreenPtr slave, void **fd_handle
 	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 
 	if (tiling == I915_TILING_X) {
-		if (priv->pinned)
+		if (priv->pinned & ~PIN_DRI)
 			return FALSE;
 
 	        tiling = I915_TILING_NONE;
@@ -1215,7 +1215,7 @@ intel_uxa_share_pixmap_backing(PixmapPtr ppix, ScreenPtr slave, void **fd_handle
 	}
 	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 	drm_intel_bo_gem_export_to_prime(bo, &handle);
-	priv->pinned = 1;
+	priv->pinned |= PIN_DRI;
 
 	*fd_handle = (void *)(long)handle;
 	return TRUE;
commit 031fa1c1c1129e486a02ffb5b3b9071f03b60048
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 11:08:23 2012 +0100

    uxa/prime: Mark the pixmap as pinned once exported (similar to flink)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 452db6e..2148102 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -1191,6 +1191,9 @@ intel_uxa_share_pixmap_backing(PixmapPtr ppix, ScreenPtr slave, void **fd_handle
 	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 
 	if (tiling == I915_TILING_X) {
+		if (priv->pinned)
+			return FALSE;
+
 	        tiling = I915_TILING_NONE;
 
 		size = intel_uxa_pixmap_compute_size(ppix, ppix->drawable.width, ppix->drawable.height, &tiling, &stride, INTEL_CREATE_PIXMAP_DRI2);
@@ -1212,6 +1215,7 @@ intel_uxa_share_pixmap_backing(PixmapPtr ppix, ScreenPtr slave, void **fd_handle
 	}
 	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
 	drm_intel_bo_gem_export_to_prime(bo, &handle);
+	priv->pinned = 1;
 
 	*fd_handle = (void *)(long)handle;
 	return TRUE;
commit e5d3f3818660cb875ce2ef4721c51bc95317b78d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 10:54:57 2012 +0100

    sna: Add is-pinned checks to changing the pitch on an existing bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0eaa67a..bb0bc14 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -942,20 +942,29 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 
 	/* XXX negotiate format and stride restrictions */
 	if (priv->gpu_bo->tiling &&
-	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE))
+	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE)) {
+		DBG(("%s: failed to change tiling for export\n", __FUNCTION__));
 		return FALSE;
+	}
 
 	/* nvidia requires a minimum pitch alignment of 256 */
 	if (priv->gpu_bo->pitch & 255) {
 		struct kgem_bo *bo;
 
+		if (priv->pinned) {
+			DBG(("%s: failed to change pitch for export, pinned!\n", __FUNCTION__));
+			return FALSE;
+		}
+
 		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 				     pixmap->drawable.width,
 				     pixmap->drawable.height,
 				     ALIGN(priv->gpu_bo->pitch, 256),
 				     pixmap->drawable.bitsPerPixel);
-		if (bo == NULL)
+		if (bo == NULL) {
+			DBG(("%s: failed to change pitch for export\n", __FUNCTION__));
 			return FALSE;
+		}
 
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = bo;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index ff18fe3..e28ea4a 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -362,6 +362,10 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 
 			/* Undo the DRI markings on this pixmap */
 			if (priv->flush && --priv->flush == 0) {
+				DBG(("%s: releasing last DRI pixmap=%ld, scanout?=%d\n",
+				     __FUNCTION__,
+				     pixmap->drawable.serialNumber,
+				     pixmap == sna->front));
 				list_del(&priv->list);
 				sna_accel_watch_flush(sna, -1);
 				priv->pinned = pixmap == sna->front;
commit f2affe403baea78b9c94e3d726d1b9d8a0004f35
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 10:24:04 2012 +0100

    sna: Apply the minimum 256 pitch to CREATE_USAGE_SHARED pixmaps as well
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4b1738e..902cba7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -984,12 +984,21 @@ static uint32_t kgem_get_unique_id(struct kgem *kgem)
 	return id;
 }
 
+inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
+{
+	if (flags & CREATE_PRIME)
+		return 256;
+	if (flags & CREATE_SCANOUT)
+		return 64;
+	return kgem->min_alignment;
+}
+
 static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 				   uint32_t width, uint32_t bpp,
-				   bool scanout)
+				   unsigned flags)
 {
 	width = ALIGN(width, 2) * bpp >> 3;
-	return ALIGN(width, scanout ? 64 : kgem->min_alignment);
+	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
 }
 
 void kgem_get_tile_size(struct kgem *kgem, int tiling,
@@ -1033,7 +1042,7 @@ void kgem_get_tile_size(struct kgem *kgem, int tiling,
 
 static uint32_t kgem_surface_size(struct kgem *kgem,
 				  bool relaxed_fencing,
-				  bool scanout,
+				  unsigned flags,
 				  uint32_t width,
 				  uint32_t height,
 				  uint32_t bpp,
@@ -1058,7 +1067,7 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 		} else {
 			tile_width = 2 * bpp >> 3;
 			tile_width = ALIGN(tile_width,
-					   scanout ? 64 : kgem->min_alignment);
+					   kgem_pitch_alignment(kgem, flags));
 			tile_height = 2;
 		}
 	} else switch (tiling) {
@@ -1066,7 +1075,7 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 	case I915_TILING_NONE:
 		tile_width = 2 * bpp >> 3;
 		tile_width = ALIGN(tile_width,
-				   scanout ? 64 : kgem->min_alignment);
+				   kgem_pitch_alignment(kgem, flags));
 		tile_height = 2;
 		break;
 	case I915_TILING_X:
@@ -2991,7 +3000,7 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 		return 0;
 	}
 
-	size = kgem_surface_size(kgem, false, false,
+	size = kgem_surface_size(kgem, false, 0,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
 	if (size > 0 && size <= kgem->max_cpu_size)
@@ -3006,7 +3015,7 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 		return 0;
 	}
 
-	size = kgem_surface_size(kgem, false, false,
+	size = kgem_surface_size(kgem, false, 0,
 				 width, height, bpp,
 				 kgem_choose_tiling(kgem, I915_TILING_X,
 						    width, height, bpp),
@@ -3059,18 +3068,17 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	if (tiling < 0)
 		tiling = -tiling, flags |= CREATE_EXACT;
 
-	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, temp?=%d)\n", __FUNCTION__,
+	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
 	     width, height, bpp, tiling,
 	     !!(flags & CREATE_EXACT),
 	     !!(flags & CREATE_INACTIVE),
 	     !!(flags & CREATE_CPU_MAP),
 	     !!(flags & CREATE_GTT_MAP),
 	     !!(flags & CREATE_SCANOUT),
+	     !!(flags & CREATE_PRIME),
 	     !!(flags & CREATE_TEMPORARY)));
 
-	size = kgem_surface_size(kgem,
-				 kgem->has_relaxed_fencing,
-				 flags & CREATE_SCANOUT,
+	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
 				 width, height, bpp, tiling, &pitch);
 	assert(size && size <= kgem->max_object_size);
 	size /= PAGE_SIZE;
@@ -3084,9 +3092,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			goto create;
 
 		tiled_height = kgem_aligned_height(kgem, height, I915_TILING_Y);
-		untiled_pitch = kgem_untiled_pitch(kgem,
-						   width, bpp,
-						   flags & CREATE_SCANOUT);
+		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
 
 		list_for_each_entry(bo, &kgem->large, list) {
 			assert(!bo->purged);
@@ -3289,14 +3295,10 @@ search_again:
 	}
 
 	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
-		untiled_pitch = kgem_untiled_pitch(kgem,
-						   width, bpp,
-						   flags & CREATE_SCANOUT);
+		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
 		i = tiling;
 		while (--i >= 0) {
-			tiled_height = kgem_surface_size(kgem,
-							 kgem->has_relaxed_fencing,
-							 flags & CREATE_SCANOUT,
+			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
 							 width, height, bpp, tiling, &pitch);
 			cache = active(kgem, tiled_height / PAGE_SIZE, i);
 			tiled_height = kgem_aligned_height(kgem, height, i);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0d5384b..1dc9c67 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -242,9 +242,10 @@ enum {
 	CREATE_CPU_MAP = 0x4,
 	CREATE_GTT_MAP = 0x8,
 	CREATE_SCANOUT = 0x10,
-	CREATE_TEMPORARY = 0x20,
-	CREATE_NO_RETIRE = 0x40,
-	CREATE_NO_THROTTLE = 0x40,
+	CREATE_PRIME = 0x20,
+	CREATE_TEMPORARY = 0x40,
+	CREATE_NO_RETIRE = 0x80,
+	CREATE_NO_THROTTLE = 0x100,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c00b5b8..0eaa67a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1020,7 +1020,8 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 }
 
 static PixmapPtr
-sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen, int depth)
+sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen,
+			 int width, int height, int depth)
 {
 	PixmapPtr pixmap;
 	struct sna_pixmap *priv;
@@ -1032,8 +1033,6 @@ sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen, int depth)
 	if (pixmap == NullPixmap)
 		return NullPixmap;
 
-	pixmap->drawable.width = 0;
-	pixmap->drawable.height = 0;
 	pixmap->devKind = 0;
 	pixmap->devPrivate.ptr = NULL;
 
@@ -1045,6 +1044,42 @@ sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen, int depth)
 
 	priv->stride = 0;
 	priv->create = 0;
+
+	if (width|height) {
+		int bpp = bits_per_pixel(depth);
+
+		priv->gpu_bo = kgem_create_2d(&sna->kgem,
+					      width, height, bpp,
+					      I915_TILING_NONE,
+					      CREATE_GTT_MAP | CREATE_PRIME);
+		if (priv->gpu_bo == NULL) {
+			free(priv);
+			FreePixmap(pixmap);
+			return NullPixmap;
+		}
+
+		/* minimal interface for sharing is linear, 256 byte pitch */
+		assert(priv->gpu_bo->tiling == I915_TILING_NONE);
+		assert((priv->gpu_bo->pitch & 255) == 0);
+
+		pixmap->devPrivate.ptr =
+			kgem_bo_map__async(&sna->kgem, priv->gpu_bo);
+		if (pixmap->devPrivate.ptr == NULL) {
+			free(priv);
+			FreePixmap(pixmap);
+			return FALSE;
+		}
+
+		pixmap->devKind = priv->gpu_bo->pitch;
+		pixmap->drawable.width = width;
+		pixmap->drawable.height = height;
+
+		priv->stride = priv->gpu_bo->pitch;
+		priv->mapped = true;
+
+		sna_damage_all(&priv->gpu_damage, width, height);
+	}
+
 	return pixmap;
 }
 #endif
@@ -1061,23 +1096,18 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
-	if ((width|height) == 0) {
 #ifdef CREATE_PIXMAP_USAGE_SHARED
-		if (usage == CREATE_PIXMAP_USAGE_SHARED)
-			return sna_create_pixmap_shared(sna, screen, depth);
+	if (usage == CREATE_PIXMAP_USAGE_SHARED)
+		return sna_create_pixmap_shared(sna, screen,
+						width, height, depth);
 #endif
+
+	if ((width|height) == 0) {
 		usage = -1;
 		goto fallback;
 	}
 	assert(width && height);
 
-#ifdef CREATE_PIXMAP_USAGE_SHARED
-	if (usage == CREATE_PIXMAP_USAGE_SHARED)
-		return sna_pixmap_create_scratch(screen,
-						 width, height, depth,
-						 I915_TILING_NONE);
-#endif
-
 	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
 	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
commit fbe4080816191ee97347128444f56bcc34858481
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 08:57:34 2012 +0100

    prime: Align pitch of shared untiled buffers to 256 bytes
    
    In order for nvidia to handle the buffers we are currently generating,
    they need to have a pitch alignment of 256 bytes. Make it so.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 9bb0e0a..452db6e 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -228,11 +228,14 @@ intel_uxa_pixmap_compute_size(PixmapPtr pixmap,
 	}
 
 	if (*tiling == I915_TILING_NONE) {
+		/* We only require a 64 byte alignment for scanouts, but
+		 * a 256 byte alignment for sharing with PRIME.
+		 */
+		*stride = ALIGN(pitch, 256);
 		/* Round the height up so that the GPU's access to a 2x2 aligned
 		 * subspan doesn't address an invalid page offset beyond the
 		 * end of the GTT.
 		 */
-		*stride = ALIGN(pitch, 64);
 		size = *stride * ALIGN(h, 2);
 	}
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1a02db3..c00b5b8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -944,6 +944,22 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	if (priv->gpu_bo->tiling &&
 	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE))
 		return FALSE;
+
+	/* nvidia requires a minimum pitch alignment of 256 */
+	if (priv->gpu_bo->pitch & 255) {
+		struct kgem_bo *bo;
+
+		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
+				     pixmap->drawable.width,
+				     pixmap->drawable.height,
+				     ALIGN(priv->gpu_bo->pitch, 256),
+				     pixmap->drawable.bitsPerPixel);
+		if (bo == NULL)
+			return FALSE;
+
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+		priv->gpu_bo = bo;
+	}
 	assert(priv->gpu_bo->tiling == I915_TILING_NONE);
 
 	/* And export the bo->pitch via pixmap->devKind */
commit c5fb025267bf1019907dfcc60f0efef2691be436
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Sep 6 08:55:46 2012 +0100

    sna: Correct assertions for adding damage that bypasses the region
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 0390bf9..d726b61 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -658,6 +658,7 @@ fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(RegionNumRects(&damage->region));
 	assert(damage->region.extents.x2 > damage->region.extents.x1);
 	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
@@ -741,8 +742,10 @@ struct sna_damage *_sna_damage_add_boxes(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
-	assert(damage->region.extents.x2 > damage->region.extents.x1);
-	assert(damage->region.extents.y2 > damage->region.extents.y1);
+	if (RegionNumRects(&damage->region)) {
+		assert(damage->region.extents.x2 > damage->region.extents.x1);
+		assert(damage->region.extents.y2 > damage->region.extents.y1);
+	}
 
 	return damage;
 }
@@ -828,8 +831,10 @@ struct sna_damage *_sna_damage_add_rectangles(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
-	assert(damage->region.extents.x2 > damage->region.extents.x1);
-	assert(damage->region.extents.y2 > damage->region.extents.y1);
+	if (RegionNumRects(&damage->region)) {
+		assert(damage->region.extents.x2 > damage->region.extents.x1);
+		assert(damage->region.extents.y2 > damage->region.extents.y1);
+	}
 
 	return damage;
 }
@@ -912,8 +917,10 @@ struct sna_damage *_sna_damage_add_points(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
-	assert(damage->region.extents.x2 > damage->region.extents.x1);
-	assert(damage->region.extents.y2 > damage->region.extents.y1);
+	if (RegionNumRects(&damage->region)) {
+		assert(damage->region.extents.x2 > damage->region.extents.x1);
+		assert(damage->region.extents.y2 > damage->region.extents.y1);
+	}
 
 	return damage;
 }
@@ -940,6 +947,7 @@ fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(RegionNumRects(&damage->region));
 	assert(damage->region.extents.x2 > damage->region.extents.x1);
 	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
commit 1f5d5a37e57e63fa6e5b336a4847ce4422b89044
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 17:17:58 2012 +0100

    Add PlatformProbe to handle sharing of device entities
    
    Reported-by: Nick Bowler <nbowler at draconx.ca>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54561
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index d764dbb..a5d071b 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -546,6 +546,14 @@ intel_platform_probe(DriverPtr driver,
 		return FALSE;
 
 	scrn = xf86AllocateScreen(driver, 0);
+	if (scrn == NULL)
+		return FALSE;
+
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "using device path '%s'\n", path ? path : "Default device");
+
+	if (xf86IsEntitySharable(entity_num))
+		xf86SetEntityShared(entity_num);
 	xf86AddEntityToScreen(scrn, entity_num);
 
 	scrn->driverVersion = INTEL_VERSION;
@@ -553,20 +561,16 @@ intel_platform_probe(DriverPtr driver,
 	scrn->name = INTEL_NAME;
 	scrn->driverPrivate = (void *)(match_data | 1);
 	scrn->Probe = NULL;
+
 	switch (get_accel_method()) {
 #if USE_SNA
-        case SNA: sna_init_scrn(scrn, entity_num); break;
+        case SNA: return sna_init_scrn(scrn, entity_num);
 #endif
-
 #if USE_UXA
-        case UXA: intel_init_scrn(scrn); break;
+        case UXA: return intel_init_scrn(scrn);
 #endif
 	default: return FALSE;
 	}
-
-	xf86DrvMsg(scrn->scrnIndex, X_INFO,
-		   "using drv %s\n", path ? path : "Default device");
-	return scrn != NULL;
 }
 #endif
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index f0db471..a5c106e 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -226,7 +226,7 @@ struct sna_device {
 	int fd;
 	int open_count;
 };
-static int sna_device_key;
+static int sna_device_key = -1;
 
 static inline struct sna_device *sna_device(ScrnInfoPtr scrn)
 {
@@ -1080,8 +1080,6 @@ static Bool sna_pm_event(SCRN_ARG_TYPE arg, pmEvent event, Bool undo)
 
 Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 {
-	EntityInfoPtr entity;
-
 #if defined(USE_GIT_DESCRIBE)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled from %s\n", git_version);
@@ -1105,7 +1103,8 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	DBG(("%s\n", __FUNCTION__));
 	DBG(("pixman version: %s\n", pixman_version_string()));
 
-	sna_device_key = xf86AllocateEntityPrivateIndex();
+	if (sna_device_key == -1)
+		sna_device_key = xf86AllocateEntityPrivateIndex();
 
 	scrn->PreInit = sna_pre_init;
 	scrn->ScreenInit = sna_screen_init;
@@ -1119,15 +1118,9 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 
 	scrn->ModeSet = sna_mode_set;
 
-	entity = xf86GetEntityInfo(entity_num);
-	if (!entity)
-		return FALSE;
-
 	xf86SetEntitySharable(entity_num);
-	xf86SetEntityInstanceForScreen(scrn,
-				       entity->index,
-				       xf86GetNumEntityInstances(entity->index)-1);
-	free(entity);
+	xf86SetEntityInstanceForScreen(scrn, entity_num,
+				       xf86GetNumEntityInstances(entity_num)-1);
 
 	return TRUE;
 }
commit ddde40afc081f47a3b6b694aeb21682c240c9562
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 16:48:01 2012 +0100

    sna: Harden against initialisation failures
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=54561
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index cfd92e9..d764dbb 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -561,7 +561,7 @@ intel_platform_probe(DriverPtr driver,
 #if USE_UXA
         case UXA: intel_init_scrn(scrn); break;
 #endif
-	default: break;
+	default: return FALSE;
 	}
 
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index b814e1f..f0db471 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -230,6 +230,9 @@ static int sna_device_key;
 
 static inline struct sna_device *sna_device(ScrnInfoPtr scrn)
 {
+	if (scrn->entityList == NULL)
+		return NULL;
+
 	return xf86GetEntityPrivate(scrn->entityList[0], sna_device_key)->ptr;
 }
 
@@ -963,12 +966,11 @@ static void sna_free_screen(FREE_SCREEN_ARGS_DECL)
 
 	DBG(("%s\n", __FUNCTION__));
 
-	if (sna) {
+	if (sna && ((intptr_t)sna & 1) == 0) {
 		sna_mode_fini(sna);
-
 		free(sna);
-		scrn->driverPrivate = NULL;
 	}
+	scrn->driverPrivate = NULL;
 
 	sna_close_drm_master(scrn);
 }
@@ -1117,12 +1119,11 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 
 	scrn->ModeSet = sna_mode_set;
 
-	xf86SetEntitySharable(scrn->entityList[0]);
-
 	entity = xf86GetEntityInfo(entity_num);
 	if (!entity)
 		return FALSE;
 
+	xf86SetEntitySharable(entity_num);
 	xf86SetEntityInstanceForScreen(scrn,
 				       entity->index,
 				       xf86GetNumEntityInstances(entity->index)-1);
commit 4627dc36fa56ab971cd90088bd29a597dd12dc02
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 16:17:17 2012 +0100

    Revert "sna: Flush the batch before a render operation if the GPU is idle"
    
    This reverts commit ad57ac07a273bf376b74884de47d8ee1e7129fb8.
    
    These checks end up being too frequent and not allowing us to batch
    sufficient commands to offset the overhead of batch submission. Hmm.

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index de38f0a..4b1738e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3578,9 +3578,6 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
 	     __FUNCTION__, num_pages, num_exec));
 
-	if (kgem->nexec && kgem_is_idle(kgem))
-		return false;
-
 	if (!num_pages)
 		return true;
 
commit 886f3cef4101a674bb62656cf7f3046643cae4b1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 15:27:21 2012 +0100

    uxa: Fix cut'n'paste error in Option "Backlight"
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 233c6af..0372f9f 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -265,7 +265,7 @@ intel_output_backlight_init(xf86OutputPtr output)
 	if (str != NULL) {
 		sprintf(path, "%s/%s", BACKLIGHT_CLASS, str);
 		if (!stat(path, &buf)) {
-			intel_output->backlight_iface = backlight_interfaces[i];
+			intel_output->backlight_iface = str;
 			intel_output->backlight_max = intel_output_backlight_get_max(output);
 			if (intel_output->backlight_max > 0) {
 				xf86DrvMsg(output->scrn->scrnIndex, X_CONFIG,
commit 6211d0914e2ed1cdd4cc2e170ee16129fc9f0d73
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 12:33:08 2012 +0100

    uxa: Flush batch immediately after queuing a WAIT_FOR_EVENT
    
    We still encounter hangs with kernel-3.5 with the culprit being a wait
    on a disabled pipe. As we thoroughly check before that the pipe is still
    disabled and flush before a modeset, the only possibility that remains
    is that DPMS is disabling the pipe before we submit. Close that race by
    always submitting the batch immediately after a WAIT_FOR_EVENT.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index fa1660c..a1bac9a 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -540,6 +540,11 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 			  0, 0);
 
 	FreeScratchGC(gc);
+
+	/* And make sure the WAIT_FOR_EVENT is queued before any
+	 * modesetting/dpms operations on the pipe.
+	 */
+	intel_batch_submit(scrn);
 }
 
 #if DRI2INFOREC_VERSION >= 4
diff --git a/src/intel_video.c b/src/intel_video.c
index 0be72e2..09782aa 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -1585,6 +1585,11 @@ I830PutImageTextured(ScrnInfoPtr scrn,
 	intel_get_screen_private(scrn)->needs_flush = TRUE;
 	DamageDamageRegion(drawable, clipBoxes);
 
+	/* And make sure the WAIT_FOR_EVENT is queued before any
+	 * modesetting/dpms operations on the pipe.
+	 */
+	intel_batch_submit(scrn);
+
 	return Success;
 }
 
commit 6a1423547769c24d15f6475628981d14a76a4bb2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 12:15:52 2012 +0100

    sna: Initialise the empty boxes field of the region before subtracting it
    
    Fixes regression from commit 38fb77af757318e5fb6f605b37306ce4585b11a5
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Wed Sep 5 08:23:34 2012 +0100
    
        sna: Don't upload ignored cpu damage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 41f53d4..1a02db3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2590,6 +2590,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 			region.extents.x2 += dx;
 			region.extents.y1 += dy;
 			region.extents.y2 += dy;
+			region.data = NULL;
 
 			sna_damage_subtract(&priv->cpu_damage, &region);
 			if (priv->cpu_damage == NULL) {
commit dff25e5ec4071a0404f82760e8deec3f99f4a0a9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 11:05:28 2012 +0100

    sna: Drop master after discarding framebuffers
    
    As Imre Deak pointed out in the previous patch, drmModeRmFB only works
    when we hold the DRM master, therefore to prevent a leak of the
    framebuffer across server reset we need to defer dropping master until
    after we release our scanouts and modes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a193607..b814e1f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -701,15 +701,12 @@ static void sna_leave_vt(VT_FUNC_ARGS_DECL)
 {
 	SCRN_INFO_PTR(arg);
 	struct sna *sna = to_sna(scrn);
-	int ret;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	xf86RotateFreeShadow(scrn);
 	xf86_hide_cursors(scrn);
 
-	ret = drmDropMaster(sna->kgem.fd);
-	if (ret)
+	if (drmDropMaster(sna->kgem.fd))
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "drmDropMaster failed: %s\n", strerror(errno));
 }
@@ -739,16 +736,14 @@ static Bool sna_early_close_screen(CLOSE_SCREEN_ARGS_DECL)
 	if (sna_mode_has_pending_events(sna))
 		sna_mode_wakeup(sna);
 
-	if (scrn->vtSema == TRUE) {
-		sna_leave_vt(VT_FUNC_ARGS(0));
-		scrn->vtSema = FALSE;
-	}
-
 	if (sna->dri_open) {
 		sna_dri_close(sna, screen);
 		sna->dri_open = false;
 	}
 
+	xf86_hide_cursors(scrn);
+	scrn->vtSema = FALSE;
+
 	xf86_cursors_fini(screen);
 
 	return TRUE;
@@ -769,6 +764,7 @@ static Bool sna_late_close_screen(CLOSE_SCREEN_ARGS_DECL)
 	}
 
 	sna_accel_close(sna);
+	drmDropMaster(sna->kgem.fd);
 
 	depths = screen->allowedDepths;
 	for (d = 0; d < screen->numDepths; d++)
commit 46981d01700c1159bfb6bc0aebc938ff1d447a0f
Author: Imre Deak <imre.deak at intel.com>
Date:   Fri Aug 31 14:31:51 2012 +0300

    uxa: fix leakage of FB when closing the screen
    
    Calling drmModeRmFB is only allowed in DRM master mode. Since leaving
    the VT also drops master mode we need to remove the FB before calling
    I830LeaveVT.
    
    This is only a real leak in case of a server reset, otherwise the server
    process will exit anyway and the kernel will clean up the FB.
    
    Signed-off-by: Imre Deak <imre.deak at intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 218b583..1b2c616 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1136,10 +1136,6 @@ static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 	I830UeventFini(scrn);
 #endif
 
-	if (scrn->vtSema == TRUE) {
-		I830LeaveVT(VT_FUNC_ARGS(0));
-	}
-
 	DeleteCallback(&FlushCallback, intel_flush_callback, scrn);
 
 	intel_glamor_close_screen(screen);
@@ -1169,6 +1165,10 @@ static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 		intel->front_buffer = NULL;
 	}
 
+	if (scrn->vtSema == TRUE) {
+		I830LeaveVT(VT_FUNC_ARGS(0));
+	}
+
 	intel_batch_teardown(scrn);
 
 	if (INTEL_INFO(intel)->gen >= 40)
commit 55cef974a5dad3fda1922648fa27bcf5bb32ea03
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 09:38:47 2012 +0100

    sna: Review validity of damage when discarding CPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3c0736e..41f53d4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1452,12 +1452,11 @@ skip_inplace_map:
 		priv->mapped = false;
 	}
 
-	if (priv->clear) {
-		if (priv->cpu_bo && !priv->cpu_bo->flush &&
-		    __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
-			assert(!priv->shm);
-			sna_pixmap_free_cpu(sna, priv);
-		}
+	if (priv->clear && priv->cpu_bo && !priv->cpu_bo->flush &&
+	    __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
+		assert(!priv->shm);
+		assert(DAMAGE_IS_ALL(priv->gpu_damage));
+		sna_pixmap_free_cpu(sna, priv);
 	}
 
 	if (pixmap->devPrivate.ptr == NULL &&
@@ -3462,36 +3461,33 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		priv->gpu_bo = NULL;
 	}
 
-	if (priv->cpu_bo) {
-		/* If the GPU is currently accessing the CPU pixmap, then
-		 * we will need to wait for that to finish before we can
-		 * modify the memory.
-		 *
-		 * However, we can queue some writes to the GPU bo to avoid
-		 * the wait. Or we can try to replace the CPU bo.
-		 */
-		if (!priv->shm && __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
-			assert(!priv->cpu_bo->flush);
-			DBG(("%s: cpu bo will stall, upload damage and discard\n",
-			     __FUNCTION__));
-			if (priv->cpu_damage) {
-				if (!region_subsumes_drawable(region, &pixmap->drawable)) {
-					sna_damage_subtract(&priv->cpu_damage, region);
-					if (!sna_pixmap_move_to_gpu(pixmap,
-								    MOVE_WRITE))
-						return false;
-				} else {
-					sna_damage_destroy(&priv->cpu_damage);
-					priv->undamaged = false;
-				}
-				assert(priv->cpu_damage == NULL);
+	/* If the GPU is currently accessing the CPU pixmap, then
+	 * we will need to wait for that to finish before we can
+	 * modify the memory.
+	 *
+	 * However, we can queue some writes to the GPU bo to avoid
+	 * the wait. Or we can try to replace the CPU bo.
+	 */
+	if (!priv->shm && priv->cpu_bo && __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
+		assert(!priv->cpu_bo->flush);
+		DBG(("%s: cpu bo will stall, upload damage and discard\n",
+		     __FUNCTION__));
+		if (priv->cpu_damage) {
+			if (!region_subsumes_drawable(region, &pixmap->drawable)) {
+				sna_damage_subtract(&priv->cpu_damage, region);
+				if (!sna_pixmap_move_to_gpu(pixmap,
+							    MOVE_WRITE))
+					return false;
+			} else {
+				sna_damage_destroy(&priv->cpu_damage);
+				priv->undamaged = false;
 			}
-			if (!priv->undamaged)
-				sna_damage_all(&priv->gpu_damage,
-					       pixmap->drawable.width,
-					       pixmap->drawable.height);
-			sna_pixmap_free_cpu(sna, priv);
+			assert(priv->cpu_damage == NULL);
 		}
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+		sna_pixmap_free_cpu(sna, priv);
 	}
 
 	if (priv->mapped) {
commit 587499bbf55b7eb0e1848822a792d535a8a3db1b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 10:56:18 2012 +0100

    sna/video: Use the scanout flag and FB id for sprite framebuffers
    
    So that we can use the same teardown path as normal scanouts.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 7f687f7..de38f0a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1331,6 +1331,7 @@ static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
 	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
 	if (bo->delta) {
+		/* XXX will leak if we are not DRM_MASTER. *shrug* */
 		drmModeRmFB(kgem->fd, bo->delta);
 		bo->delta = 0;
 	}
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 4c260cd..59e128b 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -153,6 +153,7 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	arg.depth = scrn->depth;
 	arg.handle = bo->handle;
 
+	assert(sna->scrn->vtSema); /* must be master */
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_ADDFB, &arg)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d: %d\n",
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index b8690ec..e7b335a 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -80,17 +80,12 @@ void sna_video_free_buffers(struct sna *sna, struct sna_video *video)
 
 	for (i = 0; i < ARRAY_SIZE(video->old_buf); i++) {
 		if (video->old_buf[i]) {
-			if (video->old_buf[i]->unique_id)
-				drmModeRmFB(sna->kgem.fd,
-						video->old_buf[i]->unique_id);
 			kgem_bo_destroy(&sna->kgem, video->old_buf[i]);
 			video->old_buf[i] = NULL;
 		}
 	}
 
 	if (video->buf) {
-		if (video->buf->unique_id)
-			drmModeRmFB(sna->kgem.fd, video->buf->unique_id);
 		kgem_bo_destroy(&sna->kgem, video->buf);
 		video->buf = NULL;
 	}
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index 87c5845..a912590 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -203,7 +203,7 @@ sna_video_sprite_show(struct sna *sna,
 	}
 #endif
 
-	if (frame->bo->unique_id == 0) {
+	if (frame->bo->delta == 0) {
 		uint32_t offsets[4], pitches[4], handles[4];
 		uint32_t pixel_format;
 
@@ -227,19 +227,24 @@ sna_video_sprite_show(struct sna *sna,
 		if (drmModeAddFB2(sna->kgem.fd,
 				  frame->width, frame->height, pixel_format,
 				  handles, pitches, offsets,
-				  &frame->bo->unique_id, 0)) {
+				  &frame->bo->delta, 0)) {
 			xf86DrvMsg(sna->scrn->scrnIndex,
 				   X_ERROR, "failed to add fb\n");
 			return false;
 		}
+
+		frame->bo->scanout = true;
 	}
 
 	DBG(("%s: updating plane=%d, handle=%d [fb %d], dst=(%d,%d)x(%d,%d)\n",
-	     __FUNCTION__, plane, frame->bo->handle, frame->bo->unique_id,
+	     __FUNCTION__, plane, frame->bo->handle, frame->bo->delta,
 	     dstBox->x1, dstBox->y1,
 	     dstBox->x2 - dstBox->x1, dstBox->y2 - dstBox->y1));
+	assert(frame->bo->scanout);
+	assert(frame->bo->delta);
+
 	if (drmModeSetPlane(sna->kgem.fd,
-			    plane, sna_crtc_id(crtc), frame->bo->unique_id, 0,
+			    plane, sna_crtc_id(crtc), frame->bo->delta, 0,
 			    dstBox->x1, dstBox->y1,
 			    dstBox->x2 - dstBox->x1, dstBox->y2 - dstBox->y1,
 			    0, 0, frame->width << 16, frame->height << 16))
commit 913adacc5400f94bc754f22375447e6f90ccd510
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 09:20:55 2012 +0100

    sna/gen7: Always emit a stall when flushing the texture cache
    
    Reported-by: Reinhard Karcher <reinhard.karcher at gmx.net>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=54488
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 0cc4cba..705a17d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -994,12 +994,12 @@ gen7_emit_vertex_elements(struct sna *sna,
 }
 
 inline static void
-gen7_emit_pipe_invalidate(struct sna *sna, bool stall)
+gen7_emit_pipe_invalidate(struct sna *sna)
 {
 	OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
 	OUT_BATCH(GEN7_PIPE_CONTROL_WC_FLUSH |
 		  GEN7_PIPE_CONTROL_TC_FLUSH |
-		  (stall ? GEN7_PIPE_CONTROL_CS_STALL : 0));
+		  GEN7_PIPE_CONTROL_CS_STALL);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 }
@@ -1043,9 +1043,7 @@ gen7_emit_state(struct sna *sna,
 	need_stall &= gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
-		if (op->dst.bo == op->src.bo || op->dst.bo == op->mask.bo)
-			need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
-		gen7_emit_pipe_invalidate(sna, need_stall);
+		gen7_emit_pipe_invalidate(sna);
 		kgem_clear_dirty(&sna->kgem);
 		if (op->dst.bo->exec)
 			kgem_bo_mark_dirty(op->dst.bo);
@@ -1069,7 +1067,7 @@ static void gen7_magic_ca_pass(struct sna *sna,
 	DBG(("%s: CA fixup (%d -> %d)\n", __FUNCTION__,
 	     sna->render.vertex_start, sna->render.vertex_index));
 
-	gen7_emit_pipe_invalidate(sna, true);
+	gen7_emit_pipe_invalidate(sna);
 
 	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, true, op->dst.format));
 	gen7_emit_wm(sna,
commit bdfedb46bd5bb3b96543a3ff553abd8ed0774fad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 09:15:12 2012 +0100

    sna: Use async upload only if the last render was not CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 66a0833..3c0736e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2986,7 +2986,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
-		if (DAMAGE_IS_ALL(priv->cpu_damage))
+		if (!priv->cpu)
 			flags |= MOVE_ASYNC_HINT;
 
 		ok = false;
commit 67b75ae6f66e2d8e97b0aaf70d66b6de30f9e67e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 08:46:39 2012 +0100

    sna: Tweak upload path to remove redundant code
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7686024..66a0833 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2996,10 +2996,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n, 0);
-			if (ok && priv->shm) {
-				assert(!priv->flush);
-				sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
-			}
 		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
@@ -3037,10 +3033,12 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	}
 
 	/* For large bo, try to keep only a single copy around */
-	if (priv->create & KGEM_CAN_CREATE_LARGE)
+	if (priv->create & KGEM_CAN_CREATE_LARGE) {
 		sna_damage_all(&priv->gpu_damage,
 			       pixmap->drawable.width,
 			       pixmap->drawable.height);
+		sna_pixmap_free_cpu(sna, priv);
+	}
 done:
 	list_del(&priv->list);
 
commit 38fb77af757318e5fb6f605b37306ce4585b11a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 08:23:34 2012 +0100

    sna: Don't upload ignored cpu damage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1156e78..7686024 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -61,6 +61,7 @@
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
 #define USE_INACTIVE 0
+#define USE_CPU_BO 1
 
 #define MIGRATE_ALL 0
 #define DBG_NO_CPU_UPLOAD 0
@@ -2525,11 +2526,13 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		DBG(("%s: use GPU fast path (all-damaged)\n", __FUNCTION__));
+		assert(priv->cpu_damage == NULL);
 		goto use_gpu_bo;
 	}
 
 	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
 		DBG(("%s: use CPU fast path (all-damaged)\n", __FUNCTION__));
+		assert(priv->gpu_damage == NULL);
 		goto use_cpu_bo;
 	}
 
@@ -2580,6 +2583,20 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 					goto use_cpu_bo;
 				}
 			}
+		} else if (priv->cpu_damage) {
+			get_drawable_deltas(drawable, pixmap, &dx, &dy);
+
+			region.extents = *box;
+			region.extents.x1 += dx;
+			region.extents.x2 += dx;
+			region.extents.y1 += dy;
+			region.extents.y2 += dy;
+
+			sna_damage_subtract(&priv->cpu_damage, &region);
+			if (priv->cpu_damage == NULL) {
+				list_del(&priv->list);
+				priv->undamaged = false;
+			}
 		}
 
 create_gpu_bo:
@@ -2690,6 +2707,9 @@ use_gpu_bo:
 	return priv->gpu_bo;
 
 use_cpu_bo:
+	if (!USE_CPU_BO)
+		return NULL;
+
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
commit 2eb32f711ddd65a55c3c21f813b41aca49635a22
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 5 00:22:04 2012 +0100

    sna: Add DBG control to disable CPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8d38308..7f687f7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -60,6 +60,7 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG_NO_TILING 0
 #define DBG_NO_CACHE 0
 #define DBG_NO_CACHE_LEVEL 0
+#define DBG_NO_CPU 0
 #define DBG_NO_USERPTR 0
 #define DBG_NO_LLC 0
 #define DBG_NO_SEMAPHORES 0
@@ -874,7 +875,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->next_request = __kgem_request_alloc();
 
 	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_userptr | kgem->has_cacheing,
+	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
 	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
 
 	VG_CLEAR(aperture);
@@ -951,6 +952,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 			kgem->large_object_size = kgem->max_cpu_size;
 	} else
 		kgem->max_cpu_size = 0;
+	if (DBG_NO_CPU)
+		kgem->max_cpu_size = 0;
 
 	DBG(("%s: maximum object size=%d\n",
 	     __FUNCTION__, kgem->max_object_size));
@@ -3429,6 +3432,9 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 	struct kgem_bo *bo;
 	int stride, size;
 
+	if (DBG_NO_CPU)
+		return NULL;
+
 	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
 
 	if (kgem->has_llc) {
commit 19e170aa6fd3652cc2e983a291f82350dca27e52
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 23:52:42 2012 +0100

    sna: Fix comparison of memcpy overlap to include x-offsets
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbblt.c b/src/sna/fb/fbblt.c
index 3b3fa48..62ed2a1 100644
--- a/src/sna/fb/fbblt.c
+++ b/src/sna/fb/fbblt.c
@@ -287,19 +287,18 @@ fbBlt(FbBits *srcLine, FbStride srcStride, int srcX,
 
 		DBG(("%s fast blt, src_stride=%d, dst_stride=%d, width=%d (offset=%d)\n",
 		     __FUNCTION__,
-		     srcStride, dstStride, width,
-		     srcLine - dstLine));
+		     srcStride, dstStride, width, s - d));
 
-		if ((srcLine < dstLine && srcLine + width > dstLine) ||
-		    (dstLine < srcLine && dstLine + width > srcLine))
+		if (width == srcStride && width == dstStride) {
+			width *= height;
+			height = 1;
+		}
+
+		if ((s < d && s + width > d) || (d < s && d + width > s))
 			func = memmove;
 		else
 			func = memcpy;
 		if (!upsidedown) {
-			if (srcStride == dstStride && srcStride == width) {
-				width *= height;
-				height = 1;
-			}
 			for (i = 0; i < height; i++)
 				func(d + i * dstStride,
 				     s + i * srcStride,
commit 7895416eabfc5bd361d0d0ff92c570e2f6af47e0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 20:30:53 2012 +0100

    sna: Rearrange use_cpu() tests for composite ops to avoid syncing CPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 65dae9c..d3df17d 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -423,8 +423,8 @@ static void apply_damage(struct sna_composite_op *op, RegionPtr region)
 static inline bool use_cpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 			   CARD8 op, INT16 width, INT16 height)
 {
-	if (too_small(priv))
-		return true;
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return false;
 
 	if (DAMAGE_IS_ALL(priv->cpu_damage) &&
 	    (op > PictOpSrc ||
@@ -432,7 +432,10 @@ static inline bool use_cpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	     height < pixmap->drawable.height))
 		return true;
 
-	return false;
+	if (priv->gpu_bo)
+		return false;
+
+	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
 }
 
 void
@@ -501,14 +504,15 @@ sna_composite(CARD8 op,
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
-		DBG(("%s: fallback as destination is unattached\n",
-		     __FUNCTION__));
+		DBG(("%s: fallback as destination pixmap=%ld is unattached\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
 		goto fallback;
 	}
 
 	if (use_cpu(pixmap, priv, op, width, height) &&
 	    !picture_is_gpu(src) && !picture_is_gpu(mask)) {
-		DBG(("%s: fallback, dst is too small (or completely damaged)\n", __FUNCTION__));
+		DBG(("%s: fallback, dst pixmap=%ld is too small (or completely damaged)\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
 		goto fallback;
 	}
 
@@ -814,7 +818,8 @@ sna_composite_rectangles(CARD8		 op,
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL || too_small(priv)) {
-		DBG(("%s: fallback, too small or not attached\n", __FUNCTION__));
+		DBG(("%s: fallback, dst pixmap=%ld too small or not attached\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
 		goto fallback;
 	}
 
commit ad57ac07a273bf376b74884de47d8ee1e7129fb8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 20:06:49 2012 +0100

    sna: Flush the batch before a render operation if the GPU is idle
    
    Experiment with pushing those first commands earlier.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 229a799..8d38308 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3568,6 +3568,12 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	}
 	va_end(ap);
 
+	DBG(("%s: num_pages=+%d, num_exec=+%d\n",
+	     __FUNCTION__, num_pages, num_exec));
+
+	if (kgem->nexec && kgem_is_idle(kgem))
+		return false;
+
 	if (!num_pages)
 		return true;
 
commit 6325d7e29a8bacede321362c20bf3acd33c48651
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 14:51:54 2012 +0100

    NEWS: fixup missing links to bugs referenced in 2.20.6 release notes
    
    Reported-by: Adam Jackson <ajax at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index a2caaa7..1291b51 100644
--- a/NEWS
+++ b/NEWS
@@ -10,8 +10,8 @@ issue with incoherent page access to a streaming buffer.
    https://bugs.freedesktop.org/show_bug.cgi?id=54274
 
  * Incoherent concurrent access with the CPU and GPU
-   https://bugs.freedesktop.org/show_bug.cgi
-   https://bugs.freedesktop.org/show_bug.cgi
+   https://bugs.freedesktop.org/show_bug.cgi?id=51422
+   https://bugs.freedesktop.org/show_bug.cgi?id=52299
 
  * Add Option "Backlight" to override automatic selection of the
    backlight interface.
commit bc309bad39035649ee377c1d5af9099491a8ea43
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 14:22:15 2012 +0100

    sna: And remember to flush the damage to the output slaves...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a554655..1156e78 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13819,6 +13819,7 @@ static void sna_accel_post_damage(struct sna *sna)
 #if HAS_PIXMAP_SHARING
 	ScreenPtr screen = sna->scrn->pScreen;
 	PixmapDirtyUpdatePtr dirty;
+	bool flush = false;
 
 	xorg_list_for_each_entry(dirty, &screen->pixmap_dirty_list, ent) {
 		RegionRec region, *damage;
@@ -13903,6 +13904,8 @@ fallback:
 						    dst, sna_pixmap_get_bo(dst), -dirty->x, -dirty->y,
 						    box, n, COPY_LAST))
 				goto fallback;
+
+			flush = true;
 		}
 
 		RegionTranslate(&region, -dirty->x, -dirty->y);
@@ -13912,6 +13915,8 @@ skip:
 		RegionUninit(&region);
 		DamageEmpty(dirty->damage);
 	}
+	if (flush)
+		kgem_submit(&sna->kgem);
 #endif
 }
 
commit 68d207588a177afa4e999260bfddb4d6dba1029b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 13:20:21 2012 +0100

    sna: Improve handling of output offloading
    
    In particular, don't forget to flush when we only have offload slaves
    and no native pixmaps.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b08e701..a554655 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -937,6 +937,7 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 		return FALSE;
 
 	assert(priv->gpu_bo);
+	assert(priv->stride);
 
 	/* XXX negotiate format and stride restrictions */
 	if (priv->gpu_bo->tiling &&
@@ -13607,6 +13608,20 @@ static void sna_accel_disarm_timer(struct sna *sna, int id)
 	sna->timer_active &= ~(1<<id);
 }
 
+static bool has_offload_slaves(struct sna *sna)
+{
+#if HAS_PIXMAP_SHARING
+	ScreenPtr screen = sna->scrn->pScreen;
+	PixmapDirtyUpdatePtr dirty;
+
+	xorg_list_for_each_entry(dirty, &screen->pixmap_dirty_list, ent) {
+		if (RegionNotEmpty(DamageRegion(dirty->damage)))
+			return true;
+	}
+#endif
+	return false;
+}
+
 static bool has_shadow(struct sna *sna)
 {
 	DamagePtr damage = sna->mode.shadow_damage;
@@ -13625,13 +13640,16 @@ static bool has_shadow(struct sna *sna)
 
 static bool start_flush(struct sna *sna, struct sna_pixmap *scanout)
 {
-	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d))\n",
+	DBG(("%s: scanout=%d shadow?=%d, slaves?=%d, (cpu?=%d || gpu?=%d))\n",
 	     __FUNCTION__,
 	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
-	     has_shadow(sna),
+	     has_shadow(sna), has_offload_slaves(sna),
 	     scanout && scanout->cpu_damage != NULL,
 	     scanout && scanout->gpu_bo && scanout->gpu_bo->exec != NULL));
 
+	if (has_offload_slaves(sna))
+		return true;
+
 	if (has_shadow(sna))
 		return true;
 
@@ -13643,13 +13661,16 @@ static bool start_flush(struct sna *sna, struct sna_pixmap *scanout)
 
 static bool stop_flush(struct sna *sna, struct sna_pixmap *scanout)
 {
-	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d))\n",
+	DBG(("%s: scanout=%d shadow?=%d, slaves?=%d, (cpu?=%d || gpu?=%d))\n",
 	     __FUNCTION__,
 	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
-	     has_shadow(sna),
+	     has_shadow(sna), has_offload_slaves(sna),
 	     scanout && scanout->cpu_damage != NULL,
 	     scanout && scanout->gpu_bo && scanout->gpu_bo->rq != NULL));
 
+	if (has_offload_slaves(sna))
+		return true;
+
 	if (has_shadow(sna))
 		return true;
 
@@ -13662,9 +13683,10 @@ static bool stop_flush(struct sna *sna, struct sna_pixmap *scanout)
 static bool sna_accel_do_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv;
+	int interval;
 
 	priv = sna_accel_scanout(sna);
-	if (priv == NULL && !sna->mode.shadow_active) {
+	if (priv == NULL && !sna->mode.shadow_active && !has_offload_slaves(sna)) {
 		DBG(("%s -- no scanout attached\n", __FUNCTION__));
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 		return false;
@@ -13673,14 +13695,14 @@ static bool sna_accel_do_flush(struct sna *sna)
 	if (sna->flags & SNA_NO_DELAYED_FLUSH)
 		return true;
 
+	interval = sna->vblank_interval ?: 20;
 	if (sna->timer_active & (1<<(FLUSH_TIMER))) {
 		int32_t delta = sna->timer_expire[FLUSH_TIMER] - TIME;
 		DBG(("%s: flush timer active: delta=%d\n",
 		     __FUNCTION__, delta));
 		if (delta <= 3) {
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
-			sna->timer_expire[FLUSH_TIMER] =
-				TIME + sna->vblank_interval;
+			sna->timer_expire[FLUSH_TIMER] = TIME + interval;
 			return true;
 		}
 	} else {
@@ -13690,8 +13712,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 				kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
-			sna->timer_expire[FLUSH_TIMER] =
-				TIME + sna->vblank_interval / 2;
+			sna->timer_expire[FLUSH_TIMER] = TIME + interval / 2;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 		}
 	}
@@ -13800,19 +13821,95 @@ static void sna_accel_post_damage(struct sna *sna)
 	PixmapDirtyUpdatePtr dirty;
 
 	xorg_list_for_each_entry(dirty, &screen->pixmap_dirty_list, ent) {
-		RegionRec pixregion;
+		RegionRec region, *damage;
+		PixmapPtr src, dst;
+		BoxPtr box;
+		int n;
 
-		if (!RegionNotEmpty(DamageRegion(dirty->damage)))
+		damage = DamageRegion(dirty->damage);
+		if (!RegionNotEmpty(damage))
 			continue;
 
-		PixmapRegionInit(&pixregion,
-				 dirty->slave_dst->master_pixmap);
-		PixmapSyncDirtyHelper(dirty, &pixregion);
+		src = dirty->src;
+		dst = dirty->slave_dst->master_pixmap;
+
+		region.extents.x1 = dirty->x;
+		region.extents.x2 = dirty->x + dst->drawable.width;
+		region.extents.y1 = dirty->y;
+		region.extents.y2 = dirty->y + dst->drawable.height;
+		region.data = NULL;
+
+		DBG(("%s: pushing damage ((%d, %d), (%d, %d))x%d to slave pixmap=%ld, ((%d, %d), (%d, %d))\n", __FUNCTION__,
+		     damage->extents.x1, damage->extents.y1,
+		     damage->extents.x2, damage->extents.y2,
+		     RegionNumRects(damage),
+		     dst->drawable.serialNumber,
+		     region.extents.x1, region.extents.y1,
+		     region.extents.x2, region.extents.y2));
+
+		RegionIntersect(&region, &region, damage);
+
+		box = REGION_RECTS(&region);
+		n = REGION_NUM_RECTS(&region);
+		if (wedged(sna)) {
+fallback:
+			if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
+				goto skip;
+
+			if (!sna_pixmap_move_to_cpu(dst, MOVE_READ | MOVE_WRITE | MOVE_INPLACE_HINT))
+				goto skip;
+
+			assert(src->drawable.bitsPerPixel == dst->drawable.bitsPerPixel);
+			do {
+				DBG(("%s: copy box (%d, %d)->(%d, %d)x(%d, %d)\n",
+				     __FUNCTION__,
+				     box->x1, box->y1,
+				     box->x1 - dirty->x, box->y1 - dirty->y,
+				     box->x2 - box->x1, box->y2 - box->y1));
+
+				assert(box->x2 > box->x1);
+				assert(box->y2 > box->y1);
+
+				assert(box->x1 >= 0);
+				assert(box->y1 >= 0);
+				assert(box->x2 <= src->drawable.width);
+				assert(box->y2 <= src->drawable.height);
+
+				assert(box->x1 - dirty->x >= 0);
+				assert(box->y1 - dirty->y >= 0);
+				assert(box->x2 - dirty->x <= src->drawable.width);
+				assert(box->y2 - dirty->y <= src->drawable.height);
+
+				memcpy_blt(src->devPrivate.ptr,
+					   dst->devPrivate.ptr,
+					   src->drawable.bitsPerPixel,
+					   src->devKind, dst->devKind,
+					   box->x1, box->y1,
+					   box->x1 - dirty->x,
+					   box->y1 - dirty->y,
+					   box->x2 - box->x1,
+					   box->y2 - box->y1);
+				box++;
+			} while (--n);
+		} else {
+			if (!sna_pixmap_move_to_gpu(src, MOVE_READ | __MOVE_FORCE))
+				goto fallback;
 
-		DamageRegionAppend(&dirty->slave_dst->drawable,
-				   &pixregion);
-		RegionUninit(&pixregion);
+			if (!sna_pixmap_move_to_gpu(dst, MOVE_READ | MOVE_WRITE | __MOVE_FORCE))
+				goto fallback;
 
+			if (!sna->render.copy_boxes(sna, GXcopy,
+						    src, sna_pixmap_get_bo(src), 0, 0,
+						    dst, sna_pixmap_get_bo(dst), -dirty->x, -dirty->y,
+						    box, n, COPY_LAST))
+				goto fallback;
+		}
+
+		RegionTranslate(&region, -dirty->x, -dirty->y);
+		DamageRegionAppend(&dirty->slave_dst->drawable, &region);
+
+skip:
+		RegionUninit(&region);
 		DamageEmpty(dirty->damage);
 	}
 #endif
commit 8cf7ac776b9b47dabd5ab141e5a5385c44d3f309
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 13:55:34 2012 +0100

    sna: Fix a typo in an error message
    
    s/achieve/retrieve/ otherwise it is nonsense.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ae0a604..229a799 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -255,7 +255,7 @@ retry_gtt:
 	VG_CLEAR(mmap_arg);
 	mmap_arg.handle = bo->handle;
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
-		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
+		ErrorF("%s: failed to retrieve GTT offset for handle=%d: %d\n",
 		       __FUNCTION__, bo->handle, errno);
 		(void)__kgem_throttle_retire(kgem, 0);
 		if (kgem_expire_cache(kgem))
commit be83b98d299c74101357e7bf170b1451aec2e1f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 11:23:58 2012 +0100

    sna: Add some DBG to the prime paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 21d0ef8..b08e701 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -929,6 +929,8 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	struct sna_pixmap *priv;
 	int fd;
 
+	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
+
 	priv = sna_pixmap_move_to_gpu(pixmap,
 				      MOVE_READ | MOVE_WRITE | __MOVE_DRI | __MOVE_FORCE);
 	if (priv == NULL)
@@ -936,7 +938,7 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 
 	assert(priv->gpu_bo);
 
-	/* XXX */
+	/* XXX negotiate format and stride restrictions */
 	if (priv->gpu_bo->tiling &&
 	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE))
 		return FALSE;
@@ -967,6 +969,8 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
 
+	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
+
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL)
 		return FALSE;
@@ -977,9 +981,9 @@ sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
 	assert(priv->cpu_damage == NULL);
 	assert(priv->gpu_damage == NULL);
 
-	bo = kgem_bo_create_for_prime(&sna->kgem,
-				      (intptr_t)fd_handle,
-				      pixmap->devKind * pixmap->drawable.height);
+	bo = kgem_create_for_prime(&sna->kgem,
+				   (intptr_t)fd_handle,
+				   pixmap->devKind * pixmap->drawable.height);
 	if (bo == NULL)
 		return FALSE;
 
@@ -1003,6 +1007,8 @@ sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen, int depth)
 	PixmapPtr pixmap;
 	struct sna_pixmap *priv;
 
+	DBG(("%s: depth=%d\n", __FUNCTION__, depth));
+
 	/* Create a stub to be attached later */
 	pixmap = create_pixmap(sna, screen, 0, 0, depth, 0);
 	if (pixmap == NullPixmap)
@@ -3356,7 +3362,7 @@ static bool upload_inplace(struct sna *sna,
 		}
 	}
 
-	if (priv->create & (KGEM_CAN_CREATE_GPU | KGEM_CAN_CREATE_CPU) == KGEM_CAN_CREATE_GPU &&
+	if ((priv->create & (KGEM_CAN_CREATE_GPU | KGEM_CAN_CREATE_CPU)) == KGEM_CAN_CREATE_GPU &&
 	    region_subsumes_drawable(region, &pixmap->drawable)) {
 		DBG(("%s? yes, will fill fresh GPU bo\n", __FUNCTION__));
 		return true;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 7e87204..4c260cd 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1320,6 +1320,9 @@ sna_crtc_destroy(xf86CrtcPtr crtc)
 static Bool
 sna_set_scanout_pixmap(xf86CrtcPtr crtc, PixmapPtr pixmap)
 {
+	DBG(("%s: CRTC:%d, pipe=%d setting scanout pixmap=%ld\n",
+	     __FUNCTION__,to_sna_crtc(crtc)->id, to_sna_crtc(crtc)->pipe,
+	     pixmap ? pixmap->drawable.serialNumber : 0));
 	to_sna_crtc(crtc)->scanout_pixmap = pixmap;
 	return TRUE;
 }
commit d0684d55e3a62c231c92a68403cf100c9e18351b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 11:05:43 2012 +0100

    sna: Fixup CREATE_USAGE_SHARED
    
    The DRI2 code tries to create pixmaps with non-zero width/height,
    whoops.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d073c9f..ae0a604 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3808,6 +3808,48 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 	}
 }
 
+void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
+{
+	void *ptr;
+
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+
+	assert(!bo->purged);
+	assert(bo->proxy == NULL);
+	assert(list_is_empty(&bo->list));
+
+	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
+		DBG(("%s: converting request for GTT map into CPU map\n",
+		     __FUNCTION__));
+		return kgem_bo_map__cpu(kgem, bo);
+	}
+
+	if (IS_CPU_MAP(bo->map))
+		kgem_bo_release_map(kgem, bo);
+
+	ptr = bo->map;
+	if (ptr == NULL) {
+		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
+
+		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
+
+		ptr = __kgem_bo_map__gtt(kgem, bo);
+		if (ptr == NULL)
+			return NULL;
+
+		/* Cache this mapping to avoid the overhead of an
+		 * excruciatingly slow GTT pagefault. This is more an
+		 * issue with compositing managers which need to frequently
+		 * flush CPU damage to their GPU bo.
+		 */
+		bo->map = ptr;
+		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
+	}
+
+	return ptr;
+}
+
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
@@ -3817,8 +3859,8 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(!bo->purged);
 	assert(bo->proxy == NULL);
-	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
+	assert(bo->exec == NULL);
 
 	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
 	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 33cd3ff..0d5384b 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -410,6 +410,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			uint32_t delta);
 
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
+void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b9bf93b..21d0ef8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -943,8 +943,7 @@ sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
 	assert(priv->gpu_bo->tiling == I915_TILING_NONE);
 
 	/* And export the bo->pitch via pixmap->devKind */
-	pixmap->devPrivate.ptr =
-		kgem_bo_map(&sna->kgem, priv->gpu_bo);
+	pixmap->devPrivate.ptr = kgem_bo_map__async(&sna->kgem, priv->gpu_bo);
 	if (pixmap->devPrivate.ptr == NULL)
 		return FALSE;
 
@@ -1038,19 +1037,23 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
+	if ((width|height) == 0) {
 #ifdef CREATE_PIXMAP_USAGE_SHARED
-	if (usage == CREATE_PIXMAP_USAGE_SHARED) {
-		assert((width|height) == 0);
-		return sna_create_pixmap_shared(sna, screen, depth);
-	}
+		if (usage == CREATE_PIXMAP_USAGE_SHARED)
+			return sna_create_pixmap_shared(sna, screen, depth);
 #endif
-
-	if ((width|height) == 0) {
 		usage = -1;
 		goto fallback;
 	}
 	assert(width && height);
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+	if (usage == CREATE_PIXMAP_USAGE_SHARED)
+		return sna_pixmap_create_scratch(screen,
+						 width, height, depth,
+						 I915_TILING_NONE);
+#endif
+
 	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
 	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
commit b6ced7a2267f9429ad38949dda39c843e2119e99
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Sep 4 10:05:46 2012 +0100

    sna: Fix typo for detecting prime ioctls
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d68ad9f..d073c9f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2813,7 +2813,7 @@ struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size
 
 int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo)
 {
-#ifdef DRM_IOCTL_PRIME_HANDLE_TO_PRIME
+#ifdef DRM_IOCTL_PRIME_HANDLE_TO_FD
 	struct drm_prime_handle args;
 
 	VG_CLEAR(args);
commit df68723baae71498de95924c72d4f23fa7fc7fdf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 3 20:43:48 2012 +0100

    sna: Port prime interfacing
    
    Preliminary prime support.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fb.h b/src/sna/fb/fb.h
index 3339236..215aec9 100644
--- a/src/sna/fb/fb.h
+++ b/src/sna/fb/fb.h
@@ -28,15 +28,15 @@
 #include "config.h"
 #endif
 
-#include <stdbool.h>
-#include <pixman.h>
-
 #include <xorg-server.h>
 #include <servermd.h>
 #include <gcstruct.h>
 #include <colormap.h>
 #include <windowstr.h>
 
+#include <stdbool.h>
+#include <pixman.h>
+
 #if HAS_DEBUG_FULL
 #define DBG(x) ErrorF x
 #else
@@ -288,13 +288,17 @@ typedef struct {
 	unsigned char bpp;          /* current drawable bpp */
 } FbGCPrivate, *FbGCPrivPtr;
 
+extern DevPrivateKeyRec sna_gc_key;
+extern DevPrivateKeyRec sna_window_key;
+
 static inline FbGCPrivate *fb_gc(GCPtr gc)
 {
-	return (FbGCPrivate *)gc->devPrivates;
+	return dixGetPrivateAddr(&gc->devPrivates, &sna_gc_key);
 }
+
 static inline PixmapPtr fbGetWindowPixmap(WindowPtr window)
 {
-	return *(void **)window->devPrivates;
+	return *(PixmapPtr *)dixGetPrivateAddr(&window->devPrivates, &sna_window_key);
 }
 
 #ifdef ROOTLESS
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cb0c82a..d68ad9f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2771,6 +2771,65 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 	return bo;
 }
 
+struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size)
+{
+#ifdef DRM_IOCTL_PRIME_FD_TO_HANDLE
+	struct drm_prime_handle args;
+	struct drm_i915_gem_get_tiling tiling;
+	struct kgem_bo *bo;
+
+	DBG(("%s(name=%d)\n", __FUNCTION__, name));
+
+	VG_CLEAR(args);
+	args.fd = name;
+	args.flags = 0;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
+		return NULL;
+
+	VG_CLEAR(tiling);
+	tiling.handle = args.handle;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling)) {
+		gem_close(kgem->fd, args.handle);
+		return NULL;
+	}
+
+	DBG(("%s: new handle=%d, tiling=%d\n", __FUNCTION__,
+	     args.handle, tiling.tiling_mode));
+	bo = __kgem_bo_alloc(args.handle, NUM_PAGES(size));
+	if (bo == NULL) {
+		gem_close(kgem->fd, args.handle);
+		return NULL;
+	}
+
+	bo->tiling = tiling.tiling_mode;
+	bo->reusable = false;
+
+	debug_alloc__bo(kgem, bo);
+	return bo;
+#else
+	return NULL;
+#endif
+}
+
+int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo)
+{
+#ifdef DRM_IOCTL_PRIME_HANDLE_TO_PRIME
+	struct drm_prime_handle args;
+
+	VG_CLEAR(args);
+	args.handle = bo->handle;
+	args.flags = DRM_CLOEXEC;
+
+	if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
+		return -1;
+
+	bo->reusable = false;
+	return args.fd;
+#else
+	return -1;
+#endif
+}
+
 struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
 {
 	struct kgem_bo *bo;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index d72db55..33cd3ff 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -207,6 +207,8 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				bool read_only);
 
 struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
+struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
+int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);
 
 struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
 struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d655da0..b876061 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -47,6 +47,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xorg-server.h>
 
 #include <xf86Crtc.h>
+#if XF86_CRTC_VERSION >= 5
+#define HAS_PIXMAP_SHARING 1
+#endif
+
 #include <xf86str.h>
 #include <windowstr.h>
 #include <glyphstr.h>
@@ -149,9 +153,11 @@ static inline PixmapPtr get_drawable_pixmap(DrawablePtr drawable)
 		return get_window_pixmap((WindowPtr)drawable);
 }
 
+extern DevPrivateKeyRec sna_pixmap_key;
+
 constant static inline struct sna_pixmap *sna_pixmap(PixmapPtr pixmap)
 {
-	return ((void **)pixmap->devPrivates)[1];
+	return ((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[1];
 }
 
 static inline struct sna_pixmap *sna_pixmap_from_drawable(DrawablePtr drawable)
@@ -167,7 +173,7 @@ struct sna_gc {
 
 static inline struct sna_gc *sna_gc(GCPtr gc)
 {
-	return (struct sna_gc *)gc->devPrivates;
+	return dixGetPrivateAddr(&gc->devPrivates, &sna_gc_key);
 }
 
 enum {
@@ -309,7 +315,7 @@ to_sna_from_screen(ScreenPtr screen)
 constant static inline struct sna *
 to_sna_from_pixmap(PixmapPtr pixmap)
 {
-	return *(void **)pixmap->devPrivates;
+	return ((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[0];
 }
 
 constant static inline struct sna *
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0cb10bc..b9bf93b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -601,11 +601,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 
 static inline void sna_set_pixmap(PixmapPtr pixmap, struct sna_pixmap *sna)
 {
-#if 0
-	dixSetPrivate(&pixmap->devPrivates, &sna_private_index, sna);
-#else
-	((void **)pixmap->devPrivates)[1] = sna;
-#endif
+	((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[1] = sna;
 	assert(sna_pixmap(pixmap) == sna);
 }
 
@@ -709,7 +705,8 @@ create_pixmap(struct sna *sna, ScreenPtr screen,
 	if (!pixmap)
 		return NullPixmap;
 
-	((void **)pixmap->devPrivates)[0] = sna;
+	((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[0] = sna;
+	assert(to_sna_from_pixmap(pixmap) == sna);
 
 	pixmap->drawable.type = DRAWABLE_PIXMAP;
 	pixmap->drawable.class = 0;
@@ -924,6 +921,111 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	return pixmap;
 }
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+static Bool
+sna_share_pixmap_backing(PixmapPtr pixmap, ScreenPtr slave, void **fd_handle)
+{
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
+	int fd;
+
+	priv = sna_pixmap_move_to_gpu(pixmap,
+				      MOVE_READ | MOVE_WRITE | __MOVE_DRI | __MOVE_FORCE);
+	if (priv == NULL)
+		return FALSE;
+
+	assert(priv->gpu_bo);
+
+	/* XXX */
+	if (priv->gpu_bo->tiling &&
+	    !sna_pixmap_change_tiling(pixmap, I915_TILING_NONE))
+		return FALSE;
+	assert(priv->gpu_bo->tiling == I915_TILING_NONE);
+
+	/* And export the bo->pitch via pixmap->devKind */
+	pixmap->devPrivate.ptr =
+		kgem_bo_map(&sna->kgem, priv->gpu_bo);
+	if (pixmap->devPrivate.ptr == NULL)
+		return FALSE;
+
+	pixmap->devKind = priv->gpu_bo->pitch;
+	priv->mapped = true;
+
+	fd = kgem_bo_export_to_prime(&sna->kgem, priv->gpu_bo);
+	if (fd == -1)
+		return FALSE;
+
+	priv->pinned = true;
+
+	*fd_handle = (void *)(intptr_t)fd;
+	return TRUE;
+}
+
+static Bool
+sna_set_shared_pixmap_backing(PixmapPtr pixmap, void *fd_handle)
+{
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
+	struct kgem_bo *bo;
+
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return FALSE;
+
+	assert(!priv->pinned);
+	assert(priv->gpu_bo == NULL);
+	assert(priv->cpu_bo == NULL);
+	assert(priv->cpu_damage == NULL);
+	assert(priv->gpu_damage == NULL);
+
+	bo = kgem_bo_create_for_prime(&sna->kgem,
+				      (intptr_t)fd_handle,
+				      pixmap->devKind * pixmap->drawable.height);
+	if (bo == NULL)
+		return FALSE;
+
+	sna_damage_all(&priv->gpu_damage,
+		       pixmap->drawable.width,
+		       pixmap->drawable.height);
+
+	bo->pitch = pixmap->devKind;
+	priv->stride = pixmap->devKind;
+
+	priv->gpu_bo = bo;
+	priv->pinned = true;
+
+	close((intptr_t)fd_handle);
+	return TRUE;
+}
+
+static PixmapPtr
+sna_create_pixmap_shared(struct sna *sna, ScreenPtr screen, int depth)
+{
+	PixmapPtr pixmap;
+	struct sna_pixmap *priv;
+
+	/* Create a stub to be attached later */
+	pixmap = create_pixmap(sna, screen, 0, 0, depth, 0);
+	if (pixmap == NullPixmap)
+		return NullPixmap;
+
+	pixmap->drawable.width = 0;
+	pixmap->drawable.height = 0;
+	pixmap->devKind = 0;
+	pixmap->devPrivate.ptr = NULL;
+
+	priv = sna_pixmap_attach(pixmap);
+	if (priv == NULL) {
+		free(pixmap);
+		return NullPixmap;
+	}
+
+	priv->stride = 0;
+	priv->create = 0;
+	return pixmap;
+}
+#endif
+
 static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 				   int width, int height, int depth,
 				   unsigned int usage)
@@ -936,6 +1038,13 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+	if (usage == CREATE_PIXMAP_USAGE_SHARED) {
+		assert((width|height) == 0);
+		return sna_create_pixmap_shared(sna, screen, depth);
+	}
+#endif
+
 	if ((width|height) == 0) {
 		usage = -1;
 		goto fallback;
@@ -13675,6 +13784,31 @@ static int32_t sna_timeout(struct sna *sna)
 	return next;
 }
 
+static void sna_accel_post_damage(struct sna *sna)
+{
+#if HAS_PIXMAP_SHARING
+	ScreenPtr screen = sna->scrn->pScreen;
+	PixmapDirtyUpdatePtr dirty;
+
+	xorg_list_for_each_entry(dirty, &screen->pixmap_dirty_list, ent) {
+		RegionRec pixregion;
+
+		if (!RegionNotEmpty(DamageRegion(dirty->damage)))
+			continue;
+
+		PixmapRegionInit(&pixregion,
+				 dirty->slave_dst->master_pixmap);
+		PixmapSyncDirtyHelper(dirty, &pixregion);
+
+		DamageRegionAppend(&dirty->slave_dst->drawable,
+				   &pixregion);
+		RegionUninit(&pixregion);
+
+		DamageEmpty(dirty->damage);
+	}
+#endif
+}
+
 static void sna_accel_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
@@ -13699,6 +13833,7 @@ static void sna_accel_flush(struct sna *sna)
 	}
 
 	sna_mode_redisplay(sna);
+	sna_accel_post_damage(sna);
 }
 
 static void sna_accel_throttle(struct sna *sna)
@@ -13859,7 +13994,7 @@ sna_get_window_pixmap(WindowPtr window)
 static void
 sna_set_window_pixmap(WindowPtr window, PixmapPtr pixmap)
 {
-	*(PixmapPtr *)window->devPrivates = pixmap;
+	*(PixmapPtr *)dixGetPrivateAddr(&window->devPrivates, &sna_window_key) = pixmap;
 }
 
 static Bool
@@ -13988,6 +14123,10 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->CreatePixmap = sna_create_pixmap;
 	assert(screen->DestroyPixmap == NULL);
 	screen->DestroyPixmap = sna_destroy_pixmap;
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+	screen->SharePixmapBacking = sna_share_pixmap_backing;
+	screen->SetSharedPixmapBacking = sna_set_shared_pixmap_backing;
+#endif
 	screen->RealizeFont = sna_realize_font;
 	screen->UnrealizeFont = sna_unrealize_font;
 	assert(screen->CreateGC == NULL);
@@ -14002,6 +14141,11 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->StoreColors = sna_store_colors;
 	screen->BitmapToRegion = fbBitmapToRegion;
 
+#if HAS_PIXMAP_SHARING
+	screen->StartPixmapTracking = PixmapStartDirtyTracking;
+	screen->StopPixmapTracking = PixmapStopDirtyTracking;
+#endif
+
 	assert(screen->GetWindowPixmap == NULL);
 	screen->GetWindowPixmap = sna_get_window_pixmap;
 	assert(screen->SetWindowPixmap == NULL);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 3ec6dc1..7e87204 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -60,6 +60,7 @@
 struct sna_crtc {
 	struct drm_mode_modeinfo kmode;
 	int dpms_mode;
+	PixmapPtr scanout_pixmap;
 	struct kgem_bo *bo;
 	uint32_t cursor;
 	bool shadow;
@@ -960,6 +961,22 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 
 		sna_crtc->transform = true;
 		return bo;
+	} else if (sna_crtc->scanout_pixmap) {
+		DBG(("%s: attaching to scanout pixmap\n", __FUNCTION__));
+		if (!sna_crtc_enable_shadow(sna, sna_crtc))
+			return NULL;
+
+		bo = sna_pixmap_pin(sna_crtc->scanout_pixmap);
+		if (bo == NULL)
+			return NULL;
+
+		if (!get_fb(sna, bo,
+			    sna_crtc->scanout_pixmap->drawable.width,
+			    sna_crtc->scanout_pixmap->drawable.height))
+			return NULL;
+
+		sna_crtc->transform = true;
+		return kgem_bo_reference(bo);
 	} else if (sna->flags & SNA_TEAR_FREE) {
 		DBG(("%s: tear-free updates requested\n", __FUNCTION__));
 
@@ -1299,6 +1316,15 @@ sna_crtc_destroy(xf86CrtcPtr crtc)
 	crtc->driver_private = NULL;
 }
 
+#if HAS_PIXMAP_SHARING
+static Bool
+sna_set_scanout_pixmap(xf86CrtcPtr crtc, PixmapPtr pixmap)
+{
+	to_sna_crtc(crtc)->scanout_pixmap = pixmap;
+	return TRUE;
+}
+#endif
+
 static const xf86CrtcFuncsRec sna_crtc_funcs = {
 	.dpms = sna_crtc_dpms,
 	.set_mode_major = sna_crtc_set_mode_major,
@@ -1309,6 +1335,9 @@ static const xf86CrtcFuncsRec sna_crtc_funcs = {
 	.load_cursor_argb = sna_crtc_load_cursor_argb,
 	.gamma_set = sna_crtc_gamma_set,
 	.destroy = sna_crtc_destroy,
+#if HAS_PIXMAP_SHARING
+	.set_scanout_pixmap = sna_set_scanout_pixmap,
+#endif
 };
 
 static uint32_t
@@ -2412,6 +2441,9 @@ bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 	for (i = 0; i < mode->kmode->count_connectors; i++)
 		sna_output_init(scrn, mode, i);
 
+#if HAS_PIXMAP_SHARING
+	xf86ProviderSetup(scrn, NULL, "Intel");
+#endif
 	xf86InitialConfiguration(scrn, TRUE);
 
 	return true;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 77e4e26..ff18fe3 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -195,12 +195,12 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 
 constant static inline void *sna_pixmap_get_buffer(PixmapPtr pixmap)
 {
-	return ((void **)pixmap->devPrivates)[2];
+	return ((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[2];
 }
 
 static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
 {
-	((void **)pixmap->devPrivates)[2] = ptr;
+	((void **)dixGetPrivateAddr(&pixmap->devPrivates, &sna_pixmap_key))[2] = ptr;
 }
 
 static DRI2Buffer2Ptr
@@ -841,7 +841,7 @@ sna_dri_get_pipe(DrawablePtr pDraw)
 static struct sna_dri_frame_event *
 sna_dri_window_get_chain(WindowPtr win)
 {
-	return ((void **)win->devPrivates)[1];
+	return ((void **)dixGetPrivateAddr(&win->devPrivates, &sna_window_key))[1];
 }
 
 static void
@@ -850,7 +850,7 @@ sna_dri_window_set_chain(WindowPtr win,
 {
 	DBG(("%s: head now %p\n", __FUNCTION__, chain));
 	assert(win->drawable.type == DRAWABLE_WINDOW);
-	((void **)win->devPrivates)[1] = chain;
+	((void **)dixGetPrivateAddr(&win->devPrivates, &sna_window_key))[1] = chain;
 }
 
 static void
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 2eab460..a193607 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -75,10 +75,10 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "git_version.h"
 #endif
 
-static DevPrivateKeyRec sna_pixmap_key;
-static DevPrivateKeyRec sna_gc_key;
-static DevPrivateKeyRec sna_glyph_key;
-static DevPrivateKeyRec sna_window_key;
+DevPrivateKeyRec sna_pixmap_key;
+DevPrivateKeyRec sna_gc_key;
+DevPrivateKeyRec sna_window_key;
+DevPrivateKeyRec sna_glyph_key;
 
 static Bool sna_enter_vt(VT_FUNC_ARGS_DECL);
 
@@ -343,6 +343,21 @@ static bool has_pageflipping(struct sna *sna)
 	return v > 0;
 }
 
+static void sna_setup_capabilities(ScrnInfoPtr scrn, int fd)
+{
+#if HAS_PIXMAP_SHARING && defined(DRM_CAP_PRIME)
+	uint64_t value;
+
+	scrn->capabilities = 0;
+	if (drmGetCap(fd, DRM_CAP_PRIME, &value) == 0) {
+		if (value & DRM_PRIME_CAP_EXPORT)
+			scrn->capabilities |= RR_Capability_SourceOutput | RR_Capability_SinkOffload;
+		if (value & DRM_PRIME_CAP_IMPORT)
+			scrn->capabilities |= RR_Capability_SinkOutput;
+	}
+#endif
+}
+
 /**
  * This is called before ScreenInit to do any require probing of screen
  * configuration.
@@ -442,6 +457,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (sna->Options == NULL)
 		return FALSE;
 
+	sna_setup_capabilities(scrn, fd);
+
 	intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
 
 	kgem_init(&sna->kgem, fd, sna->PciInfo, sna->info->gen);
@@ -777,22 +794,18 @@ sna_register_all_privates(void)
 	if (!dixRegisterPrivateKey(&sna_pixmap_key, PRIVATE_PIXMAP,
 				   3*sizeof(void *)))
 		return FALSE;
-	assert(sna_pixmap_key.offset == 0);
 
 	if (!dixRegisterPrivateKey(&sna_gc_key, PRIVATE_GC,
 				   sizeof(FbGCPrivate)))
 		return FALSE;
-	assert(sna_gc_key.offset == 0);
 
 	if (!dixRegisterPrivateKey(&sna_glyph_key, PRIVATE_GLYPH,
 				   sizeof(struct sna_glyph)))
 		return FALSE;
-	assert(sna_glyph_key.offset == 0);
 
 	if (!dixRegisterPrivateKey(&sna_window_key, PRIVATE_WINDOW,
 				   2*sizeof(void *)))
 		return FALSE;
-	assert(sna_window_key.offset == 0);
 
 	return TRUE;
 }
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 8cbe39c..f7331a5 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -104,9 +104,11 @@ static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char
 #define assert_pixmap_contains_box(p, b)
 #endif
 
+extern DevPrivateKeyRec sna_glyph_key;
+
 static inline struct sna_glyph *sna_glyph(GlyphPtr glyph)
 {
-	return (struct sna_glyph *)glyph->devPrivates;
+	return dixGetPrivateAddr(&glyph->devPrivates, &sna_glyph_key);
 }
 
 #define NeedsComponent(f) (PICT_FORMAT_A(f) != 0 && PICT_FORMAT_RGB(f) != 0)
commit e3ad18036b4dca29744ecfa4694006f01dd3fd18
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Sep 3 20:53:30 2012 +0100

    sna: Enable platform probing
    
    Completing commit 0768ac4d195214825137152893deb74a87fcd11e
    Author: Dave Airlie <airlied at redhat.com>
    Date:   Wed Jul 25 16:11:23 2012 +1000
    
        intel: add platform probing support.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a0beb4c..2eab460 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -371,7 +371,14 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
-	if (pEnt == NULL || pEnt->location.type != BUS_PCI)
+	if (pEnt == NULL)
+		return FALSE;
+
+	if (pEnt->location.type != BUS_PCI
+#ifdef XSERVER_PLATFORM_BUS
+	    && pEnt->location.type != BUS_PLATFORM
+#endif
+		)
 		return FALSE;
 
 	if (flags & PROBE_DETECT)
commit 3dc644b2a959fc559e1138b332ed42d7235de42f
Author: Сковорода Никита Андреевич <chalkerx at gmail.com>
Date:   Mon Sep 3 13:44:21 2012 +0400

    uxa: Fix Backlight option support.
    
    Signed-off-by: Сковорода Никита Андреевич <chalkerx at gmail.com>

diff --git a/src/intel_display.c b/src/intel_display.c
index 4bc8a7b..233c6af 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -262,7 +262,7 @@ intel_output_backlight_init(xf86OutputPtr output)
 	int i;
 
 	str = xf86GetOptValString(intel->Options, OPTION_BACKLIGHT);
-	if (str == NULL) {
+	if (str != NULL) {
 		sprintf(path, "%s/%s", BACKLIGHT_CLASS, str);
 		if (!stat(path, &buf)) {
 			intel_output->backlight_iface = backlight_interfaces[i];
commit a972affe0c78b23a119d5dd14bb4446c89122af1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 17:43:33 2012 +0100

    sna/gen6+: Redirect fills if the destination is too large for 3D
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54134
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6d5a6ce..363e8db 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1287,7 +1287,8 @@ gen6_emit_composite_primitive_solid(struct sna *sna,
 	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 	assert(sna->render.vertex_used <= sna->render.vertex_size);
-	assert(!too_large(r->dst.x + r->width, r->dst.y + r->height));
+	assert(!too_large(op->dst.x + r->dst.x + r->width,
+			  op->dst.y + r->dst.y + r->height));
 
 	dst.p.x = r->dst.x + r->width;
 	dst.p.y = r->dst.y + r->height;
@@ -3249,15 +3250,8 @@ static inline bool prefer_blt_copy(struct sna *sna,
 		prefer_blt_bo(sna, dst_bo));
 }
 
-static inline bool
-overlaps(struct sna *sna,
-	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
-	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-	 const BoxRec *box, int n, BoxRec *extents)
+inline static void boxes_extents(const BoxRec *box, int n, BoxRec *extents)
 {
-	if (src_bo != dst_bo)
-		return false;
-
 	*extents = box[0];
 	while (--n) {
 		box++;
@@ -3272,7 +3266,18 @@ overlaps(struct sna *sna,
 		if (box->y2 > extents->y2)
 			extents->y2 = box->y2;
 	}
+}
 
+static inline bool
+overlaps(struct sna *sna,
+	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+	 const BoxRec *box, int n, BoxRec *extents)
+{
+	if (src_bo != dst_bo)
+		return false;
+
+	boxes_extents(box, n, extents);
 	return (extents->x2 + src_dx > extents->x1 + dst_dx &&
 		extents->x1 + src_dx < extents->x2 + dst_dx &&
 		extents->y2 + src_dy > extents->y1 + dst_dy &&
@@ -3667,22 +3672,21 @@ gen6_render_fill_boxes(struct sna *sna,
 		return false;
 	}
 
-	if (op <= PictOpSrc &&
-	    (prefer_blt_fill(sna, dst_bo) ||
-	     too_large(dst->drawable.width, dst->drawable.height) ||
-	     !gen6_check_dst_format(format))) {
+	if (prefer_blt_fill(sna, dst_bo) || !gen6_check_dst_format(format)) {
 		uint8_t alu = GXinvalid;
 
-		pixel = 0;
-		if (op == PictOpClear)
-			alu = GXclear;
-		else if (sna_get_pixel_from_rgba(&pixel,
-						 color->red,
-						 color->green,
-						 color->blue,
-						 color->alpha,
-						 format))
-			alu = GXcopy;
+		if (op <= PictOpSrc) {
+			pixel = 0;
+			if (op == PictOpClear)
+				alu = GXclear;
+			else if (sna_get_pixel_from_rgba(&pixel,
+							 color->red,
+							 color->green,
+							 color->blue,
+							 color->alpha,
+							 format))
+				alu = GXcopy;
+		}
 
 		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
@@ -3692,10 +3696,6 @@ gen6_render_fill_boxes(struct sna *sna,
 
 		if (!gen6_check_dst_format(format))
 			return false;
-
-		if (too_large(dst->drawable.width, dst->drawable.height))
-			return sna_tiling_fill_boxes(sna, op, format, color,
-						     dst, dst_bo, box, n);
 	}
 
 	if (op == PictOpClear) {
@@ -3720,6 +3720,19 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
 
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(dst->drawable.width, dst->drawable.height)) {
+		BoxRec extents;
+
+		boxes_extents(box, n, &extents);
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			return sna_tiling_fill_boxes(sna, op, format, color,
+						     dst, dst_bo, box, n);
+	}
+
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
 	tmp.mask.bo = NULL;
 
@@ -3767,6 +3780,7 @@ gen6_render_fill_boxes(struct sna *sna,
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	sna_render_composite_redirect_done(sna, &tmp);
 	return true;
 }
 
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ffe41cf..0cc4cba 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1401,7 +1401,8 @@ gen7_emit_composite_primitive_solid(struct sna *sna,
 	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
 	assert(sna->render.vertex_used <= sna->render.vertex_size);
-	assert(!too_large(r->dst.x + r->width, r->dst.y + r->height));
+	assert(!too_large(op->dst.x + r->dst.x + r->width,
+			  op->dst.y + r->dst.y + r->height));
 
 	dst.p.x = r->dst.x + r->width;
 	dst.p.y = r->dst.y + r->height;
@@ -3338,15 +3339,8 @@ static inline bool prefer_blt_copy(struct sna *sna,
 		prefer_blt_bo(sna, dst_bo));
 }
 
-static inline bool
-overlaps(struct sna *sna,
-	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
-	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-	 const BoxRec *box, int n, BoxRec *extents)
+inline static void boxes_extents(const BoxRec *box, int n, BoxRec *extents)
 {
-	if (src_bo != dst_bo)
-		return false;
-
 	*extents = box[0];
 	while (--n) {
 		box++;
@@ -3361,7 +3355,18 @@ overlaps(struct sna *sna,
 		if (box->y2 > extents->y2)
 			extents->y2 = box->y2;
 	}
+}
 
+static inline bool
+overlaps(struct sna *sna,
+	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+	 const BoxRec *box, int n, BoxRec *extents)
+{
+	if (src_bo != dst_bo)
+		return false;
+
+	boxes_extents(box, n, extents);
 	return (extents->x2 + src_dx > extents->x1 + dst_dx &&
 		extents->x1 + src_dx < extents->x2 + dst_dx &&
 		extents->y2 + src_dy > extents->y1 + dst_dy &&
@@ -3742,22 +3747,21 @@ gen7_render_fill_boxes(struct sna *sna,
 		return false;
 	}
 
-	if (op <= PictOpSrc &&
-	    (prefer_blt_fill(sna, dst_bo) ||
-	     too_large(dst->drawable.width, dst->drawable.height) ||
-	     !gen7_check_dst_format(format))) {
+	if (prefer_blt_fill(sna, dst_bo) || !gen7_check_dst_format(format)) {
 		uint8_t alu = GXinvalid;
 
-		pixel = 0;
-		if (op == PictOpClear)
-			alu = GXclear;
-		else if (sna_get_pixel_from_rgba(&pixel,
-						 color->red,
-						 color->green,
-						 color->blue,
-						 color->alpha,
-						 format))
-			alu = GXcopy;
+		if (op <= PictOpSrc) {
+			pixel = 0;
+			if (op == PictOpClear)
+				alu = GXclear;
+			else if (sna_get_pixel_from_rgba(&pixel,
+							 color->red,
+							 color->green,
+							 color->blue,
+							 color->alpha,
+							 format))
+				alu = GXcopy;
+		}
 
 		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
@@ -3767,10 +3771,6 @@ gen7_render_fill_boxes(struct sna *sna,
 
 		if (!gen7_check_dst_format(format))
 			return false;
-
-		if (too_large(dst->drawable.width, dst->drawable.height))
-			return sna_tiling_fill_boxes(sna, op, format, color,
-						     dst, dst_bo, box, n);
 	}
 
 	if (op == PictOpClear) {
@@ -3795,6 +3795,19 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
 
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(dst->drawable.width, dst->drawable.height)) {
+		BoxRec extents;
+
+		boxes_extents(box, n, &extents);
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			return sna_tiling_fill_boxes(sna, op, format, color,
+						     dst, dst_bo, box, n);
+	}
+
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
 	tmp.mask.bo = NULL;
 
@@ -3839,6 +3852,7 @@ gen7_render_fill_boxes(struct sna *sna,
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	sna_render_composite_redirect_done(sna, &tmp);
 	return true;
 }
 
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index e048361..9e70833 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -648,7 +648,6 @@ sna_tiling_fill_boxes(struct sna *sna,
 				int16_t dy = this.extents.y1;
 
 				assert(kgem_bo_can_blt(&sna->kgem, bo));
-				assert(bo->pitch <= 8192);
 
 				if (!sna->render.copy_boxes(sna, GXcopy,
 							     dst, dst_bo, 0, 0,
commit 18d26076c778c20eb589b638fc47fa847793f149
Author: Dave Airlie <airlied at gmail.com>
Date:   Tue Sep 4 07:19:12 2012 +1000

    intel: fix return value for no pEnt
    
    Probably never gets hit, but it should return FALSE.
    
    Pointed out on IRC by Lekensteyn.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 348d5df..218b583 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -477,7 +477,7 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
 	if (pEnt == NULL)
-		return NULL;
+		return FALSE;
 
 	if (pEnt->location.type != BUS_PCI
 #ifdef XSERVER_PLATFORM_BUS
commit d14ff42f2a205542df2ef723c6151d18db2bea8b
Author: Dave Airlie <airlied at redhat.com>
Date:   Thu Jul 26 10:43:29 2012 +1000

    intel: query kernel for caps to setup scrn capabilities.
    
    This queries the kernel for prime support before advertising
    the capabilities.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 65fecfc..348d5df 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -432,6 +432,25 @@ static Bool can_accelerate_blt(struct intel_screen_private *intel)
 	return TRUE;
 }
 
+static void intel_setup_capabilities(ScrnInfoPtr scrn)
+{
+#ifdef INTEL_PIXMAP_SHARING
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	uint64_t value;
+	int ret;
+
+	scrn->capabilities = 0;
+
+	ret = drmGetCap(intel->drmSubFD, DRM_CAP_PRIME, &value);
+	if (ret == 0) {
+		if (value & DRM_PRIME_CAP_EXPORT)
+			scrn->capabilities |= RR_Capability_SourceOutput | RR_Capability_SinkOffload;
+		if (value & DRM_PRIME_CAP_IMPORT)
+			scrn->capabilities |= RR_Capability_SinkOutput;
+	}
+#endif
+}
+
 /**
  * This is called before ScreenInit to do any require probing of screen
  * configuration.
@@ -524,6 +543,7 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 	if (!I830GetEarlyOptions(scrn))
 		return FALSE;
 
+	intel_setup_capabilities(scrn);
 	intel_check_chipset_option(scrn);
 	intel_check_dri_option(scrn);
 
commit 6705d8237aca90964449e4dbee97b4f62b87c28b
Author: Dave Airlie <airlied at redhat.com>
Date:   Thu Jul 26 10:37:04 2012 +1000

    intel: add pixmap tracking and scanout support. (v2)
    
    This adds support for pixmap tracking and scanout of
    alternate pixmaps.
    
    v2: do dirty updates after uxa block handler, check if kernel
    can flush vmap for us so we don't have to.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel.h b/src/intel.h
index 0b57aaf..5b0c5df 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -80,6 +80,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define MONITOR_EDID_COMPLETE_RAWDATA EDID_COMPLETE_RAWDATA
 #endif
 
+#if XF86_CRTC_VERSION >= 5
+#define INTEL_PIXMAP_SHARING 1
+#endif
+
 struct intel_pixmap {
 	dri_bo *bo;
 
@@ -347,8 +351,13 @@ typedef struct intel_screen_private {
 	struct udev_monitor *uevent_monitor;
 	InputHandlerProc uevent_handler;
 #endif
+	Bool has_prime_vmap_flush;
 } intel_screen_private;
 
+#ifndef I915_PARAM_HAS_PRIME_VMAP_FLUSH
+#define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
+#endif
+
 enum {
 	DEBUG_FLUSH_BATCHES = 0x1,
 	DEBUG_FLUSH_CACHES = 0x2,
diff --git a/src/intel_display.c b/src/intel_display.c
index 6dfc8e6..4bc8a7b 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -86,6 +86,8 @@ struct intel_crtc {
 	uint32_t rotate_fb_id;
 	xf86CrtcPtr crtc;
 	struct list link;
+	PixmapPtr scanout_pixmap;
+	uint32_t scanout_fb_id;
 };
 
 struct intel_property {
@@ -378,6 +380,7 @@ intel_crtc_apply(xf86CrtcPtr crtc)
 	ScrnInfoPtr scrn = crtc->scrn;
 	struct intel_crtc *intel_crtc = crtc->driver_private;
 	struct intel_mode *mode = intel_crtc->mode;
+	intel_screen_private *intel = intel_get_screen_private(scrn);
 	xf86CrtcConfigPtr   xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
 	uint32_t *output_ids;
 	int output_count = 0;
@@ -401,13 +404,15 @@ intel_crtc_apply(xf86CrtcPtr crtc)
 		output_count++;
 	}
 
+	if (!intel_crtc->scanout_fb_id) {
 #if XORG_VERSION_CURRENT < XORG_VERSION_NUMERIC(1,5,99,0,0)
-	if (!xf86CrtcRotate(crtc, mode, rotation))
-		goto done;
+		if (!xf86CrtcRotate(crtc, mode, rotation))
+			goto done;
 #else
-	if (!xf86CrtcRotate(crtc))
-		goto done;
+		if (!xf86CrtcRotate(crtc))
+			goto done;
 #endif
+	}
 
 #if XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,7,0,0,0)
 	crtc->funcs->gamma_set(crtc, crtc->gamma_red, crtc->gamma_green,
@@ -421,6 +426,10 @@ intel_crtc_apply(xf86CrtcPtr crtc)
 		fb_id = intel_crtc->rotate_fb_id;
 		x = 0;
 		y = 0;
+	} else if (intel_crtc->scanout_fb_id && intel_crtc->scanout_pixmap->drawable.width >= crtc->mode.HDisplay && intel_crtc->scanout_pixmap->drawable.height >= crtc->mode.VDisplay) {
+		fb_id = intel_crtc->scanout_fb_id;
+		x = 0;
+		y = 0;
 	}
 	ret = drmModeSetCrtc(mode->fd, crtc_id(intel_crtc),
 			     fb_id, x, y, output_ids, output_count,
@@ -684,6 +693,42 @@ intel_crtc_destroy(xf86CrtcPtr crtc)
 	crtc->driver_private = NULL;
 }
 
+#ifdef INTEL_PIXMAP_SHARING
+static Bool
+intel_set_scanout_pixmap(xf86CrtcPtr crtc, PixmapPtr ppix)
+{
+	struct intel_crtc *intel_crtc = crtc->driver_private;
+	ScrnInfoPtr scrn = crtc->scrn;
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	dri_bo *bo;
+	int ret;
+
+	if (ppix == intel_crtc->scanout_pixmap)
+		return TRUE;
+
+	if (!ppix) {
+		intel_crtc->scanout_pixmap = NULL;
+		if (intel_crtc->scanout_fb_id) {
+			drmModeRmFB(intel->drmSubFD, intel_crtc->scanout_fb_id);
+			intel_crtc->scanout_fb_id = 0;
+		}
+		return TRUE;
+	}
+
+	bo = intel_get_pixmap_bo(ppix);
+	if (intel->front_buffer) {
+		ErrorF("have front buffer\n");
+	}
+
+	intel_crtc->scanout_pixmap = ppix;
+	ret = drmModeAddFB(intel->drmSubFD, ppix->drawable.width,
+			   ppix->drawable.height, ppix->drawable.depth,
+			   ppix->drawable.bitsPerPixel, ppix->devKind,
+			   bo->handle, &intel_crtc->scanout_fb_id);
+	return TRUE;
+}
+#endif
+
 static const xf86CrtcFuncsRec intel_crtc_funcs = {
 	.dpms = intel_crtc_dpms,
 	.set_mode_major = intel_crtc_set_mode_major,
@@ -697,6 +742,9 @@ static const xf86CrtcFuncsRec intel_crtc_funcs = {
 	.shadow_destroy = intel_crtc_shadow_destroy,
 	.gamma_set = intel_crtc_gamma_set,
 	.destroy = intel_crtc_destroy,
+#ifdef INTEL_PIXMAP_SHARING
+	.set_scanout_pixmap = intel_set_scanout_pixmap,
+#endif
 };
 
 static void
@@ -1662,6 +1710,10 @@ Bool intel_mode_pre_init(ScrnInfoPtr scrn, int fd, int cpp)
 	for (i = 0; i < mode->mode_res->count_connectors; i++)
 		intel_output_init(scrn, mode, i);
 
+#ifdef INTEL_PIXMAP_SHARING
+	xf86ProviderSetup(scrn, NULL, "Intel");
+#endif
+
 	xf86InitialConfiguration(scrn, TRUE);
 
 	mode->event_context.version = DRM_EVENT_CONTEXT_VERSION;
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 76d56d9..65fecfc 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -385,6 +385,11 @@ static Bool has_relaxed_fencing(struct intel_screen_private *intel)
 	return drm_has_boolean_param(intel, I915_PARAM_HAS_RELAXED_FENCING);
 }
 
+static Bool has_prime_vmap_flush(struct intel_screen_private *intel)
+{
+	return drm_has_boolean_param(intel, I915_PARAM_HAS_PRIME_VMAP_FLUSH);
+}
+
 static Bool can_accelerate_blt(struct intel_screen_private *intel)
 {
 	if (INTEL_INFO(intel)->gen == -1)
@@ -545,6 +550,8 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	intel->has_kernel_flush = has_kernel_flush(intel);
 
+	intel->has_prime_vmap_flush = has_prime_vmap_flush(intel);
+
 	intel->has_relaxed_fencing =
 		xf86ReturnOptValBool(intel->Options,
 				     OPTION_RELAXED_FENCING,
@@ -645,6 +652,51 @@ void IntelEmitInvarientState(ScrnInfoPtr scrn)
 		I915EmitInvarientState(scrn);
 }
 
+#ifdef INTEL_PIXMAP_SHARING
+static Bool
+redisplay_dirty(ScreenPtr screen, PixmapDirtyUpdatePtr dirty)
+{
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	RegionRec pixregion;
+	int was_blocked;
+
+	PixmapRegionInit(&pixregion, dirty->slave_dst->master_pixmap);
+
+	PixmapSyncDirtyHelper(dirty, &pixregion);
+	intel_batch_submit(scrn);
+	if (!intel->has_prime_vmap_flush) {
+		drm_intel_bo *bo = intel_get_pixmap_bo(dirty->slave_dst->master_pixmap);
+		was_blocked = xf86BlockSIGIO();
+		drm_intel_bo_map(bo, FALSE);
+		drm_intel_bo_unmap(bo);
+		xf86UnblockSIGIO(was_blocked);
+        }
+        DamageRegionAppend(&dirty->slave_dst->drawable, &pixregion);
+        RegionUninit(&pixregion);
+	return 0;
+}
+
+static void
+intel_dirty_update(ScreenPtr screen)
+{
+	RegionPtr region;
+	PixmapDirtyUpdatePtr ent;
+
+	if (xorg_list_is_empty(&screen->pixmap_dirty_list))
+	    return;
+
+	ErrorF("list is not empty\n");
+	xorg_list_for_each_entry(ent, &screen->pixmap_dirty_list, ent) {
+		region = DamageRegion(ent->damage);
+		if (RegionNotEmpty(region)) {
+			redisplay_dirty(screen, ent);
+			DamageEmpty(ent->damage);
+		}
+	}
+}
+#endif
+
 static void
 I830BlockHandler(BLOCKHANDLER_ARGS_DECL)
 {
@@ -661,6 +713,9 @@ I830BlockHandler(BLOCKHANDLER_ARGS_DECL)
 
 	intel_uxa_block_handler(intel);
 	intel_video_block_handler(intel);
+#ifdef INTEL_PIXMAP_SHARING
+	intel_dirty_update(screen);
+#endif
 }
 
 static Bool
@@ -906,6 +961,11 @@ I830ScreenInit(SCREEN_INIT_ARGS_DECL)
 	intel->BlockHandler = screen->BlockHandler;
 	screen->BlockHandler = I830BlockHandler;
 
+#ifdef INTEL_PIXMAP_SHARING
+	screen->StartPixmapTracking = PixmapStartDirtyTracking;
+	screen->StopPixmapTracking = PixmapStopDirtyTracking;
+#endif
+
 	if (!AddCallback(&FlushCallback, intel_flush_callback, scrn))
 		return FALSE;
 
commit 69827126abdfa289417b55fe7db8ae0535037185
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jul 25 16:22:57 2012 +1000

    intel/uxa: add pixmap sharing support.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 3745ff0..9bb0e0a 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -36,6 +36,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xaarop.h>
 #include <string.h>
 #include <errno.h>
+#include <unistd.h>
 
 #include "intel.h"
 #include "intel_glamor.h"
@@ -1040,6 +1041,10 @@ intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 		if (usage == UXA_CREATE_PIXMAP_FOR_MAP || usage & INTEL_CREATE_PIXMAP_TILING_NONE)
 			tiling = I915_TILING_NONE;
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+		if (usage == CREATE_PIXMAP_USAGE_SHARED)
+			tiling = I915_TILING_NONE;
+#endif
 		/* if tiling is off force to none */
 		if (!intel->tiling)
 			tiling = I915_TILING_NONE;
@@ -1138,7 +1143,7 @@ static Bool intel_uxa_destroy_pixmap(PixmapPtr pixmap)
 Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
-	PixmapPtr pixmap = screen->GetScreenPixmap(screen);
+	PixmapPtr pixmap;
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	dri_bo *bo = intel->front_buffer;
 
@@ -1165,6 +1170,69 @@ Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 	return TRUE;
 }
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+static Bool
+intel_uxa_share_pixmap_backing(PixmapPtr ppix, ScreenPtr slave, void **fd_handle)
+{
+	ScrnInfoPtr scrn = xf86ScreenToScrn(ppix->drawable.pScreen);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	struct intel_pixmap *priv = intel_get_pixmap_private(ppix);
+	unsigned int size, tiling, swizzle;
+	dri_bo *bo = intel_get_pixmap_bo(ppix), *newbo;
+	int stride;
+	int handle;
+
+	if (drm_intel_bo_references(intel->batch_bo, bo))
+		intel_batch_submit(intel->scrn);
+
+	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
+
+	if (tiling == I915_TILING_X) {
+	        tiling = I915_TILING_NONE;
+
+		size = intel_uxa_pixmap_compute_size(ppix, ppix->drawable.width, ppix->drawable.height, &tiling, &stride, INTEL_CREATE_PIXMAP_DRI2);
+
+		newbo = drm_intel_bo_alloc_for_render(intel->bufmgr,
+						      "pixmap",
+						      size, 0);
+
+		if (tiling != I915_TILING_NONE)
+			drm_intel_bo_set_tiling(newbo, &tiling, stride);
+		priv->stride = stride;
+		priv->tiling = tiling;
+		intel_set_pixmap_bo(ppix, newbo);
+
+		ppix->drawable.pScreen->ModifyPixmapHeader(ppix, ppix->drawable.width,
+					   ppix->drawable.height, 0, 0,
+					   stride, NULL);
+		bo = newbo;
+	}
+	drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
+	drm_intel_bo_gem_export_to_prime(bo, &handle);
+
+	*fd_handle = (void *)(long)handle;
+	return TRUE;
+}
+
+static Bool
+intel_uxa_set_shared_pixmap_backing(PixmapPtr ppix, void *fd_handle)
+{
+	ScrnInfoPtr scrn = xf86ScreenToScrn(ppix->drawable.pScreen);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	dri_bo *bo;
+	int ihandle = (int)(long)fd_handle;
+
+	/* force untiled for now */
+	bo = drm_intel_bo_gem_create_from_prime(intel->bufmgr, ihandle, 0);
+	if (!bo)
+		return FALSE;
+
+	intel_set_pixmap_bo(ppix, bo);
+	close(ihandle);
+	return TRUE;
+}
+#endif
+
 static void
 intel_limits_init(intel_screen_private *intel)
 {
@@ -1314,6 +1382,11 @@ Bool intel_uxa_init(ScreenPtr screen)
 	screen->CreatePixmap = intel_uxa_create_pixmap;
 	screen->DestroyPixmap = intel_uxa_destroy_pixmap;
 
+#ifdef CREATE_PIXMAP_USAGE_SHARED
+	screen->SharePixmapBacking = intel_uxa_share_pixmap_backing;
+	screen->SetSharedPixmapBacking = intel_uxa_set_shared_pixmap_backing;
+#endif
+
 	if (!uxa_driver_init(screen, intel->uxa_driver)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "UXA initialization failed\n");
commit 0768ac4d195214825137152893deb74a87fcd11e
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed Jul 25 16:11:23 2012 +1000

    intel: add platform probing support.
    
    This allows the driver to be loaded by the platform loading code.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index c5be679..76d56d9 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -452,7 +452,14 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
-	if (pEnt == NULL || pEnt->location.type != BUS_PCI)
+	if (pEnt == NULL)
+		return NULL;
+
+	if (pEnt->location.type != BUS_PCI
+#ifdef XSERVER_PLATFORM_BUS
+	    && pEnt->location.type != BUS_PLATFORM
+#endif
+		)
 		return FALSE;
 
 	if (flags & PROBE_DETECT)
diff --git a/src/intel_module.c b/src/intel_module.c
index e5f98d4..cfd92e9 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -47,6 +47,10 @@
 #include "legacy/legacy.h"
 #include "sna/sna_module.h"
 
+#ifdef XSERVER_PLATFORM_BUS
+#include <xf86platformBus.h>
+#endif
+
 static const struct intel_device_info intel_generic_info = {
 	.gen = -1,
 };
@@ -525,6 +529,47 @@ static Bool intel_pci_probe(DriverPtr		driver,
 	}
 }
 
+#ifdef XSERVER_PLATFORM_BUS
+static Bool
+intel_platform_probe(DriverPtr driver,
+		     int entity_num, int flags,
+		     struct xf86_platform_device *dev,
+		     intptr_t match_data)
+{
+	ScrnInfoPtr scrn = NULL;
+	char *path = xf86_get_platform_device_attrib(dev, ODEV_ATTRIB_PATH);
+
+	if (!dev->pdev)
+		return FALSE;
+	/* if we get any flags we don't understand fail to probe for now */
+	if (flags)
+		return FALSE;
+
+	scrn = xf86AllocateScreen(driver, 0);
+	xf86AddEntityToScreen(scrn, entity_num);
+
+	scrn->driverVersion = INTEL_VERSION;
+	scrn->driverName = INTEL_DRIVER_NAME;
+	scrn->name = INTEL_NAME;
+	scrn->driverPrivate = (void *)(match_data | 1);
+	scrn->Probe = NULL;
+	switch (get_accel_method()) {
+#if USE_SNA
+        case SNA: sna_init_scrn(scrn, entity_num); break;
+#endif
+
+#if USE_UXA
+        case UXA: intel_init_scrn(scrn); break;
+#endif
+	default: break;
+	}
+
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "using drv %s\n", path ? path : "Default device");
+	return scrn != NULL;
+}
+#endif
+
 #ifdef XFree86LOADER
 
 static MODULESETUPPROTO(intel_setup);
@@ -569,7 +614,10 @@ static DriverRec intel = {
 	0,
 	intel_driver_func,
 	intel_device_match,
-	intel_pci_probe
+	intel_pci_probe,
+#ifdef XSERVER_PLATFORM_BUS
+	intel_platform_probe
+#endif
 };
 
 static pointer intel_setup(pointer module,
commit 164ae7a4f9fd3e538f1b506031d297088fc0c659
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 17:19:12 2012 +0100

    man: Malformed "Backlight" section
    
    Reported-by: Matthew Monaco <dgbaley27 at 0x01b.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54397
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/man/intel.man b/man/intel.man
index 9ab9fa2..0942dc1 100644
--- a/man/intel.man
+++ b/man/intel.man
@@ -132,12 +132,11 @@ have options for selecting adaptors.
 .IP
 Default: Textured video adaptor is preferred.
 .TP
-.BI "Option \*Backlight\*q \*q" string \*q
+.BI "Option \*qBacklight\*q \*q" string \*q
 Override the probed backlight control interface. Sometimes the automatically
 selected backlight interface may not correspond to the correct, or simply
 most useful, interface available on the system. This allows you to override
-by specifying the entry under /sys/class/backlight to use.
-server log.
+that choice by specifying the entry under /sys/class/backlight to use.
 .IP
 Default: Automatic selection.
 .TP
commit 67b44104168650473a24e69056707e0b35f1713e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 12:47:36 2012 +0100

    sna: Cache the temporary upload buffer when copying boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bb7cda2..0cb10bc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3889,6 +3889,9 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 
 		return count > SOURCE_BIAS;
 	} else {
+		if (w == pixmap->drawable.width && h == pixmap->drawable.height)
+			return count > SOURCE_BIAS;
+
 		return count * w*h >= (SOURCE_BIAS+2) * (int)pixmap->drawable.width * pixmap->drawable.height;
 	}
 }
@@ -4348,6 +4351,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 		if (alu != GXcopy) {
 			PixmapPtr tmp;
+			struct kgem_bo *src_bo;
 			int i;
 
 			assert(src_pixmap->drawable.depth != 1);
@@ -4363,6 +4367,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (tmp == NullPixmap)
 				return;
 
+			src_bo = sna_pixmap_get_bo(tmp);
+			assert(src_bo != NULL);
+
 			dx = -region->extents.x1;
 			dy = -region->extents.y1;
 			for (i = 0; i < n; i++) {
@@ -4389,8 +4396,13 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					   box[i].y2 - box[i].y1);
 			}
 
+			if (n == 1 &&
+			    tmp->drawable.width == src_pixmap->drawable.width &&
+			    tmp->drawable.height == src_pixmap->drawable.height)
+				kgem_proxy_bo_attach(src_bo, &src_priv->gpu_bo);
+
 			if (!sna->render.copy_boxes(sna, alu,
-						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
+						    tmp, src_bo, dx, dy,
 						    dst_pixmap, bo, 0, 0,
 						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
commit 4b558281e04e4d4febfc361632a90f8a45080c49
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 12:23:59 2012 +0100

    sna: Don't retire when searching for a snoopable buffer without hw support
    
    If the hw/kernel doesn't support snoopable buffers, then it makes little
    sense to search for one, and force a retire in the certainty of not
    finding any.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6df5a2e..cb0c82a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1375,6 +1375,9 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 
 	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
 
+	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
+		return NULL;
+
 	if (list_is_empty(&kgem->snoop)) {
 		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
 		if (!__kgem_throttle_retire(kgem, flags)) {
commit d933f3a7965c9aa70f70477be3bfe94d5ded948b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 10:29:49 2012 +0100

    2.20.6 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 54f2caa..a2caaa7 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,24 @@
+Release 2.20.6 (2012-09-02)
+===========================
+A serious bug that caused a crash on SandyBridge and IvyBridge when
+mixing CPU and GPU operations on the same buffer, and an annoyance from
+bad scheduling of windowed swapbuffer updates causing low framerates and
+jitter. Plus the usual smattering of assertion fixes and a long standing
+issue with incoherent page access to a streaming buffer.
+
+ * Low frame rates in Blobby Valley when "fullscreen"
+   https://bugs.freedesktop.org/show_bug.cgi?id=54274
+
+ * Incoherent concurrent access with the CPU and GPU
+   https://bugs.freedesktop.org/show_bug.cgi
+   https://bugs.freedesktop.org/show_bug.cgi
+
+ * Add Option "Backlight" to override automatic selection of the
+   backlight interface.
+
+ * Avoid overwriting the composite operation info when testing if we
+   can transfer the operation to the BLT.
+
 Release 2.20.5 (2012-08-26)
 ===========================
 Another silly bug found, another small bugfix release. The goal was for
diff --git a/configure.ac b/configure.ac
index 114e721..c401a61 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.5],
+        [2.20.6],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 62e7e69d88f7e5c8b0f0931fe887ee0c3bddb43f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Sep 2 09:55:07 2012 +0100

    sna: Tweak placement rules if we already have a CPU bo
    
    If we have a CPU bo, consider if it may be quicker to render to it then
    create a GPU bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7c598f1..bb7cda2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2427,23 +2427,43 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 			goto use_cpu_bo;
 		}
 
-		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
-			DBG(("%s: already using CPU bo, will not force allocation\n",
-			     __FUNCTION__));
-			goto use_cpu_bo;
-		}
+		if ((flags & IGNORE_CPU) == 0) {
+			if (priv->cpu_bo) {
+				if (to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu) {
+					if (kgem_bo_is_busy(priv->cpu_bo)) {
+						DBG(("%s: already using CPU bo, will not force allocation\n",
+						     __FUNCTION__));
+						goto use_cpu_bo;
+					}
 
-		if (priv->cpu_damage && flags == 0) {
-			DBG(("%s: prefer cpu", __FUNCTION__));
-			goto use_cpu_bo;
-		}
+					if ((flags & RENDER_GPU) == 0) {
+						DBG(("%s: prefer cpu", __FUNCTION__));
+						goto use_cpu_bo;
+					}
+				} else {
+					if (kgem_bo_is_busy(priv->cpu_bo)) {
+						DBG(("%s: CPU bo active, must force allocation\n",
+						     __FUNCTION__));
+						goto create_gpu_bo;
+					}
+				}
+			}
 
-		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
-			DBG(("%s: damaged with a small operation, will not force allocation\n",
-			     __FUNCTION__));
-			goto use_cpu_bo;
+			if (priv->cpu_damage) {
+				if ((flags & (PREFER_GPU | FORCE_GPU)) == 0) {
+					DBG(("%s: prefer cpu", __FUNCTION__));
+					goto use_cpu_bo;
+				}
+
+				if (!box_inplace(pixmap, box)) {
+					DBG(("%s: damaged with a small operation, will not force allocation\n",
+					     __FUNCTION__));
+					goto use_cpu_bo;
+				}
+			}
 		}
 
+create_gpu_bo:
 		move = MOVE_WRITE | MOVE_READ;
 		if (flags & FORCE_GPU)
 			move |= __MOVE_FORCE;
commit f837807cc257fbedd35fa6101dc0b4a57e5ad78d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 1 10:36:09 2012 +0100

    sna/dri: Remove busy-wait spin from vblank chaining of swapbuffers
    
    The issue being that, due to the delay, the chained swap would miss its
    intended vblank and so cause an unwanted reduction in frame throughput
    and increase output latency even further. Since both client and server
    have other rate-limiting processes in place, we can forgo the stall here
    and still keep the clients in check.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54274
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 0210d73..77e4e26 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1268,9 +1268,6 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 		break;
 
 	case DRI2_SWAP_THROTTLE:
-		if (!sna_dri_blit_complete(sna, info))
-			return;
-
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
 		     __FUNCTION__, info->type,
 		     event->sequence, event->tv_sec, event->tv_usec));
commit b56e8c5105c858452ca4eabf15b298fc06dfd3c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 1 12:15:47 2012 +0100

    sna: Nullify gpu_bo after free in case final release is deferred in destroy
    
    As we may defer the actual release of the pixmap until after completion
    of the last shm operation, we need to make sure in that case we mark the
    GPU bo as released to prevent a use-after-free.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7dad94b..7c598f1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1073,8 +1073,10 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 	sna = to_sna_from_pixmap(pixmap);
 
 	/* Always release the gpu bo back to the lower levels of caching */
-	if (priv->gpu_bo)
+	if (priv->gpu_bo) {
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+		priv->gpu_bo = NULL;
+	}
 
 	if (priv->shm && kgem_bo_is_busy(priv->cpu_bo)) {
 		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
commit c4d994014160d8c946af731196a908991c77d9f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 1 10:44:24 2012 +0100

    sna/gen2+: Add the missing assertions in case the drawrect is invalid
    
    Only the later gen had these useful assertions, add them to the rest
    just in case.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index d2f6fe7..7d51823 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -543,6 +543,7 @@ gen2_get_batch(struct sna *sna)
 
 static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
 {
+	assert(!too_large(op->dst.width, op->dst.height));
 	assert(op->dst.bo->pitch >= 8 && op->dst.bo->pitch <= MAX_3D_PITCH);
 	assert(sna->render_state.gen2.vertex_offset == 0);
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ab94bdb..c5ec9bc 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1341,6 +1341,8 @@ static void gen3_emit_target(struct sna *sna,
 {
 	struct gen3_render_state *state = &sna->render_state.gen3;
 
+	assert(!too_large(width, height));
+
 	/* BUF_INFO is an implicit flush, so skip if the target is unchanged. */
 	assert(bo->unique_id != 0);
 	if (bo->unique_id != state->current_dst) {
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index e732810..d8b76a1 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1283,6 +1283,9 @@ gen4_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
 	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
 	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;
 
+	assert(!too_large(op->dst.x, op->dst.y));
+	assert(!too_large(op->dst.width, op->dst.height));
+
 	if (sna->render_state.gen4.drawrect_limit == limit &&
 	    sna->render_state.gen4.drawrect_offset == offset)
 		return;
commit 25c1b478a957ceb0474e7121840b7eed1ba2e140
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Sep 1 09:49:34 2012 +0100

    sna: Discard cached CPU upload early when clearing with a solid fill
    
    Otherwise we end up considering the GPU bo as a real target, causing
    confusion and failed asserts.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 090da91..7dad94b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4153,6 +4153,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		DBG(("%s: overwritting CPU damage\n", __FUNCTION__));
 		if (region_subsumes_damage(region, dst_priv->cpu_damage)) {
 			DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
+			if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
+				kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
+				dst_priv->gpu_bo = NULL;
+			}
 			sna_damage_destroy(&dst_priv->cpu_damage);
 			list_del(&dst_priv->list);
 		}
@@ -11580,6 +11584,10 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		    region_is_singular(gc->pCompositeClip)) {
 			if (region_subsumes_damage(&region, priv->cpu_damage)) {
 				DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
+				if (priv->gpu_bo && priv->gpu_bo->proxy) {
+					kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+					priv->gpu_bo = NULL;
+				}
 				sna_damage_destroy(&priv->cpu_damage);
 				list_del(&priv->list);
 			}
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index d47479b..65dae9c 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -829,6 +829,10 @@ sna_composite_rectangles(CARD8		 op,
 		if (priv->cpu_damage &&
 		    region_subsumes_damage(&region, priv->cpu_damage)) {
 			DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
+			if (priv->gpu_bo && priv->gpu_bo->proxy) {
+				kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+				priv->gpu_bo = NULL;
+			}
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 		}
commit 530b1d1516595cf14c5112d8833b870cd50eca46
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 30 22:21:57 2012 +0100

    sna/dri: Use common routine for testing busyness after flush
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 5fa17d7..0210d73 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1206,24 +1206,21 @@ static void chain_swap(struct sna *sna,
 static bool sna_dri_blit_complete(struct sna *sna,
 				  struct sna_dri_frame_event *info)
 {
-	if (info->bo && kgem_bo_is_busy(info->bo)) {
-		kgem_retire(&sna->kgem);
-		if (kgem_bo_is_busy(info->bo)) {
-			drmVBlank vbl;
+	if (info->bo && __kgem_bo_is_busy(&sna->kgem, info->bo)) {
+		drmVBlank vbl;
 
-			DBG(("%s: vsync'ed blit is still busy, postponing\n",
-			     __FUNCTION__));
+		DBG(("%s: vsync'ed blit is still busy, postponing\n",
+		     __FUNCTION__));
 
-			VG_CLEAR(vbl);
-			vbl.request.type =
-				DRM_VBLANK_RELATIVE |
-				DRM_VBLANK_EVENT |
-				pipe_select(info->pipe);
-			vbl.request.sequence = 1;
-			vbl.request.signal = (unsigned long)info;
-			if (!sna_wait_vblank(sna, &vbl))
-				return false;
-		}
+		VG_CLEAR(vbl);
+		vbl.request.type =
+			DRM_VBLANK_RELATIVE |
+			DRM_VBLANK_EVENT |
+			pipe_select(info->pipe);
+		vbl.request.sequence = 1;
+		vbl.request.signal = (unsigned long)info;
+		if (!sna_wait_vblank(sna, &vbl))
+			return false;
 	}
 
 	return true;
commit 00d8c776b3607dbdab32c1126f91a7a38b8065f6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 30 21:47:17 2012 +0100

    sna/dri: Hold a reference to the vsync blit
    
    Fixes regression from
    
    commit 96a921487ef00db03a12bec7b0821410d6b74c31
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Mon Aug 27 21:50:32 2012 +0100
    
        sna: Track outstanding requests per-ring
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ed5e342..6df5a2e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1258,8 +1258,10 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 {
 	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));
 
+	assert(bo->refcnt == 0);
 	assert(bo->reusable);
 	assert(bo->rq == NULL);
+	assert(bo->exec == NULL);
 	assert(bo->domain != DOMAIN_GPU);
 	assert(!kgem_busy(kgem, bo->handle));
 	assert(!bo->proxy);
@@ -1544,6 +1546,7 @@ destroy:
 
 static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 {
+	assert(bo->refcnt);
 	if (--bo->refcnt == 0)
 		__kgem_bo_destroy(kgem, bo);
 }
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 06a940b..5fa17d7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -616,7 +616,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		if (flush) { /* STAT! */
 			struct kgem_request *rq = sna->kgem.next_request;
 			kgem_submit(&sna->kgem);
-			bo = rq->bo;
+			bo = kgem_bo_reference(rq->bo);
 		}
 	}
 
commit b2a6c74c2159c9968c19400d61a11f4773724b4a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 29 14:51:40 2012 +0100

    sna: Remove inconsistent assertion
    
    As we now may prefer not to use the GPU even if the pixmap is
    all-damaged and clear, asserting that the pixmap is not clear when we
    choose to use the CPU is now bogus.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9c493c2..090da91 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2379,6 +2379,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
 		DBG(("%s: cached upload proxy, discard and revert to GPU\n",
 		     __FUNCTION__));
+		assert(priv->gpu_damage == NULL);
 		kgem_bo_destroy(&to_sna_from_pixmap(pixmap)->kgem,
 				priv->gpu_bo);
 		priv->gpu_bo = NULL;
@@ -2548,7 +2549,6 @@ use_gpu_bo:
 	return priv->gpu_bo;
 
 use_cpu_bo:
-	assert(!priv->clear);
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
commit deaa1cac269be03f4ec44092f70349ff466d59de
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 28 22:23:22 2012 +0100

    sna: Align active upload buffers to the next page for reuse
    
    If we write to the same page as it already active on the GPU then
    despite the invalidation performed at the beginning of each batch, we do
    not seem to correctly sample the new data.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=51422
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52299
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e51bbc0..ed5e342 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1910,13 +1910,18 @@ static void kgem_finish_buffers(struct kgem *kgem)
 		}
 
 		if (bo->mmapped) {
+			int used;
+
 			assert(!bo->need_io);
+
+			used = ALIGN(bo->used + PAGE_SIZE-1, PAGE_SIZE);
 			if (!DBG_NO_UPLOAD_ACTIVE &&
-			    bo->used + PAGE_SIZE <= bytes(&bo->base) &&
+			    used + PAGE_SIZE <= bytes(&bo->base) &&
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
 				assert(!bo->base.snoop);
+				bo->used = used;
 				list_move(&bo->base.list,
 					  &kgem->active_buffers);
 				continue;
commit 9e0305a3928f253ded6c8e141a4dd42be3952eb9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 28 20:49:20 2012 +0100

    sna: Discard inplace flag if we create a CPU map for the upload buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ba8b0b9..e51bbc0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4300,6 +4300,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
 			assert(!bo->base.snoop);
+			assert(!IS_CPU_MAP(bo->base.map) || kgem->has_llc);
 
 			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
@@ -4435,9 +4436,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->mmapped);
 			assert(bo->base.refcnt == 1);
 
-			bo->mem = kgem_bo_map__gtt(kgem, &bo->base);
+			bo->mem = kgem_bo_map(kgem, &bo->base);
 			if (bo->mem) {
 				alloc = num_pages(&bo->base);
+				if (IS_CPU_MAP(bo->base.map))
+				    flags &= ~KGEM_BUFFER_INPLACE;
 				goto init;
 			} else {
 				bo->base.refcnt = 0;
commit f3e122554e88da0351bfb9a7a722f0715553689b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 28 21:27:04 2012 +0100

    sna: Propagate the request to flush rather than directly submit the batch
    
    The subtlety is that we need to reset the mode correctly after
    submitting the batch which was not handled by kgem_flush(). If we fail
    to set the appropriate mode then the next operation will be on a random
    ring, which can prove fatal with SandyBridge+.
    
    Reported-by: Reinis Danne <reinis.danne at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 520f0b2..ba8b0b9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3480,7 +3480,8 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	int num_exec = 0;
 	int num_pages = 0;
 
-	kgem_flush(kgem);
+	if (kgem_flush(kgem))
+		return false;
 
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
@@ -3516,7 +3517,8 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 {
 	uint32_t size;
 
-	kgem_flush(kgem);
+	if (kgem_flush(kgem))
+		return false;
 
 	while (bo->proxy)
 		bo = bo->proxy;
@@ -3561,7 +3563,8 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 	int num_pages = 0;
 	int fenced_size = 0;
 
-	kgem_flush(kgem);
+	if (kgem_flush(kgem))
+		return false;
 
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index a72fe42..d72db55 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -279,10 +279,9 @@ static inline void kgem_submit(struct kgem *kgem)
 		_kgem_submit(kgem);
 }
 
-static inline void kgem_flush(struct kgem *kgem)
+static inline bool kgem_flush(struct kgem *kgem)
 {
-	if (kgem->flush && kgem_is_idle(kgem))
-		_kgem_submit(kgem);
+	return kgem->flush && kgem_is_idle(kgem);
 }
 
 static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
commit b5c8efe4309248e62d94d80b37a70775284ae985
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 28 08:33:00 2012 +0100

    sna: Make sure we reset the domain tracking when exporting DRI bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cd16ffe..520f0b2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3905,6 +3905,8 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* The bo is outside of our control, so presume it is written to */
 	bo->needs_flush = true;
+	if (bo->domain != DOMAIN_GPU)
+		bo->domain = DOMAIN_NONE;
 
 	/* Henceforth, we need to broadcast all updates to clients and
 	 * flush our rendering before doing so.
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 8967ed9..06a940b 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -426,6 +426,8 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 
 	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 	priv->gpu_bo = ref(bo);
+	if (bo->domain != DOMAIN_GPU)
+		bo->domain = DOMAIN_NONE;
 
 	/* Post damage on the new front buffer so that listeners, such
 	 * as DisplayLink know take a copy and shove it over the USB.
commit 96a921487ef00db03a12bec7b0821410d6b74c31
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 21:50:32 2012 +0100

    sna: Track outstanding requests per-ring
    
    In order to properly track when the GPU is idle, we need to account for
    the completion order that may differ on architectures like SandyBridge
    with multiple mostly independent rings.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54127
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index dc7c95d..cd16ffe 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -353,7 +353,8 @@ kgem_busy(struct kgem *kgem, int handle)
 	busy.handle = handle;
 	busy.busy = !kgem->wedged;
 	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
-	DBG(("%s: handle=%d, busy=%d, wedged=%d\n", busy.busy, kgem->wedged));
+	DBG(("%s: handle=%d, busy=%d, wedged=%d\n",
+	     __FUNCTION__, handle, busy.busy, kgem->wedged));
 
 	return busy.busy;
 }
@@ -551,6 +552,7 @@ static struct kgem_request *__kgem_request_alloc(void)
 
 	list_init(&rq->buffers);
 	rq->bo = NULL;
+	rq->ring = 0;
 
 	return rq;
 }
@@ -849,9 +851,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
 	     kgem->half_cpu_cache_pages));
 
+	list_init(&kgem->requests[0]);
+	list_init(&kgem->requests[1]);
 	list_init(&kgem->batch_buffers);
 	list_init(&kgem->active_buffers);
-	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->large);
 	list_init(&kgem->snoop);
@@ -1630,106 +1633,122 @@ static bool kgem_retire__requests(struct kgem *kgem)
 {
 	struct kgem_bo *bo;
 	bool retired = false;
+	int n;
 
-	while (!list_is_empty(&kgem->requests)) {
-		struct kgem_request *rq;
-
-		rq = list_first_entry(&kgem->requests,
-				      struct kgem_request,
-				      list);
-		if (kgem_busy(kgem, rq->bo->handle))
-			break;
-
-		DBG(("%s: request %d complete\n",
-		     __FUNCTION__, rq->bo->handle));
+	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
+		while (!list_is_empty(&kgem->requests[n])) {
+			struct kgem_request *rq;
 
-		while (!list_is_empty(&rq->buffers)) {
-			bo = list_first_entry(&rq->buffers,
-					      struct kgem_bo,
-					      request);
+			rq = list_first_entry(&kgem->requests[n],
+					      struct kgem_request,
+					      list);
+			if (kgem_busy(kgem, rq->bo->handle))
+				break;
 
-			assert(bo->rq == rq);
-			assert(bo->exec == NULL);
-			assert(bo->domain == DOMAIN_GPU);
+			DBG(("%s: request %d complete\n",
+			     __FUNCTION__, rq->bo->handle));
 
-			list_del(&bo->request);
+			while (!list_is_empty(&rq->buffers)) {
+				bo = list_first_entry(&rq->buffers,
+						      struct kgem_bo,
+						      request);
 
-			if (bo->needs_flush)
-				bo->needs_flush = kgem_busy(kgem, bo->handle);
-			if (bo->needs_flush) {
-				DBG(("%s: moving %d to flushing\n",
-				     __FUNCTION__, bo->handle));
-				list_add(&bo->request, &kgem->flushing);
-				bo->rq = &_kgem_static_request;
-			} else {
-				bo->domain = DOMAIN_NONE;
-				bo->rq = NULL;
-			}
+				assert(bo->rq == rq);
+				assert(bo->exec == NULL);
+				assert(bo->domain == DOMAIN_GPU);
 
-			if (bo->refcnt)
-				continue;
+				list_del(&bo->request);
 
-			if (bo->snoop) {
+				if (bo->needs_flush)
+					bo->needs_flush = kgem_busy(kgem, bo->handle);
 				if (bo->needs_flush) {
+					DBG(("%s: moving %d to flushing\n",
+					     __FUNCTION__, bo->handle));
 					list_add(&bo->request, &kgem->flushing);
 					bo->rq = &_kgem_static_request;
 				} else {
-					kgem_bo_move_to_snoop(kgem, bo);
+					bo->domain = DOMAIN_NONE;
+					bo->rq = NULL;
 				}
-				continue;
-			}
 
-			if (!bo->reusable) {
-				DBG(("%s: closing %d\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_free(kgem, bo);
-				continue;
-			}
+				if (bo->refcnt)
+					continue;
 
-			if (!bo->needs_flush) {
-				if (kgem_bo_set_purgeable(kgem, bo)) {
-					kgem_bo_move_to_inactive(kgem, bo);
-					retired = true;
-				} else {
+				if (bo->snoop) {
+					if (bo->needs_flush) {
+						list_add(&bo->request, &kgem->flushing);
+						bo->rq = &_kgem_static_request;
+					} else {
+						kgem_bo_move_to_snoop(kgem, bo);
+					}
+					continue;
+				}
+
+				if (!bo->reusable) {
 					DBG(("%s: closing %d\n",
 					     __FUNCTION__, bo->handle));
 					kgem_bo_free(kgem, bo);
+					continue;
+				}
+
+				if (!bo->needs_flush) {
+					if (kgem_bo_set_purgeable(kgem, bo)) {
+						kgem_bo_move_to_inactive(kgem, bo);
+						retired = true;
+					} else {
+						DBG(("%s: closing %d\n",
+						     __FUNCTION__, bo->handle));
+						kgem_bo_free(kgem, bo);
+					}
 				}
 			}
-		}
 
-		assert(rq->bo->rq == NULL);
-		assert(list_is_empty(&rq->bo->request));
+			assert(rq->bo->rq == NULL);
+			assert(list_is_empty(&rq->bo->request));
 
-		if (--rq->bo->refcnt == 0) {
-			if (kgem_bo_set_purgeable(kgem, rq->bo)) {
-				kgem_bo_move_to_inactive(kgem, rq->bo);
-				retired = true;
-			} else {
-				DBG(("%s: closing %d\n",
-				     __FUNCTION__, rq->bo->handle));
-				kgem_bo_free(kgem, rq->bo);
+			if (--rq->bo->refcnt == 0) {
+				if (kgem_bo_set_purgeable(kgem, rq->bo)) {
+					kgem_bo_move_to_inactive(kgem, rq->bo);
+					retired = true;
+				} else {
+					DBG(("%s: closing %d\n",
+					     __FUNCTION__, rq->bo->handle));
+					kgem_bo_free(kgem, rq->bo);
+				}
 			}
+
+			__kgem_request_free(rq);
+			kgem->num_requests--;
 		}
 
-		__kgem_request_free(rq);
+#if HAS_DEBUG_FULL
+		{
+			int count = 0;
+
+			list_for_each_entry(bo, &kgem->requests[n], request)
+				count++;
+
+			bo = NULL;
+			if (!list_is_empty(&kgem->requests[n]))
+				bo = list_first_entry(&kgem->requests[n],
+						      struct kgem_request,
+						      list)->bo;
+
+			ErrorF("%s: ring=%d, %d outstanding requests, oldest=%d\n",
+			       __FUNCTION__, n, count, bo ? bo->handle : 0);
+		}
+#endif
 	}
 
 #if HAS_DEBUG_FULL
 	{
 		int count = 0;
 
-		list_for_each_entry(bo, &kgem->requests, request)
-			count++;
-
-		bo = NULL;
-		if (!list_is_empty(&kgem->requests))
-			bo = list_first_entry(&kgem->requests,
-					      struct kgem_request,
-					      list)->bo;
+		for (n = 0; n < ARRAY_SIZE(kgem->requests); n++)
+			list_for_each_entry(bo, &kgem->requests[n], request)
+				count++;
 
-		ErrorF("%s: %d outstanding requests, oldest=%d\n",
-		       __FUNCTION__, count, bo ? bo->handle : 0);
+		assert(count == kgem->num_requests);
 	}
 #endif
 
@@ -1743,11 +1762,12 @@ bool kgem_retire(struct kgem *kgem)
 	DBG(("%s\n", __FUNCTION__));
 
 	retired |= kgem_retire__flushing(kgem);
-	retired |= kgem_retire__requests(kgem);
+	if (kgem->num_requests)
+		retired |= kgem_retire__requests(kgem);
 	retired |= kgem_retire__buffers(kgem);
 
 	kgem->need_retire =
-		!list_is_empty(&kgem->requests) ||
+		kgem->num_requests ||
 		!list_is_empty(&kgem->flushing);
 	DBG(("%s -- retired=%d, need_retire=%d\n",
 	     __FUNCTION__, retired, kgem->need_retire));
@@ -1759,20 +1779,29 @@ bool kgem_retire(struct kgem *kgem)
 
 bool __kgem_is_idle(struct kgem *kgem)
 {
-	struct kgem_request *rq;
+	int n;
 
-	assert(!list_is_empty(&kgem->requests));
+	assert(kgem->num_requests);
 
-	rq = list_last_entry(&kgem->requests, struct kgem_request, list);
-	if (kgem_busy(kgem, rq->bo->handle)) {
-		DBG(("%s: last requests handle=%d still busy\n",
-		     __FUNCTION__, rq->bo->handle));
-		return false;
-	}
+	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
+		struct kgem_request *rq;
+
+		if (list_is_empty(&kgem->requests[n]))
+			continue;
+
+		rq = list_last_entry(&kgem->requests[n],
+				     struct kgem_request, list);
+		if (kgem_busy(kgem, rq->bo->handle)) {
+			DBG(("%s: last requests handle=%d still busy\n",
+			     __FUNCTION__, rq->bo->handle));
+			return false;
+		}
 
-	DBG(("%s: gpu idle (handle=%d)\n", __FUNCTION__, rq->bo->handle));
+		DBG(("%s: ring=%d idle (handle=%d)\n",
+		     __FUNCTION__, n, rq->bo->handle));
+	}
 	kgem_retire__requests(kgem);
-	assert(list_is_empty(&kgem->requests));
+	assert(kgem->num_requests == 0);
 	return true;
 }
 
@@ -1834,8 +1863,9 @@ static void kgem_commit(struct kgem *kgem)
 
 		gem_close(kgem->fd, rq->bo->handle);
 	} else {
-		list_add_tail(&rq->list, &kgem->requests);
+		list_add_tail(&rq->list, &kgem->requests[rq->ring]);
 		kgem->need_throttle = kgem->need_retire = 1;
+		kgem->num_requests++;
 	}
 
 	kgem->next_request = NULL;
@@ -1975,31 +2005,36 @@ decouple:
 
 static void kgem_cleanup(struct kgem *kgem)
 {
-	while (!list_is_empty(&kgem->requests)) {
-		struct kgem_request *rq;
+	int n;
 
-		rq = list_first_entry(&kgem->requests,
-				      struct kgem_request,
-				      list);
-		while (!list_is_empty(&rq->buffers)) {
-			struct kgem_bo *bo;
+	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
+		while (!list_is_empty(&kgem->requests[n])) {
+			struct kgem_request *rq;
 
-			bo = list_first_entry(&rq->buffers,
-					      struct kgem_bo,
-					      request);
+			rq = list_first_entry(&kgem->requests[n],
+					      struct kgem_request,
+					      list);
+			while (!list_is_empty(&rq->buffers)) {
+				struct kgem_bo *bo;
 
-			list_del(&bo->request);
-			bo->rq = NULL;
-			bo->exec = NULL;
-			bo->domain = DOMAIN_NONE;
-			bo->dirty = false;
-			if (bo->refcnt == 0)
-				kgem_bo_free(kgem, bo);
-		}
+				bo = list_first_entry(&rq->buffers,
+						      struct kgem_bo,
+						      request);
 
-		__kgem_request_free(rq);
+				list_del(&bo->request);
+				bo->rq = NULL;
+				bo->exec = NULL;
+				bo->domain = DOMAIN_NONE;
+				bo->dirty = false;
+				if (bo->refcnt == 0)
+					kgem_bo_free(kgem, bo);
+			}
+
+			__kgem_request_free(rq);
+		}
 	}
 
+	kgem->num_requests = 0;
 	kgem_close_inactive(kgem);
 }
 
@@ -2169,6 +2204,7 @@ void _kgem_submit(struct kgem *kgem)
 		rq->bo->exec = &kgem->exec[i];
 		rq->bo->rq = rq; /* useful sanity check */
 		list_add(&rq->bo->request, &rq->buffers);
+		rq->ring = kgem->ring == KGEM_BLT;
 
 		kgem_fixup_self_relocs(kgem, rq->bo);
 
@@ -2366,12 +2402,12 @@ bool kgem_expire_cache(struct kgem *kgem)
 	}
 #ifdef DEBUG_MEMORY
 	{
-		long size = 0;
-		int count = 0;
+		long snoop_size = 0;
+		int snoop_count = 0;
 		list_for_each_entry(bo, &kgem->snoop, list)
-			count++, size += bytes(bo);
+			snoop_count++, snoop_size += bytes(bo);
 		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
-		       __FUNCTION__, count, size);
+		       __FUNCTION__, snoop_count, snoop_size);
 	}
 #endif
 
@@ -2441,13 +2477,13 @@ bool kgem_expire_cache(struct kgem *kgem)
 
 #ifdef DEBUG_MEMORY
 	{
-		long size = 0;
-		int count = 0;
+		long inactive_size = 0;
+		int inactive_count = 0;
 		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 			list_for_each_entry(bo, &kgem->inactive[i], list)
-				count++, size += bytes(bo);
+				inactive_count++, inactive_size += bytes(bo);
 		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
-		       __FUNCTION__, count, size);
+		       __FUNCTION__, inactive_count, inactive_size);
 	}
 #endif
 
@@ -2463,25 +2499,28 @@ bool kgem_expire_cache(struct kgem *kgem)
 void kgem_cleanup_cache(struct kgem *kgem)
 {
 	unsigned int i;
+	int n;
 
 	/* sync to the most recent request */
-	if (!list_is_empty(&kgem->requests)) {
-		struct kgem_request *rq;
-		struct drm_i915_gem_set_domain set_domain;
+	for (n = 0; n < ARRAY_SIZE(kgem->requests); n++) {
+		if (!list_is_empty(&kgem->requests[n])) {
+			struct kgem_request *rq;
+			struct drm_i915_gem_set_domain set_domain;
 
-		rq = list_first_entry(&kgem->requests,
-				      struct kgem_request,
-				      list);
+			rq = list_first_entry(&kgem->requests[n],
+					      struct kgem_request,
+					      list);
 
-		DBG(("%s: sync on cleanup\n", __FUNCTION__));
+			DBG(("%s: sync on cleanup\n", __FUNCTION__));
 
-		VG_CLEAR(set_domain);
-		set_domain.handle = rq->bo->handle;
-		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-		(void)drmIoctl(kgem->fd,
-			       DRM_IOCTL_I915_GEM_SET_DOMAIN,
-			       &set_domain);
+			VG_CLEAR(set_domain);
+			set_domain.handle = rq->bo->handle;
+			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+			(void)drmIoctl(kgem->fd,
+				       DRM_IOCTL_I915_GEM_SET_DOMAIN,
+				       &set_domain);
+		}
 	}
 
 	kgem_retire(kgem);
@@ -4802,17 +4841,3 @@ kgem_replace_bo(struct kgem *kgem,
 
 	return dst;
 }
-
-struct kgem_bo *kgem_get_last_request(struct kgem *kgem)
-{
-	struct kgem_request *rq;
-
-	if (list_is_empty(&kgem->requests))
-		return NULL;
-
-	rq = list_last_entry(&kgem->requests,
-			     struct kgem_request,
-			     list);
-
-	return kgem_bo_reference(rq->bo);
-}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 3bf0152..a72fe42 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -97,6 +97,7 @@ struct kgem_request {
 	struct list list;
 	struct kgem_bo *bo;
 	struct list buffers;
+	int ring;
 };
 
 enum {
@@ -126,8 +127,10 @@ struct kgem {
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list snoop;
 	struct list batch_buffers, active_buffers;
-	struct list requests;
+
+	struct list requests[2];
 	struct kgem_request *next_request;
+	uint32_t num_requests;
 
 	struct {
 		struct list inactive[NUM_CACHE_BUCKETS];
@@ -261,14 +264,13 @@ bool kgem_retire(struct kgem *kgem);
 bool __kgem_is_idle(struct kgem *kgem);
 static inline bool kgem_is_idle(struct kgem *kgem)
 {
-	if (list_is_empty(&kgem->requests)) {
+	if (kgem->num_requests == 0) {
 		DBG(("%s: no outstanding requests\n", __FUNCTION__));
 		return true;
 	}
 
 	return __kgem_is_idle(kgem);
 }
-struct kgem_bo *kgem_get_last_request(struct kgem *kgem);
 
 void _kgem_submit(struct kgem *kgem);
 static inline void kgem_submit(struct kgem *kgem)
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 3e8f5f9..8967ed9 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -612,8 +612,9 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 		DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 		if (flush) { /* STAT! */
+			struct kgem_request *rq = sna->kgem.next_request;
 			kgem_submit(&sna->kgem);
-			bo = kgem_get_last_request(&sna->kgem);
+			bo = rq->bo;
 		}
 	}
 
commit 26c731efc2048663b6a19a7ed7db0e94243ab30f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 20:50:08 2012 +0100

    sna: Ensure that we create a GTT mapping for the inplace upload buffer
    
    As the code will optimistically convert a request for a GTT mapping into
    a CPU mapping if the object is still in the CPU domain, we need to
    overrule that in this case where we explicitly want to write directly
    into the GTT and furthermore keep the buffer around in an upload cache.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=51422
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52299
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c9ab2c2..dc7c95d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4377,6 +4377,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old) {
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
+			assert(kgem_bo_is_mappable(kgem, old));
+			assert(!old->snoop);
+			assert(old->rq == NULL);
 
 			bo = buffer_alloc();
 			if (bo == NULL)
@@ -4388,7 +4391,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->mmapped);
 			assert(bo->base.refcnt == 1);
 
-			bo->mem = kgem_bo_map(kgem, &bo->base);
+			bo->mem = kgem_bo_map__gtt(kgem, &bo->base);
 			if (bo->mem) {
 				alloc = num_pages(&bo->base);
 				goto init;
@@ -4398,6 +4401,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 	}
+#else
+	flags &= ~KGEM_BUFFER_INPLACE;
 #endif
 	/* Be more parsimonious with pwrite/pread/cacheable buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
commit 2cbf88980ede50370b97f32e565dea33db16ac44
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 20:07:17 2012 +0100

    sna: Force the stall if using a busy ShmPixmap bo with PutImage
    
    As we will stall in the near future to serialise access with the
    ShmPixmap, we may as well stall first and do a simple copy using the
    CPU in this highly unlikely scenario.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 06b7e26..9c493c2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3311,59 +3311,28 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		 * However, we can queue some writes to the GPU bo to avoid
 		 * the wait. Or we can try to replace the CPU bo.
 		 */
-		if (__kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
-			if (priv->cpu_bo->flush) {
-				if (sna_put_image_upload_blt(drawable, gc, region,
-							     x, y, w, h, bits, stride)) {
-					if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
-						if (region_subsumes_drawable(region, &pixmap->drawable))
-							sna_damage_destroy(&priv->cpu_damage);
-						else
-							sna_damage_subtract(&priv->cpu_damage, region);
-						if (priv->cpu_damage == NULL) {
-							sna_damage_all(&priv->gpu_damage,
-								       pixmap->drawable.width,
-								       pixmap->drawable.height);
-							list_del(&priv->list);
-							priv->undamaged = false;
-						} else
-							sna_damage_add(&priv->gpu_damage, region);
-					}
-
-					assert_pixmap_damage(pixmap);
-					priv->clear = false;
-					priv->cpu = false;
-					return true;
-				}
-			} else {
-				DBG(("%s: cpu bo will stall, upload damage and discard\n",
-				     __FUNCTION__));
-				if (priv->cpu_damage) {
-					if (!region_subsumes_drawable(region, &pixmap->drawable)) {
-						sna_damage_subtract(&priv->cpu_damage, region);
-						if (!sna_pixmap_move_to_gpu(pixmap,
-									    MOVE_WRITE))
-							return false;
-					} else {
-						sna_damage_destroy(&priv->cpu_damage);
-						priv->undamaged = false;
-					}
-				}
-				if (priv->undamaged) {
-					sna_damage_all(&priv->gpu_damage,
-						       pixmap->drawable.width,
-						       pixmap->drawable.height);
-					list_del(&priv->list);
+		if (!priv->shm && __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
+			assert(!priv->cpu_bo->flush);
+			DBG(("%s: cpu bo will stall, upload damage and discard\n",
+			     __FUNCTION__));
+			if (priv->cpu_damage) {
+				if (!region_subsumes_drawable(region, &pixmap->drawable)) {
+					sna_damage_subtract(&priv->cpu_damage, region);
+					if (!sna_pixmap_move_to_gpu(pixmap,
+								    MOVE_WRITE))
+						return false;
+				} else {
+					sna_damage_destroy(&priv->cpu_damage);
 					priv->undamaged = false;
 				}
-				assert(!priv->cpu_bo->flush);
-				assert(!priv->shm);
-				sna_pixmap_free_cpu(sna, priv);
+				assert(priv->cpu_damage == NULL);
 			}
+			if (!priv->undamaged)
+				sna_damage_all(&priv->gpu_damage,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height);
+			sna_pixmap_free_cpu(sna, priv);
 		}
-
-		if (priv->cpu_bo)
-			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
 
 	if (priv->mapped) {
@@ -3376,15 +3345,15 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, false))
 		return true;
 
+	if (priv->cpu_bo) {
+		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
+		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+	}
+
 	if (priv->clear) {
 		DBG(("%s: applying clear [%08x]\n",
 		     __FUNCTION__, priv->clear_color));
 
-		if (priv->cpu_bo) {
-			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
-			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
-		}
-
 		if (priv->clear_color == 0) {
 			memset(pixmap->devPrivate.ptr,
 			       0, pixmap->devKind * pixmap->drawable.height);
@@ -3403,8 +3372,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			       pixmap->drawable.height);
 		sna_pixmap_free_gpu(sna, priv);
 		priv->undamaged = false;
-		priv->clear = false;
-		priv->cpu = false;
 	}
 
 	if (!DAMAGE_IS_ALL(priv->cpu_damage)) {
commit 705103d77e6b80d796a4535cade96cb6e9ebece3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 20:07:19 2012 +0100

    sna: Add a modicum of DBG to kgem_is_idle()
    
    Print out the handle of the bo we just checked.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 92ab02f..c9ab2c2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -353,6 +353,7 @@ kgem_busy(struct kgem *kgem, int handle)
 	busy.handle = handle;
 	busy.busy = !kgem->wedged;
 	(void)drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+	DBG(("%s: handle=%d, busy=%d, wedged=%d\n", busy.busy, kgem->wedged));
 
 	return busy.busy;
 }
@@ -1769,7 +1770,7 @@ bool __kgem_is_idle(struct kgem *kgem)
 		return false;
 	}
 
-	DBG(("%s: gpu idle\n", __FUNCTION__));
+	DBG(("%s: gpu idle (handle=%d)\n", __FUNCTION__, rq->bo->handle));
 	kgem_retire__requests(kgem);
 	assert(list_is_empty(&kgem->requests));
 	return true;
commit 8218e5da2b177ca9cd0e2b1e7dbe114e5ef2ebf0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 19:36:03 2012 +0100

    sna: Fix crash with broken DBG missing one of its arguments
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=54127
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4639fc4..06b7e26 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11638,7 +11638,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	/* If the source is already on the GPU, keep the operation on the GPU */
 	if (gc->fillStyle == FillTiled) {
 		if (!gc->tileIsPixel && sna_pixmap_is_gpu(gc->tile.pixmap)) {
-			DBG(("%s: source is already on the gpu\n"));
+			DBG(("%s: source is already on the gpu\n", __FUNCTION__));
 			hint |= PREFER_GPU | FORCE_GPU;
 		}
 	}
commit 593f549b45fbb1528472feed51207d84901d142e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 16:43:19 2012 +0100

    sna: A little more DBG to try and identify rate-limiting ops
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0dff41c..92ab02f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2262,7 +2262,7 @@ void _kgem_submit(struct kgem *kgem)
 			if (DEBUG_FLUSH_SYNC) {
 				struct drm_i915_gem_set_domain set_domain;
 
-				DBG(("%s: debug sync\n", __FUNCTION__));
+				DBG(("%s: debug sync, starting\n", __FUNCTION__));
 
 				VG_CLEAR(set_domain);
 				set_domain.handle = handle;
@@ -2274,6 +2274,8 @@ void _kgem_submit(struct kgem *kgem)
 					DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
 					kgem_throttle(kgem);
 				}
+
+				DBG(("%s: debug sync, completed\n", __FUNCTION__));
 			}
 		}
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 0d4d706..4493bf6 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -551,7 +551,8 @@ sna_render_pixmap_bo(struct sna *sna,
 	struct sna_pixmap *priv;
 	BoxRec box;
 
-	DBG(("%s (%d, %d)x(%d, %d)/(%d, %d)\n", __FUNCTION__,
+	DBG(("%s pixmap=%ld, (%d, %d)x(%d, %d)/(%d, %d)\n",
+	     __FUNCTION__, pixmap->drawable.serialNumber,
 	     x, y, w,h, pixmap->drawable.width, pixmap->drawable.height));
 
 	channel->width  = pixmap->drawable.width;
@@ -622,7 +623,6 @@ sna_render_pixmap_bo(struct sna *sna,
 	     channel->offset[0], channel->offset[1],
 	     pixmap->drawable.width, pixmap->drawable.height));
 
-
 	channel->bo = __sna_render_pixmap_bo(sna, pixmap, &box, false);
 	if (channel->bo == NULL) {
 		DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
commit 71ac12e9b6ed00c28993637aafd5186a2ba26256
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 15:49:59 2012 +0100

    sna: Assert that the bo is marked as 'flush' when exported to DRI clients
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 7495fb9..0dff41c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4625,8 +4625,9 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	struct kgem_buffer *bo;
 	uint32_t offset = _bo->delta, length = _bo->size.bytes;
 
+	/* We expect the caller to have already submitted the batch */
 	assert(_bo->io);
-	assert(_bo->exec == &_kgem_dummy_exec);
+	assert(_bo->exec == NULL);
 	assert(_bo->rq == NULL);
 	assert(_bo->proxy);
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4fbdd70..4639fc4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2022,6 +2022,7 @@ out:
 	if (flags & MOVE_WRITE) {
 		priv->source_count = SOURCE_BIAS;
 		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+		assert(!priv->flush || !list_is_empty(&priv->list));
 	}
 	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
@@ -3432,6 +3433,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
 		}
 	}
+	assert(!priv->flush || !list_is_empty(&priv->list));
 	priv->cpu = true;
 
 blt:
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 435d22e..3e8f5f9 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -170,6 +170,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 		return NULL;
 	}
 
+	assert(priv->cpu_damage == NULL);
 	if (priv->flush++)
 		return priv->gpu_bo;
 
@@ -189,13 +190,6 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
 
-	if (priv->gpu_bo->exec || priv->cpu_damage) {
-		DBG(("%s: marking pixmap=%ld for flushing\n",
-		     __FUNCTION__, pixmap->drawable.serialNumber));
-		list_move(&priv->list, &sna->flush_pixmaps);
-		sna->kgem.flush = true;
-	}
-
 	return priv->gpu_bo;
 }
 
@@ -340,6 +334,8 @@ sna_dri_create_buffer(DrawablePtr draw,
 		pixmap->refcnt++;
 	}
 
+	assert(bo->flush == true);
+
 	return buffer;
 
 err:
commit cf64c8ce758cfa5d3bcd1b7626ff94cce7a84636
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 13:59:56 2012 +0100

    sna: Upload PutImage inplace to a fresh GPU bo if the device doesn't snoop
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 02d4b13..4fbdd70 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3221,7 +3221,12 @@ static bool upload_inplace(struct sna *sna,
 		}
 	}
 
-	DBG(("%s? no\n", __FUNCTION__));
+	if (priv->create & (KGEM_CAN_CREATE_GPU | KGEM_CAN_CREATE_CPU) == KGEM_CAN_CREATE_GPU &&
+	    region_subsumes_drawable(region, &pixmap->drawable)) {
+		DBG(("%s? yes, will fill fresh GPU bo\n", __FUNCTION__));
+		return true;
+	}
+
 	return false;
 }
 
commit 3c6758fc4a50ecfce9ed317fec669cc48addedcf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 11:58:05 2012 +0100

    sna: Flush the batch if it references a ShmPixmap and the GPU is idle
    
    This helps minimise the stall when syncing with the GPU before sending
    the next reply to the Client.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 568d120..7495fb9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3438,6 +3438,8 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	int num_exec = 0;
 	int num_pages = 0;
 
+	kgem_flush(kgem);
+
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		if (bo->exec)
@@ -3472,6 +3474,8 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 {
 	uint32_t size;
 
+	kgem_flush(kgem);
+
 	while (bo->proxy)
 		bo = bo->proxy;
 	if (bo->exec) {
@@ -3515,6 +3519,8 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 	int num_pages = 0;
 	int fenced_size = 0;
 
+	kgem_flush(kgem);
+
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		while (bo->proxy)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index d8018b8..3bf0152 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -277,6 +277,12 @@ static inline void kgem_submit(struct kgem *kgem)
 		_kgem_submit(kgem);
 }
 
+static inline void kgem_flush(struct kgem *kgem)
+{
+	if (kgem->flush && kgem_is_idle(kgem))
+		_kgem_submit(kgem);
+}
+
 static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (bo->exec)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index fd9728c..02d4b13 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1025,10 +1025,8 @@ void sna_add_flush_pixmap(struct sna *sna,
 	assert(bo);
 	list_move(&priv->list, &sna->flush_pixmaps);
 
-	if (bo->exec == NULL && sna->kgem.need_retire)
-		kgem_retire(&sna->kgem);
-	if (bo->exec == NULL || !sna->kgem.need_retire) {
-		DBG(("%s: flush bo idle, flushing\n", __FUNCTION__));
+	if (bo->exec == NULL) {
+		DBG(("%s: new flush bo, flushin before\n", __FUNCTION__));
 		kgem_submit(&sna->kgem);
 	}
 }
commit f21079bad6e8316baf5d0295d6e7a809041bce06
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 27 11:25:28 2012 +0100

    Revert "sna: Cleanup composite redirection after substituting the BLT"
    
    This reverts commit 5a5212117e7a73ce3fffb87c60a505a849e38c36.
    
    The clean up is in effect too early, as this is during preparation and
    the actual work is already being correctly done at the end.

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 401d84a..d2f6fe7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1782,10 +1782,8 @@ gen2_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 		break;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b13b9bf..ab94bdb 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2880,10 +2880,8 @@ gen3_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 
 		gen3_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 7668caa..e732810 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2315,10 +2315,8 @@ gen4_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 
 		gen4_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 9a94421..5eff871 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2333,10 +2333,8 @@ gen5_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 
 		gen5_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 8eadda7..6d5a6ce 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2710,10 +2710,8 @@ gen6_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 
 		gen6_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ea05a41..ffe41cf 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2815,10 +2815,8 @@ gen7_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp)) {
-			sna_render_composite_redirect_done(sna, tmp);
+					       tmp))
 			return true;
-		}
 
 		gen7_composite_channel_convert(&tmp->src);
 		break;
commit 414e87255cdee6eb556703ddefd194af71b985ed
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 19:47:31 2012 +0100

    Add Option "Backlight" to override the probed backlight control interface
    
    The automatic selection may not correspond with the correct backlight
    (such as in a multi-gpu, multi-panel device) or the user may simply
    prefer another control interface. This allows them to override the
    chosen interface using
    
      Option "Backlight" "my-backlight"
    
    to specify '/sys/class/backlight/my-backlight' as the interface to use
    instead.
    
    Suggested-by: Alon Levy <alevy at redhat.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=29273
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/man/intel.man b/man/intel.man
index 5498f87..9ab9fa2 100644
--- a/man/intel.man
+++ b/man/intel.man
@@ -132,6 +132,15 @@ have options for selecting adaptors.
 .IP
 Default: Textured video adaptor is preferred.
 .TP
+.BI "Option \*Backlight\*q \*q" string \*q
+Override the probed backlight control interface. Sometimes the automatically
+selected backlight interface may not correspond to the correct, or simply
+most useful, interface available on the system. This allows you to override
+by specifying the entry under /sys/class/backlight to use.
+.IP
+Default: Automatic selection.
+.TP
 .BI "Option \*qFallbackDebug\*q \*q" boolean \*q
 Enable printing of debugging information on acceleration fallbacks to the
 server log.
diff --git a/src/intel_display.c b/src/intel_display.c
index 6580c8c..6dfc8e6 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -41,6 +41,7 @@
 
 #include "intel.h"
 #include "intel_bufmgr.h"
+#include "intel_options.h"
 #include "xf86drm.h"
 #include "xf86drmMode.h"
 #include "X11/Xatom.h"
@@ -252,19 +253,36 @@ static void
 intel_output_backlight_init(xf86OutputPtr output)
 {
 	struct intel_output *intel_output = output->driver_private;
+	intel_screen_private *intel = intel_get_screen_private(output->scrn);
+	char path[BACKLIGHT_PATH_LEN];
+	struct stat buf;
+	char *str;
 	int i;
 
-	for (i = 0; backlight_interfaces[i] != NULL; i++) {
-		char path[BACKLIGHT_PATH_LEN];
-		struct stat buf;
+	str = xf86GetOptValString(intel->Options, OPTION_BACKLIGHT);
+	if (str == NULL) {
+		sprintf(path, "%s/%s", BACKLIGHT_CLASS, str);
+		if (!stat(path, &buf)) {
+			intel_output->backlight_iface = backlight_interfaces[i];
+			intel_output->backlight_max = intel_output_backlight_get_max(output);
+			if (intel_output->backlight_max > 0) {
+				xf86DrvMsg(output->scrn->scrnIndex, X_CONFIG,
+					   "found backlight control interface %s\n", path);
+				return;
+			}
+		}
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "unrecognised backlight control interface %s\n", str);
+	}
 
+	for (i = 0; backlight_interfaces[i] != NULL; i++) {
 		sprintf(path, "%s/%s", BACKLIGHT_CLASS, backlight_interfaces[i]);
 		if (!stat(path, &buf)) {
 			intel_output->backlight_iface = backlight_interfaces[i];
 			intel_output->backlight_max = intel_output_backlight_get_max(output);
 			if (intel_output->backlight_max > 0) {
 				intel_output->backlight_active_level = intel_output_backlight_get(output);
-				xf86DrvMsg(output->scrn->scrnIndex, X_INFO,
+				xf86DrvMsg(output->scrn->scrnIndex, X_PROBED,
 					   "found backlight control interface %s\n", path);
 				return;
 			}
diff --git a/src/intel_options.c b/src/intel_options.c
index 7dbbc7e..dcab9e7 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -7,6 +7,7 @@
 const OptionInfoRec intel_options[] = {
 	{OPTION_ACCEL_DISABLE,	"NoAccel",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_ACCEL_METHOD,	"AccelMethod",	OPTV_STRING,	{0},	0},
+	{OPTION_BACKLIGHT,	"Backlight",	OPTV_STRING,	{0},	0},
 	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	1},
 	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	0},
 	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	0},
diff --git a/src/intel_options.h b/src/intel_options.h
index 6c16a07..39c0b73 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -13,6 +13,7 @@
 enum intel_options {
 	OPTION_ACCEL_DISABLE,
 	OPTION_ACCEL_METHOD,
+	OPTION_BACKLIGHT,
 	OPTION_DRI,
 	OPTION_VIDEO_KEY,
 	OPTION_COLOR_KEY,
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b35a7cc..3ec6dc1 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -290,6 +290,31 @@ enum {
 	NAMED,
 };
 
+static char *
+has_user_backlight_override(xf86OutputPtr output)
+{
+	struct sna_output *sna_output = output->driver_private;
+	struct sna *sna = to_sna(output->scrn);
+	char *str;
+	int max;
+
+	str = xf86GetOptValString(sna->Options, OPTION_BACKLIGHT);
+	if (str == NULL)
+		return NULL;
+
+	sna_output->backlight_iface = str;
+	max = sna_output_backlight_get_max(output);
+	sna_output->backlight_iface = NULL;
+	if (max <= 0) {
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "unrecognised backlight control interface '%s'\n",
+			   str);
+		return NULL;
+	}
+
+	return str;
+}
+
 static void
 sna_output_backlight_init(xf86OutputPtr output)
 {
@@ -307,14 +332,17 @@ sna_output_backlight_init(xf86OutputPtr output)
 		"acpi_video0",
 		"intel_backlight",
 	};
+	MessageType from = X_PROBED;
 	struct sna_output *sna_output = output->driver_private;
 	char *best_iface;
 	int best_type;
 	DIR *dir;
 	struct dirent *de;
 
-	best_iface = NULL;
 	best_type = INT_MAX;
+	best_iface = has_user_backlight_override(output);
+	if (best_iface)
+		goto skip;
 
 	dir = opendir(BACKLIGHT_CLASS);
 	if (dir == NULL)
@@ -371,6 +399,7 @@ sna_output_backlight_init(xf86OutputPtr output)
 
 			sna_output->backlight_iface = de->d_name;
 			max = sna_output_backlight_get_max(output);
+			sna_output->backlight_iface = NULL;
 			if (max <= 0)
 				continue;
 
@@ -384,24 +413,23 @@ sna_output_backlight_init(xf86OutputPtr output)
 	}
 	closedir(dir);
 
-	sna_output->backlight_iface = NULL;
+	if (!best_iface)
+		return;
 
-	if (best_iface) {
-		const char *str;
-
-		sna_output->backlight_iface = best_iface;
-		sna_output->backlight_max = sna_output_backlight_get_max(output);
-		sna_output->backlight_active_level = sna_output_backlight_get(output);
-		switch (best_type) {
-		case FIRMWARE: str = "firmware"; break;
-		case PLATFORM: str = "platform"; break;
-		case RAW: str = "raw"; break;
-		default: str = "unknown"; break;
-		}
-		xf86DrvMsg(output->scrn->scrnIndex, X_INFO,
-			   "found backlight control interface %s (type '%s')\n",
-			   best_iface, str);
-	}
+skip:
+	sna_output->backlight_iface = best_iface;
+	sna_output->backlight_max = sna_output_backlight_get_max(output);
+	sna_output->backlight_active_level = sna_output_backlight_get(output);
+	switch (best_type) {
+	case INT_MAX: best_iface = "user"; from = X_CONFIG; break;
+	case FIRMWARE: best_iface = "firmware"; break;
+	case PLATFORM: best_iface = "platform"; break;
+	case RAW: best_iface = "raw"; break;
+	default: best_iface = "unknown"; break;
+	}
+	xf86DrvMsg(output->scrn->scrnIndex, from,
+		   "found backlight control interface %s (type '%s')\n",
+		   sna_output->backlight_iface, best_iface);
 }
 
 
commit d4f7c58186849374cd929e20fa49ea2e93939a69
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 17:01:55 2012 +0100

    sna/gen5+: Use the common methods for choosing the render targets
    
    This should afford us much more flexibility in where we render.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index b24d742..9a94421 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1966,44 +1966,53 @@ gen5_render_composite_done(struct sna *sna,
 }
 
 static bool
-gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
+gen5_composite_set_target(struct sna *sna,
+			  struct sna_composite_op *op,
+			  PicturePtr dst,
+			  int x, int y, int w, int h)
 {
-	struct sna_pixmap *priv;
-
-	DBG(("%s: dst=%p\n", __FUNCTION__, dst));
-
-	assert(gen5_check_dst_format(dst->format));
+	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
-	priv = sna_pixmap(op->dst.pixmap);
-
-	op->dst.width  = op->dst.pixmap->drawable.width;
-	op->dst.height = op->dst.pixmap->drawable.height;
 	op->dst.format = dst->format;
+	op->dst.width = op->dst.pixmap->drawable.width;
+	op->dst.height = op->dst.pixmap->drawable.height;
 
-	DBG(("%s: pixmap=%p, format=%08x\n", __FUNCTION__,
-	     op->dst.pixmap, (unsigned int)op->dst.format));
-
-	op->dst.bo = NULL;
-	if (priv && priv->gpu_bo == NULL) {
-		op->dst.bo = priv->cpu_bo;
-		op->damage = &priv->cpu_damage;
-	}
-	if (op->dst.bo == NULL) {
-		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-		if (priv == NULL)
-			return false;
-
-		op->dst.bo = priv->gpu_bo;
-		op->damage = &priv->gpu_damage;
+	if (w && h) {
+		box.x1 = x;
+		box.y1 = y;
+		box.x2 = x + w;
+		box.y2 = y + h;
+	} else {
+		box.x1 = dst->pDrawable->x;
+		box.y1 = dst->pDrawable->y;
+		box.x2 = box.x1 + dst->pDrawable->width;
+		box.y2 = box.y1 + dst->pDrawable->height;
 	}
-	if (sna_damage_is_all(op->damage, op->dst.width, op->dst.height))
-		op->damage = NULL;
 
-	DBG(("%s: bo=%p, damage=%p\n", __FUNCTION__, op->dst.bo, op->damage));
+	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
+					  &box, &op->damage);
+	if (op->dst.bo == NULL)
+		return false;
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
+
+	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
+	     __FUNCTION__,
+	     op->dst.pixmap, (int)op->dst.format,
+	     op->dst.width, op->dst.height,
+	     op->dst.bo->pitch,
+	     op->dst.x, op->dst.y,
+	     op->damage ? *op->damage : (void *)-1));
+
+	assert(op->dst.bo->proxy == NULL);
+
+	if (too_large(op->dst.width, op->dst.height) &&
+	    !sna_render_composite_redirect(sna, op, x, y, w, h))
+		return false;
+
 	return true;
 }
 
@@ -2299,18 +2308,12 @@ gen5_render_composite(struct sna *sna,
 					    width, height,
 					    tmp);
 
-	if (!gen5_composite_set_target(dst, tmp)) {
+	if (!gen5_composite_set_target(sna, tmp, dst,
+				       dst_x, dst_y, width, height)) {
 		DBG(("%s: failed to set composite target\n", __FUNCTION__));
 		return false;
 	}
 
-	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->dst.width, tmp->dst.height) &&
-	    !sna_render_composite_redirect(sna, tmp,
-					   dst_x, dst_y, width, height))
-		return false;
-
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen5_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
@@ -2676,15 +2679,9 @@ gen5_render_composite_spans(struct sna *sna,
 	}
 
 	tmp->base.op = op;
-	if (!gen5_composite_set_target(dst, &tmp->base))
+	if (!gen5_composite_set_target(sna, &tmp->base, dst,
+				       dst_x, dst_y, width, height))
 		return false;
-	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
-		if (!sna_render_composite_redirect(sna, &tmp->base,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
 
 	switch (gen5_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c364f72..8eadda7 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2315,46 +2315,51 @@ static void gen6_render_composite_done(struct sna *sna,
 static bool
 gen6_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
-			  PicturePtr dst)
+			  PicturePtr dst,
+			  int x, int y, int w, int h)
 {
-	struct sna_pixmap *priv;
+	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
-	op->dst.width  = op->dst.pixmap->drawable.width;
-	op->dst.height = op->dst.pixmap->drawable.height;
 	op->dst.format = dst->format;
+	op->dst.width = op->dst.pixmap->drawable.width;
+	op->dst.height = op->dst.pixmap->drawable.height;
 
-	op->dst.bo = NULL;
-	priv = sna_pixmap(op->dst.pixmap);
-	if (priv && priv->gpu_bo == NULL &&
-	    I915_TILING_NONE == kgem_choose_tiling(&sna->kgem,
-						   I915_TILING_X,
-						   op->dst.width,
-						   op->dst.height,
-						   op->dst.pixmap->drawable.bitsPerPixel)) {
-		op->dst.bo = priv->cpu_bo;
-		op->damage = &priv->cpu_damage;
+	if (w && h) {
+		box.x1 = x;
+		box.y1 = y;
+		box.x2 = x + w;
+		box.y2 = y + h;
+	} else {
+		box.x1 = dst->pDrawable->x;
+		box.y1 = dst->pDrawable->y;
+		box.x2 = box.x1 + dst->pDrawable->width;
+		box.y2 = box.y1 + dst->pDrawable->height;
 	}
-	if (op->dst.bo == NULL) {
-		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-		if (priv == NULL)
-			return false;
 
-		op->dst.bo = priv->gpu_bo;
-		op->damage = &priv->gpu_damage;
-	}
-	if (sna_damage_is_all(op->damage, op->dst.width, op->dst.height))
-		op->damage = NULL;
+	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
+					  &box, &op->damage);
+	if (op->dst.bo == NULL)
+		return false;
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
 
-	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d)\n",
+	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
 	     __FUNCTION__,
 	     op->dst.pixmap, (int)op->dst.format,
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
-	     op->dst.x, op->dst.y));
+	     op->dst.x, op->dst.y,
+	     op->damage ? *op->damage : (void *)-1));
+
+	assert(op->dst.bo->proxy == NULL);
+
+	if (too_large(op->dst.width, op->dst.height) &&
+	    !sna_render_composite_redirect(sna, op, x, y, w, h))
+		return false;
+
 	return true;
 }
 
@@ -2682,17 +2687,10 @@ gen6_render_composite(struct sna *sna,
 	if (op == PictOpClear)
 		op = PictOpSrc;
 	tmp->op = op;
-	if (!gen6_composite_set_target(sna, tmp, dst))
+	if (!gen6_composite_set_target(sna, tmp, dst,
+				       dst_x, dst_y, width, height))
 		return false;
 
-	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->dst.width, tmp->dst.height)) {
-		if (!sna_render_composite_redirect(sna, tmp,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
-
 	switch (gen6_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
@@ -3126,15 +3124,9 @@ gen6_render_composite_spans(struct sna *sna,
 	}
 
 	tmp->base.op = op;
-	if (!gen6_composite_set_target(sna, &tmp->base, dst))
+	if (!gen6_composite_set_target(sna, &tmp->base, dst,
+				       dst_x, dst_y, width, height))
 		return false;
-	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
-		if (!sna_render_composite_redirect(sna, &tmp->base,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
 
 	switch (gen6_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index b8897d3..ea05a41 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2412,46 +2412,53 @@ static void gen7_render_composite_done(struct sna *sna,
 }
 
 static bool
-gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PicturePtr dst)
+gen7_composite_set_target(struct sna *sna,
+			  struct sna_composite_op *op,
+			  PicturePtr dst,
+			  int x, int y, int w, int h)
 {
-	struct sna_pixmap *priv;
+	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
-	op->dst.width  = op->dst.pixmap->drawable.width;
-	op->dst.height = op->dst.pixmap->drawable.height;
 	op->dst.format = dst->format;
+	op->dst.width = op->dst.pixmap->drawable.width;
+	op->dst.height = op->dst.pixmap->drawable.height;
 
-	op->dst.bo = NULL;
-	priv = sna_pixmap(op->dst.pixmap);
-	if (priv && priv->gpu_bo == NULL &&
-	    I915_TILING_NONE == kgem_choose_tiling(&sna->kgem,
-						   I915_TILING_X,
-						   op->dst.width,
-						   op->dst.height,
-						   op->dst.pixmap->drawable.bitsPerPixel)) {
-		op->dst.bo = priv->cpu_bo;
-		op->damage = &priv->cpu_damage;
+	if (w && h) {
+		box.x1 = x;
+		box.y1 = y;
+		box.x2 = x + w;
+		box.y2 = y + h;
+	} else {
+		box.x1 = dst->pDrawable->x;
+		box.y1 = dst->pDrawable->y;
+		box.x2 = box.x1 + dst->pDrawable->width;
+		box.y2 = box.y1 + dst->pDrawable->height;
 	}
-	if (op->dst.bo == NULL) {
-		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-		if (priv == NULL)
-			return false;
 
-		op->dst.bo = priv->gpu_bo;
-		op->damage = &priv->gpu_damage;
-	}
-	if (sna_damage_is_all(op->damage, op->dst.width, op->dst.height))
-		op->damage = NULL;
+	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
+					  &box, &op->damage);
+	if (op->dst.bo == NULL)
+		return false;
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
 
-	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d)\n",
+	DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
 	     __FUNCTION__,
 	     op->dst.pixmap, (int)op->dst.format,
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
-	     op->dst.x, op->dst.y));
+	     op->dst.x, op->dst.y,
+	     op->damage ? *op->damage : (void *)-1));
+
+	assert(op->dst.bo->proxy == NULL);
+
+	if (too_large(op->dst.width, op->dst.height) &&
+	    !sna_render_composite_redirect(sna, op, x, y, w, h))
+		return false;
+
 	return true;
 }
 
@@ -2785,17 +2792,10 @@ gen7_render_composite(struct sna *sna,
 	if (op == PictOpClear)
 		op = PictOpSrc;
 	tmp->op = op;
-	if (!gen7_composite_set_target(sna, tmp, dst))
+	if (!gen7_composite_set_target(sna, tmp, dst,
+				       dst_x, dst_y, width, height))
 		return false;
 
-	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->dst.width, tmp->dst.height)) {
-		if (!sna_render_composite_redirect(sna, tmp,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
-
 	switch (gen7_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
@@ -3217,15 +3217,9 @@ gen7_render_composite_spans(struct sna *sna,
 	}
 
 	tmp->base.op = op;
-	if (!gen7_composite_set_target(sna, &tmp->base, dst))
+	if (!gen7_composite_set_target(sna, &tmp->base, dst,
+				       dst_x, dst_y, width, height))
 		return false;
-	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
-
-	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
-		if (!sna_render_composite_redirect(sna, &tmp->base,
-						   dst_x, dst_y, width, height))
-			return false;
-	}
 
 	switch (gen7_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
commit 3c88b5f693c29b990d69f96508b121ce97a7209e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 25 11:49:30 2012 +0100

    sna: Use a fast span emitter for mono trapezoids without damage or clipping
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index dd438de..482abd3 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1788,6 +1788,8 @@ struct mono {
 	struct sna_composite_op op;
 	pixman_region16_t clip;
 
+	fastcall void (*span)(struct mono *, int, int, BoxPtr);
+
 	struct mono_polygon polygon;
 };
 
@@ -2040,16 +2042,9 @@ mono_merge_edges(struct mono *c, struct mono_edge *edges)
 	c->head.next = mono_merge_unsorted_edges(c->head.next, edges);
 }
 
-inline static void
+fastcall static void
 mono_span(struct mono *c, int x1, int x2, BoxPtr box)
 {
-	if (x1 < c->clip.extents.x1)
-		x1 = c->clip.extents.x1;
-	if (x2 > c->clip.extents.x2)
-		x2 = c->clip.extents.x2;
-	if (x2 <= x1)
-		return;
-
 	__DBG(("%s [%d, %d]\n", __FUNCTION__, x1, x2));
 
 	box->x1 = x1;
@@ -2073,6 +2068,17 @@ mono_span(struct mono *c, int x1, int x2, BoxPtr box)
 	}
 }
 
+fastcall static void
+mono_span__fast(struct mono *c, int x1, int x2, BoxPtr box)
+{
+	__DBG(("%s [%d, %d]\n", __FUNCTION__, x1, x2));
+
+	box->x1 = x1;
+	box->x2 = x2;
+
+	c->op.box(c->sna, &c->op, box);
+}
+
 inline static void
 mono_row(struct mono *c, int16_t y, int16_t h)
 {
@@ -2124,7 +2130,12 @@ mono_row(struct mono *c, int16_t y, int16_t h)
 		if (winding == 0) {
 			assert(I(next->x.quo) >= xend);
 			if (I(next->x.quo) > xend + 1) {
-				mono_span(c, xstart, xend, &box);
+				if (xstart < c->clip.extents.x1)
+					xstart = c->clip.extents.x1;
+				if (xend > c->clip.extents.x2)
+					xend = c->clip.extents.x2;
+				if (xend > xstart)
+					c->span(c, xstart, xend, &box);
 				xstart = INT16_MIN;
 			}
 		} else if (xstart == INT16_MIN)
@@ -2185,6 +2196,11 @@ mono_render(struct mono *mono)
 	struct mono_polygon *polygon = &mono->polygon;
 	int i, j, h = mono->clip.extents.y2 - mono->clip.extents.y1;
 
+	if (mono->clip.data == NULL && mono->op.damage == NULL)
+		mono->span = mono_span__fast;
+	else
+		mono->span = mono_span;
+
 	for (i = 0; i < h; i = j) {
 		j = i + 1;
 
commit 8e10a5b348a37feadcf935ec7694e46cc0802bdf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 14:53:12 2012 +0100

    sna/gen6+: Do not call sna_blt_composite() after prepping the composite op
    
    As sna_blt_composite() will overwrite parts of the composite op as it
    checks whether or not it can execute that operation, it will lead to a
    crash as the normal render path finds the op corrupt. (The BLT
    conversion functions cater for the cases where we may wish to switch
    pipelines after choosing src/dst bo.)
    
    Reported-by: rei4dan at gmail.com
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 390da52..c364f72 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2685,15 +2685,6 @@ gen6_render_composite(struct sna *sna,
 	if (!gen6_composite_set_target(sna, tmp, dst))
 		return false;
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height,
-			      tmp, false))
-		return true;
-
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index f14d777..b8897d3 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2788,16 +2788,6 @@ gen7_render_composite(struct sna *sna,
 	if (!gen7_composite_set_target(sna, tmp, dst))
 		return false;
 
-	if (mask == NULL &&
-	    sna->kgem.mode == KGEM_BLT &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height,
-			      tmp, false))
-		return true;
-
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
commit cbbe7727e766a5ee8767673feb6c8cdec38a7051
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 14:43:47 2012 +0100

    sna/gen6+: Simplify prefer_blt_bo
    
    As we already check the tiling state, so all we need to then check is
    that the pitch is within the BLT constraint.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 9c99b2a..390da52 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2387,7 +2387,7 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 
 static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 {
-	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
+	return untiled_tlb_miss(bo) && bo->pitch < MAXSHORT;
 }
 
 static bool
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index b1f17d7..f14d777 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2476,7 +2476,7 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 
 static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 {
-	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
+	return untiled_tlb_miss(bo) && bo->pitch < MAXSHORT;
 }
 
 inline static bool prefer_blt_ring(struct sna *sna)
commit 0c15824a8143a288716d2eacf03252cc54eb9466
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 14:29:41 2012 +0100

    sna: Add some DBG to kgem_is_idle()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 825caa7..568d120 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1763,9 +1763,13 @@ bool __kgem_is_idle(struct kgem *kgem)
 	assert(!list_is_empty(&kgem->requests));
 
 	rq = list_last_entry(&kgem->requests, struct kgem_request, list);
-	if (kgem_busy(kgem, rq->bo->handle))
+	if (kgem_busy(kgem, rq->bo->handle)) {
+		DBG(("%s: last requests handle=%d still busy\n",
+		     __FUNCTION__, rq->bo->handle));
 		return false;
+	}
 
+	DBG(("%s: gpu idle\n", __FUNCTION__));
 	kgem_retire__requests(kgem);
 	assert(list_is_empty(&kgem->requests));
 	return true;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index d085a2f..d8018b8 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -261,8 +261,10 @@ bool kgem_retire(struct kgem *kgem);
 bool __kgem_is_idle(struct kgem *kgem);
 static inline bool kgem_is_idle(struct kgem *kgem)
 {
-	if (list_is_empty(&kgem->requests))
+	if (list_is_empty(&kgem->requests)) {
+		DBG(("%s: no outstanding requests\n", __FUNCTION__));
 		return true;
+	}
 
 	return __kgem_is_idle(kgem);
 }
commit d432983421286d343f7c487c12c7244b711f5a66
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 13:43:35 2012 +0100

    sna: Add some DBG to BLT composite substitute to show if redirection is used
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 656e979..c286918 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1915,7 +1915,9 @@ sna_blt_composite__convert(struct sna *sna,
 	return false;
 #endif
 
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s src=%d, dst=%d (redirect? %d)\n", __FUNCTION__,
+	     tmp->src.bo->handle, tmp->dst.bo->handle,
+	     tmp->redirect.real_bo ? tmp->redirect.real_bo->handle : 0));
 
 	if (!kgem_bo_can_blt(&sna->kgem, tmp->dst.bo) ||
 	    !kgem_bo_can_blt(&sna->kgem, tmp->src.bo)) {
commit 5a5212117e7a73ce3fffb87c60a505a849e38c36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 13:38:23 2012 +0100

    sna: Cleanup composite redirection after substituting the BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index d2f6fe7..401d84a 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1782,8 +1782,10 @@ gen2_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 		break;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ab94bdb..b13b9bf 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2880,8 +2880,10 @@ gen3_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 
 		gen3_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index e732810..7668caa 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2315,8 +2315,10 @@ gen4_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 
 		gen4_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 0a7bc51..b24d742 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2330,8 +2330,10 @@ gen5_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 
 		gen5_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 56e48ed..9c99b2a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2721,8 +2721,10 @@ gen6_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 
 		gen6_composite_channel_convert(&tmp->src);
 		break;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 41921d4..b1f17d7 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2825,8 +2825,10 @@ gen7_render_composite(struct sna *sna,
 					       src_x, src_y,
 					       width, height,
 					       dst_x, dst_y,
-					       tmp))
+					       tmp)) {
+			sna_render_composite_redirect_done(sna, tmp);
 			return true;
+		}
 
 		gen7_composite_channel_convert(&tmp->src);
 		break;
commit 335821d588460c253b2ba2c8616a7c46e5ad0150
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 13:29:04 2012 +0100

    sna/gen4+: Check for allocation failure for the clear solid bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 632793f..e732810 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2306,7 +2306,8 @@ gen4_render_composite(struct sna *sna,
 		DBG(("%s: failed to prepare source\n", __FUNCTION__));
 		goto cleanup_dst;
 	case 0:
-		gen4_composite_solid_init(sna, &tmp->src, 0);
+		if (!gen4_composite_solid_init(sna, &tmp->src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		if (mask == NULL &&
@@ -2361,7 +2362,8 @@ gen4_render_composite(struct sna *sna,
 				DBG(("%s: failed to prepare mask\n", __FUNCTION__));
 				goto cleanup_src;
 			case 0:
-				gen4_composite_solid_init(sna, &tmp->mask, 0);
+				if (!gen4_composite_solid_init(sna, &tmp->mask, 0))
+					goto cleanup_src;
 				/* fall through to fixup */
 			case 1:
 				gen4_composite_channel_convert(&tmp->mask);
@@ -2660,7 +2662,8 @@ gen4_render_composite_spans(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen4_composite_solid_init(sna, &tmp->base.src, 0);
+		if (!gen4_composite_solid_init(sna, &tmp->base.src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		gen4_composite_channel_convert(&tmp->base.src);
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index aaf7e49..0a7bc51 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2321,7 +2321,8 @@ gen5_render_composite(struct sna *sna,
 		DBG(("%s: failed to prepare source picture\n", __FUNCTION__));
 		goto cleanup_dst;
 	case 0:
-		gen5_composite_solid_init(sna, &tmp->src, 0);
+		if (!gen5_composite_solid_init(sna, &tmp->src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		if (mask == NULL &&
@@ -2375,7 +2376,8 @@ gen5_render_composite(struct sna *sna,
 				DBG(("%s: failed to prepare mask picture\n", __FUNCTION__));
 				goto cleanup_src;
 			case 0:
-				gen5_composite_solid_init(sna, &tmp->mask, 0);
+				if (!gen5_composite_solid_init(sna, &tmp->mask, 0))
+					goto cleanup_src;
 				/* fall through to fixup */
 			case 1:
 				gen5_composite_channel_convert(&tmp->mask);
@@ -2690,7 +2692,8 @@ gen5_render_composite_spans(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen5_composite_solid_init(sna, &tmp->base.src, 0);
+		if (!gen5_composite_solid_init(sna, &tmp->base.src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		gen5_composite_channel_convert(&tmp->base.src);
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index bfbcfd8..56e48ed 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2710,7 +2710,8 @@ gen6_render_composite(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen6_composite_solid_init(sna, &tmp->src, 0);
+		if (!gen6_composite_solid_init(sna, &tmp->src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		/* Did we just switch rings to prepare the source? */
@@ -2765,7 +2766,8 @@ gen6_render_composite(struct sna *sna,
 			case -1:
 				goto cleanup_src;
 			case 0:
-				gen6_composite_solid_init(sna, &tmp->mask, 0);
+				if (!gen6_composite_solid_init(sna, &tmp->mask, 0))
+					goto cleanup_src;
 				/* fall through to fixup */
 			case 1:
 				gen6_composite_channel_convert(&tmp->mask);
@@ -3149,7 +3151,8 @@ gen6_render_composite_spans(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen6_composite_solid_init(sna, &tmp->base.src, 0);
+		if (!gen6_composite_solid_init(sna, &tmp->base.src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		gen6_composite_channel_convert(&tmp->base.src);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 08ba6a0..41921d4 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2814,7 +2814,8 @@ gen7_render_composite(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen7_composite_solid_init(sna, &tmp->src, 0);
+		if (!gen7_composite_solid_init(sna, &tmp->src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		/* Did we just switch rings to prepare the source? */
@@ -2869,7 +2870,8 @@ gen7_render_composite(struct sna *sna,
 			case -1:
 				goto cleanup_src;
 			case 0:
-				gen7_composite_solid_init(sna, &tmp->mask, 0);
+				if (!gen7_composite_solid_init(sna, &tmp->mask, 0))
+					goto cleanup_src;
 				/* fall through to fixup */
 			case 1:
 				gen7_composite_channel_convert(&tmp->mask);
@@ -3241,7 +3243,8 @@ gen7_render_composite_spans(struct sna *sna,
 	case -1:
 		goto cleanup_dst;
 	case 0:
-		gen7_composite_solid_init(sna, &tmp->base.src, 0);
+		if (!gen7_composite_solid_init(sna, &tmp->base.src, 0))
+			goto cleanup_dst;
 		/* fall through to fixup */
 	case 1:
 		gen7_composite_channel_convert(&tmp->base.src);
commit b5c77a6aaf520e331e82409b2592911cb1bb3100
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 26 12:48:18 2012 +0100

    2.20.5 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 1ad769e..54f2caa 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,18 @@
+Release 2.20.5 (2012-08-26)
+===========================
+Another silly bug found, another small bugfix release. The goal was for
+the driver to bind to all Intel devices supported by the kernel.
+Unfortunately we were too successful and started claiming Poulsbo,
+Medfield and Cedarview devices which are still encumbered by proprietary
+IP and not supported by this driver.
+
+Bugs fixed since 2.20.4:
+
+ * Only bind to Intel devices using the i915 kernel module
+
+ * Regression in the bitmap-to-region code, e.g. icewm window buttons
+   https://bugs.freedesktop.org/show_bug.cgi?id=53699
+
 Release 2.20.4 (2012-08-18)
 ===========================
 Continuing the small bugfix releases, the only real feature is initial
diff --git a/configure.ac b/configure.ac
index a9c6336..114e721 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.4],
+        [2.20.5],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 454cc8453af1852758c3396dbe303c13c5c1be27
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 24 08:48:12 2012 +0100

    sna: Submit the partial batch before throttling
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9bc8c48..fd9728c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13691,8 +13691,10 @@ static void sna_accel_throttle(struct sna *sna)
 {
 	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)TIME));
 
-	if (sna->kgem.need_throttle)
+	if (sna->kgem.need_throttle) {
+		kgem_submit(&sna->kgem);
 		kgem_throttle(&sna->kgem);
+	}
 
 	if (!sna->kgem.need_retire)
 		sna_accel_disarm_timer(sna, THROTTLE_TIMER);
commit 0e1e83ed4952f620e9422e58f955a5aea406e300
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 24 00:59:31 2012 +0100

    sna: Allow the batch to be flushed if the GPU is idle upon a context switch
    
    Submit early, submit often in order to keep the GPU busy. As always we
    trade off CPU overhead versus concurrency.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 9e51cb7..d2f6fe7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -3124,8 +3124,16 @@ gen2_render_context_switch(struct kgem *kgem,
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
 
+	if (!kgem->mode)
+		return;
+
 	/* Reload BLT registers following a lost context */
 	sna->blt_state.fill_bo = 0;
+
+	if (kgem_is_idle(kgem)) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+		_kgem_submit(kgem);
+	}
 }
 
 bool gen2_render_init(struct sna *sna)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 2894c58..aaf7e49 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3509,6 +3509,9 @@ static void
 gen5_render_context_switch(struct kgem *kgem,
 			   int new_mode)
 {
+	if (!kgem->mode)
+		return;
+
 	/* Ironlake has a limitation that a 3D or Media command can't
 	 * be the first command after a BLT, unless it's
 	 * non-pipelined.
@@ -3522,6 +3525,11 @@ gen5_render_context_switch(struct kgem *kgem,
 		     __FUNCTION__));
 		sna->render_state.gen5.drawrect_limit = -1;
 	}
+
+	if (kgem_is_idle(kgem)) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+		_kgem_submit(kgem);
+	}
 }
 
 static void
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 84e7902..bfbcfd8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2366,9 +2366,18 @@ static bool prefer_blt_ring(struct sna *sna)
 	return sna->kgem.ring != KGEM_RENDER;
 }
 
-static bool can_switch_rings(struct sna *sna)
+static bool can_switch_to_blt(struct sna *sna)
 {
-	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
+	if (sna->kgem.ring == KGEM_BLT)
+		return true;
+
+	if (NO_RING_SWITCH)
+		return false;
+
+	if (!sna->kgem.has_semaphores)
+		return false;
+
+	return sna->kgem.mode == KGEM_NONE || kgem_is_idle(&sna->kgem);
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
@@ -2397,7 +2406,7 @@ try_blt(struct sna *sna,
 		return true;
 	}
 
-	if (can_switch_rings(sna) && sna_picture_is_solid(src, NULL))
+	if (can_switch_to_blt(sna) && sna_picture_is_solid(src, NULL))
 		return true;
 
 	return false;
@@ -3328,7 +3337,7 @@ fallback_blt:
 		if (too_large(extents.x2-extents.x1, extents.y2-extents.y1))
 			goto fallback_blt;
 
-		if ((flags & COPY_LAST || can_switch_rings(sna)) &&
+		if ((flags & COPY_LAST || can_switch_to_blt(sna)) &&
 		    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 		    sna_blt_copy_boxes(sna, alu,
 				       src_bo, src_dx, src_dy,
@@ -3646,7 +3655,7 @@ static inline bool prefer_blt_fill(struct sna *sna,
 	if (PREFER_RENDER)
 		return PREFER_RENDER < 0;
 
-	return (can_switch_rings(sna) ||
+	return (can_switch_to_blt(sna) ||
 		prefer_blt_ring(sna) ||
 		untiled_tlb_miss(bo));
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 99296fb..08ba6a0 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2455,9 +2455,18 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	return true;
 }
 
-inline static bool can_switch_rings(struct sna *sna)
+inline static bool can_switch_to_blt(struct sna *sna)
 {
-	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
+	if (sna->kgem.ring == KGEM_BLT)
+		return true;
+
+	if (NO_RING_SWITCH)
+		return false;
+
+	if (!sna->kgem.has_semaphores)
+		return false;
+
+	return sna->kgem.mode == KGEM_NONE || kgem_is_idle(&sna->kgem);
 }
 
 static inline bool untiled_tlb_miss(struct kgem_bo *bo)
@@ -2472,7 +2481,7 @@ static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
 
 inline static bool prefer_blt_ring(struct sna *sna)
 {
-	return sna->kgem.ring != KGEM_RENDER || can_switch_rings(sna);
+	return sna->kgem.ring != KGEM_RENDER || can_switch_to_blt(sna);
 }
 
 static bool
@@ -2491,7 +2500,7 @@ try_blt(struct sna *sna,
 		return true;
 	}
 
-	if (can_switch_rings(sna)) {
+	if (can_switch_to_blt(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return true;
 
@@ -3416,7 +3425,7 @@ fallback_blt:
 		if (too_large(extents.x2-extents.x1, extents.y2-extents.y1))
 			goto fallback_blt;
 
-		if ((flags & COPY_LAST || can_switch_rings(sna)) &&
+		if ((flags & COPY_LAST || can_switch_to_blt(sna)) &&
 		    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 		    sna_blt_copy_boxes(sna, alu,
 				       src_bo, src_dx, src_dy,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1cf7957..825caa7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1158,8 +1158,6 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
 {
-	kgem->context_switch(kgem, KGEM_NONE);
-
 	kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END;
 	if (kgem->nbatch & 1)
 		kgem->batch[kgem->nbatch++] = MI_NOOP;
@@ -1758,6 +1756,21 @@ bool kgem_retire(struct kgem *kgem)
 	return retired;
 }
 
+bool __kgem_is_idle(struct kgem *kgem)
+{
+	struct kgem_request *rq;
+
+	assert(!list_is_empty(&kgem->requests));
+
+	rq = list_last_entry(&kgem->requests, struct kgem_request, list);
+	if (kgem_busy(kgem, rq->bo->handle))
+		return false;
+
+	kgem_retire__requests(kgem);
+	assert(list_is_empty(&kgem->requests));
+	return true;
+}
+
 static void kgem_commit(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 583bafc..d085a2f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -258,15 +258,13 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
 
 void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
+bool __kgem_is_idle(struct kgem *kgem);
 static inline bool kgem_is_idle(struct kgem *kgem)
 {
 	if (list_is_empty(&kgem->requests))
 		return true;
 
-	if (!kgem_retire(kgem))
-		return false;
-
-	return list_is_empty(&kgem->requests);
+	return __kgem_is_idle(kgem);
 }
 struct kgem_bo *kgem_get_last_request(struct kgem *kgem);
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 8373890..0d4d706 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -246,6 +246,14 @@ static void
 no_render_context_switch(struct kgem *kgem,
 			 int new_mode)
 {
+	if (!kgem->mode)
+		return;
+
+	if (kgem_is_idle(kgem)) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+		_kgem_submit(kgem);
+	}
+
 	(void)kgem;
 	(void)new_mode;
 }
commit 5059db0697c5516f1538f7062937664baf7b1c2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 24 00:21:07 2012 +0100

    sna: Correct a pair of DBG messages
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 257dbc8..84e7902 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1235,7 +1235,7 @@ gen6_bind_bo(struct sna *sna,
 	if (offset) {
 		DBG(("[%x]  bo(handle=%d), format=%d, reuse %s binding\n",
 		     offset, bo->handle, format,
-		     domains & 0xffff ? "render" : "sampler"));
+		     is_dst ? "render" : "sampler"));
 		if (is_dst)
 			kgem_bo_mark_dirty(bo);
 		return offset * sizeof(uint32_t);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dfb1491..9bc8c48 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13427,7 +13427,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	 * and doesn't appear to mitigate the performance loss.
 	 */
 	DBG(("%s: flush?=%d, dirty?=%d\n", __FUNCTION__,
-	     sna->kgem.flush, list_is_empty(&sna->flush_pixmap)));
+	     sna->kgem.flush, !list_is_empty(&sna->flush_pixmaps)));
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	while (!list_is_empty(&sna->flush_pixmaps)) {
commit c5b46e411a2c738c5ae55bffb9b3d460249f5c24
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 23 17:36:10 2012 +0100

    sna: Tidy up users of __kgem_bo_is_busy()
    
    A lot of callsites had not been converted to the common function.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2a976cf..dfb1491 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1231,11 +1231,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			assert(!priv->shm);
 			DBG(("%s: write inplace\n", __FUNCTION__));
 			if (priv->gpu_bo) {
-				if (kgem_bo_is_busy(priv->gpu_bo) &&
-				    priv->gpu_bo->exec == NULL)
-					kgem_retire(&sna->kgem);
-
-				if (kgem_bo_is_busy(priv->gpu_bo)) {
+				if (__kgem_bo_is_busy(&sna->kgem,
+						      priv->gpu_bo)) {
 					if (priv->pinned)
 						goto skip_inplace_map;
 
@@ -1277,21 +1274,17 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 skip_inplace_map:
 		sna_damage_destroy(&priv->gpu_damage);
-		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush &&
+		    __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
+			DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
 			assert(!priv->shm);
-			if (priv->cpu_bo->exec == NULL)
-				kgem_retire(&sna->kgem);
+			assert(priv->gpu_bo == NULL || priv->gpu_damage == NULL);
 
-			if (kgem_bo_is_busy(priv->cpu_bo)) {
-				DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
-				assert(priv->gpu_bo == NULL || priv->gpu_damage == NULL);
-
-				sna_damage_destroy(&priv->cpu_damage);
-				priv->undamaged = false;
+			sna_damage_destroy(&priv->cpu_damage);
+			priv->undamaged = false;
 
-				sna_pixmap_free_gpu(sna, priv);
-				sna_pixmap_free_cpu(sna, priv);
-			}
+			sna_pixmap_free_gpu(sna, priv);
+			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
 
@@ -1495,11 +1488,6 @@ pixmap_contains_damage(PixmapPtr pixmap, struct sna_damage *damage)
 }
 #endif
 
-static bool sync_will_stall(struct kgem_bo *bo)
-{
-	return kgem_bo_is_busy(bo);
-}
-
 static inline bool region_inplace(struct sna *sna,
 				  PixmapPtr pixmap,
 				  RegionPtr region,
@@ -1629,11 +1617,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		if (priv->stride && priv->gpu_bo &&
 		    region_inplace(sna, pixmap, region, priv, true)) {
 			assert(priv->gpu_bo->proxy == NULL);
-			if (sync_will_stall(priv->gpu_bo) &&
-			    priv->gpu_bo->exec == NULL)
-				kgem_retire(&sna->kgem);
-
-			if (!kgem_bo_is_busy(priv->gpu_bo)) {
+			if (!__kgem_bo_is_busy(&sna->kgem, priv->gpu_bo)) {
 				pixmap->devPrivate.ptr =
 					kgem_bo_map(&sna->kgem, priv->gpu_bo);
 				if (pixmap->devPrivate.ptr == NULL) {
@@ -1666,9 +1650,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		}
 
 		if (priv->cpu_bo && !priv->cpu_bo->flush) {
-			if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
-				kgem_retire(&sna->kgem);
-			if (sync_will_stall(priv->cpu_bo)) {
+			if (__kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
 				sna_damage_subtract(&priv->cpu_damage, region);
 				if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE)) {
 					if (dx | dy)
@@ -3325,9 +3307,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		 * However, we can queue some writes to the GPU bo to avoid
 		 * the wait. Or we can try to replace the CPU bo.
 		 */
-		if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
-			kgem_retire(&sna->kgem);
-		if (sync_will_stall(priv->cpu_bo)) {
+		if (__kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
 			if (priv->cpu_bo->flush) {
 				if (sna_put_image_upload_blt(drawable, gc, region,
 							     x, y, w, h, bits, stride)) {
commit 82e91327d57e03d2117638165f298a50b946fcaa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 23 15:59:00 2012 +0100

    sna: Use a temporary userptr mapping for a large upload into a busy target
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0499a1d..2a976cf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4333,6 +4333,41 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			return;
 		}
 
+		if (src_priv == NULL &&
+		    sna->kgem.has_userptr &&
+		    __kgem_bo_is_busy(&sna->kgem, bo) &&
+		    box_inplace(src_pixmap, &region->extents)) {
+			struct kgem_bo *src_bo;
+			bool ok = false;
+
+			DBG(("%s: upload through a temporary map\n",
+			     __FUNCTION__));
+
+			src_bo = kgem_create_map(&sna->kgem,
+						 src_pixmap->devPrivate.ptr,
+						 src_pixmap->devKind * src_pixmap->drawable.height,
+						 true);
+			if (src_bo) {
+				src_bo->flush = true;
+				src_bo->pitch = src_pixmap->devKind;
+				src_bo->reusable = false;
+
+				ok = sna->render.copy_boxes(sna, alu,
+							    src_pixmap, src_bo, src_dx, src_dy,
+							    dst_pixmap, bo, 0, 0,
+							    box, n, COPY_LAST);
+
+				kgem_bo_sync__cpu(&sna->kgem, src_bo);
+				kgem_bo_destroy(&sna->kgem, src_bo);
+			}
+
+			if (ok) {
+				if (damage)
+					sna_damage_add(damage, region);
+				return;
+			}
+		}
+
 		if (alu != GXcopy) {
 			PixmapPtr tmp;
 			int i;
commit b286ffa6beccb8fe341c464a4fb9f2af98541263
Author: Eric S. Raymond <esr at thyrsus.com>
Date:   Thu Aug 23 12:42:18 2012 -0400

    Fix seriously malformed list syntax on intel(4).
    
    Signed-off-by: Eric S. Raymond <esr at thyrsus.com>

diff --git a/man/intel.man b/man/intel.man
index d231fd0..5498f87 100644
--- a/man/intel.man
+++ b/man/intel.man
@@ -257,80 +257,73 @@ VGA output port (typically exposed via an HD15 connector).
 .SS "LVDS"
 Low Voltage Differential Signalling output (typically a laptop LCD panel).  Available properties:
 
-.PP
-.B BACKLIGHT
-- current backlight level (adjustable)
-.TP 2
-By adjusting the BACKLIGHT property, the brightness on the LVDS output can be adjusted.  In some cases, this property may be unavailable (for example if your platform uses an external microcontroller to control the backlight).
-
-.PP
-.B scaling mode
-- control LCD panel scaling mode
-.TP 2
-When the currently selected display mode differs from the native panel resolution, various scaling options are available. These include
+.TP
+\fBBACKLIGHT\fB - current backlight level (adjustable)
+By adjusting the BACKLIGHT property, the brightness on the LVDS output
+can be adjusted.  In some cases, this property may be unavailable (for
+example if your platform uses an external microcontroller to control
+the backlight).
+.TP
+\fBscaling mode\fP - control LCD panel scaling mode
+When the currently selected display mode differs from the native panel
+resolution, various scaling options are available. These include
 .RS
-.PP
+.TP
 .B Center
-.TP 4
-Simply center the image on-screen without scaling. This is the only scaling mode that guarantees a one-to-one correspondence between native and displayed pixels, but some portions of the panel may be unused (so-called "letterboxing").
-.PP
+Simply center the image on-screen without scaling. This is the only
+scaling mode that guarantees a one-to-one correspondence between
+native and displayed pixels, but some portions of the panel may be
+unused (so-called "letterboxing").
+.TP
 .B Full aspect
-.TP 4
-Scale the image as much as possible while preserving aspect ratio. Pixels may not be displayed one-to-one (there may be some blurriness). Some portions of the panel may be unused if the aspect ratio of the selected mode does not match that of the panel.
-.PP
+Scale the image as much as possible while preserving aspect
+ratio. Pixels may not be displayed one-to-one (there may be some
+blurriness). Some portions of the panel may be unused if the aspect
+ratio of the selected mode does not match that of the panel.
+.TP
 .B Full
-.TP 4
-Scale the image to the panel size without regard to aspect ratio. This is the only mode which guarantees that every pixel of the panel will be used. But the displayed image may be distorted by stretching either horizontally or vertically, and pixels may not be displayed one-to-one (there may be some blurriness).
+Scale the image to the panel size without regard to aspect ratio. This
+is the only mode which guarantees that every pixel of the panel will
+be used. But the displayed image may be distorted by stretching either
+horizontally or vertically, and pixels may not be displayed one-to-one
+(there may be some blurriness).
 .RE
 
 The precise names of these options may differ depending on the kernel
-video driver, (but the functionality should be similar). See the output of
+video driver, (but the functionality should be similar). See the
+output of
 .B xrandr \-\-prop
 for a list of currently available scaling modes.
 .SS "TV"
 Integrated TV output.  Available properties include:
-
-.PP
-.B BOTTOM, RIGHT, TOP, LEFT
-- margins
-.TP 2
-Adjusting these properties allows you to control the placement of your TV output buffer on the screen. The options with the same name can also be set in xorg.conf with integer value.
-
-.PP
-.B BRIGHTNESS
-- TV brightness, range 0-255
-.TP 2
+.TP
+\fBBOTTOM, RIGHT, TOP, LEFT\fP - margins
+Adjusting these properties allows you to control the placement of your
+TV output buffer on the screen. The options with the same name can
+also be set in xorg.conf with integer value.
+.TP
+\fBBRIGHTNESS\fP - TV brightness, range 0-255
 Adjust TV brightness, default value is 128.
-
-.PP
-.B CONTRAST
-- TV contrast, range 0-255
-.TP 2
+.TP
+\fBCONTRAST\fP - TV contrast, range 0-255
 Adjust TV contrast, default value is 1.0 in chipset specific format.
-
-.PP
-.B SATURATION
-- TV saturation, range 0-255
-.TP 2
+.TP
+\fBSATURATION\fP - TV saturation, range 0-255
 Adjust TV saturation, default value is 1.0 in chipset specific format.
-
-.PP
-.B HUE
-- TV hue, range 0-255
-.TP 2
+.TP
+\fBHUE\fP - TV hue, range 0-255
 Adjust TV hue, default value is 0.
-
-.PP
-.B TV_FORMAT
-- output standard
-.TP 2
-This property allows you to control the output standard used on your TV output port.  You can select between NTSC-M, NTSC-443, NTSC-J, PAL-M, PAL-N, and PAL.
-
-.PP
-.B TV_Connector
-- connector type
-.TP 2
-This config option should be added to xorg.conf TV monitor's section, it allows you to force the TV output connector type, which bypass load detect and TV will always be taken as connected. You can select between S-Video, Composite and Component.
+.TP
+\fBTV_FORMAT\fP - output standard
+This property allows you to control the output standard used on your
+TV output port.  You can select between NTSC-M, NTSC-443, NTSC-J,
+PAL-M, PAL-N, and PAL.
+.TP
+\fBTV_Connector\fP - connector type
+This config option should be added to xorg.conf TV monitor's section,
+it allows you to force the TV output connector type, which bypass load
+detect and TV will always be taken as connected. You can select
+between S-Video, Composite and Component.
 
 .SS "TMDS-1"
 First DVI SDVO output
@@ -340,15 +333,15 @@ Second DVI SDVO output
 
 .SS "TMDS-1", "TMDS-2", "HDMI-1", "HDMI-2"
 DVI/HDMI outputs. Available common properties include:
-.PP
-.B BROADCAST_RGB
-- method used to set RGB color range(full range 0-255, not full range 16-235)
-.TP 2
-Adjusting this propertie allows you to set RGB color range on each channel in order to match HDTV requirment(default 0 for full range). Setting 1 means RGB color range is 16-235, 0 means RGB color range is 0-255 on each channel.
+.TP
+\fBBROADCAST_RGB\fP - method used to set RGB color range
+Adjusting this property allows you to set RGB color range on each
+channel in order to match HDTV requirement (default 0 for full
+range). Setting 1 means RGB color range is 16-235, 0 means RGB color
+range is 0-255 on each channel.  (Full range is 0-255, not 16-235)
 
 .PP
 SDVO and DVO TV outputs are not supported by the driver at this time.
-
 .PP
 See __xconfigfile__(__filemansuffix__) for information on associating Monitor
 sections with these outputs for configuration.  Associating Monitor sections
commit fc6b7f564df88ca773ae245b1b4e278b47dffd59
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 23 15:13:14 2012 +0100

    sna: Flush the batch if it contains any DRI pixmaps
    
    This fixes a regression from
    
    commit 02963f489b177d0085006753e91e240545933387
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Sun Aug 19 15:45:35 2012 +0100
    
        sna: Only submit the batch if flushing a DRI client bo
    
    which made the presumption that we called sna_add_flush_pixmap() for
    every DRI pixmap that we used. However, that is only called for the
    dirty pixmaps, any native exported pixmap only marks the batch as
    requiring a flush. So in those cases we always need to submit the batch
    if it contains an exported DRI pixmap.
    
    Reported-by: chr.ohm at gmx.net
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=53967
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 077f3e1..1cf7957 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2057,6 +2057,7 @@ void kgem_reset(struct kgem *kgem)
 	kgem->nbatch = 0;
 	kgem->surface = kgem->batch_size;
 	kgem->mode = KGEM_NONE;
+	kgem->flush = 0;
 
 	kgem->next_request = __kgem_request_alloc();
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 72d2d02..0499a1d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1031,8 +1031,6 @@ void sna_add_flush_pixmap(struct sna *sna,
 		DBG(("%s: flush bo idle, flushing\n", __FUNCTION__));
 		kgem_submit(&sna->kgem);
 	}
-
-	sna->kgem.flush = true;
 }
 
 static void __sna_free_pixmap(struct sna *sna,
@@ -13413,9 +13411,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	 * by checking for outgoing damage events or sync replies. Tricky,
 	 * and doesn't appear to mitigate the performance loss.
 	 */
-	DBG(("%s: flush?=%d\n", __FUNCTION__, sna->kgem.flush));
-	if (!sna->kgem.flush)
-		return;
+	DBG(("%s: flush?=%d, dirty?=%d\n", __FUNCTION__,
+	     sna->kgem.flush, list_is_empty(&sna->flush_pixmap)));
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	while (!list_is_empty(&sna->flush_pixmaps)) {
@@ -13435,14 +13432,14 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		} else {
 			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber));
-			if (sna_pixmap_move_to_gpu(priv->pixmap,
-						     MOVE_READ | __MOVE_FORCE))
-				kgem_bo_submit(&sna->kgem, priv->gpu_bo);
+			ret = sna_pixmap_move_to_gpu(priv->pixmap,
+						     MOVE_READ | __MOVE_FORCE);
 		}
 		(void)ret;
 	}
 
-	sna->kgem.flush = false;
+	if (sna->kgem.flush)
+		kgem_submit(&sna->kgem);
 }
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
commit fd38f45ec5421802b426867a050c978a2feec937
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 23 12:11:48 2012 +0100

    sna: Mark the CPU damage as needing flushing for DRI buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9bab91c..72d2d02 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1022,6 +1022,7 @@ void sna_add_flush_pixmap(struct sna *sna,
 {
 	DBG(("%s: marking pixmap=%ld for flushing\n",
 	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
+	assert(bo);
 	list_move(&priv->list, &sna->flush_pixmaps);
 
 	if (bo->exec == NULL && sna->kgem.need_retire)
@@ -2631,6 +2632,10 @@ use_cpu_bo:
 		if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 			return NULL;
 	}
+	if (priv->flush) {
+		assert(!priv->shm);
+		sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
+	}
 
 	DBG(("%s: using CPU bo with damage? %d\n",
 	     __FUNCTION__, *damage != NULL));
commit ac6cb667546a82b865c959a1be32f52b2da7bf7d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 12:00:49 2012 +0100

    sna: Flush before adding any SHM pixmap into the batch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 157830b..d655da0 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -458,7 +458,9 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 	return sna_pixmap_move_to_gpu(get_drawable_pixmap(drawable), flags) != NULL;
 }
 
-void sna_add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv);
+void sna_add_flush_pixmap(struct sna *sna,
+			  struct sna_pixmap *priv,
+			  struct kgem_bo *bo);
 
 struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bba24e5..9bab91c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1016,16 +1016,18 @@ fallback:
 	return create_pixmap(sna, screen, width, height, depth, usage);
 }
 
-void sna_add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
+void sna_add_flush_pixmap(struct sna *sna,
+			  struct sna_pixmap *priv,
+			  struct kgem_bo *bo)
 {
 	DBG(("%s: marking pixmap=%ld for flushing\n",
 	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
 	list_move(&priv->list, &sna->flush_pixmaps);
 
-	if (sna->kgem.need_retire)
+	if (bo->exec == NULL && sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
-	if (!sna->kgem.need_retire || !sna->kgem.flush) {
-		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+	if (bo->exec == NULL || !sna->kgem.need_retire) {
+		DBG(("%s: flush bo idle, flushing\n", __FUNCTION__));
 		kgem_submit(&sna->kgem);
 	}
 
@@ -1078,8 +1080,8 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 
 	if (priv->shm && kgem_bo_is_busy(priv->cpu_bo)) {
+		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 		kgem_bo_submit(&sna->kgem, priv->cpu_bo); /* XXX ShmDetach */
-		sna_add_flush_pixmap(sna, priv);
 	} else
 		__sna_free_pixmap(sna, pixmap, priv);
 	return TRUE;
@@ -1419,7 +1421,7 @@ skip_inplace_map:
 
 		if (priv->flush) {
 			assert(!priv->shm);
-			sna_add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
 		}
 	}
 
@@ -2030,7 +2032,7 @@ done:
 		}
 		if (priv->flush) {
 			assert(!priv->shm);
-			sna_add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
 		}
 	}
 
@@ -2244,7 +2246,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 							    box, n, 0);
 				if (ok && priv->shm) {
 					assert(!priv->flush);
-					sna_add_flush_pixmap(sna, priv);
+					sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 				}
 			}
 			if (!ok) {
@@ -2288,7 +2290,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    box, 1, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				sna_add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 			}
 		}
 		if (!ok) {
@@ -2323,7 +2325,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    box, n, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				sna_add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 			}
 		}
 		if (!ok) {
@@ -2349,7 +2351,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		sna_add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 	}
 
 done:
@@ -2623,7 +2625,7 @@ use_cpu_bo:
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		sna_add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 
+		/* As we may have flushed and retired, recheck for busy bo */
 		if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
@@ -2851,7 +2853,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    box, n, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				sna_add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 			}
 		}
 		if (!ok) {
@@ -2886,7 +2888,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		sna_add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 	}
 
 	/* For large bo, try to keep only a single copy around */
@@ -3441,7 +3443,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		}
 		if (priv->flush) {
 			assert(!priv->shm);
-			sna_add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
 		}
 	}
 	priv->cpu = true;
@@ -4320,7 +4322,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (src_priv->shm) {
 				assert(!src_priv->flush);
-				sna_add_flush_pixmap(sna, src_priv);
+				sna_add_flush_pixmap(sna, src_priv, src_priv->cpu_bo);
 			}
 
 			if (damage)
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 13822b8..8373890 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -378,7 +378,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		sna_add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv, priv->cpu_bo);
 	}
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
commit 785c1046b94fd9ca6f22b2a6d73639408a411cea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 10:56:12 2012 +0100

    sna: Only use the GPU for an active CPU bo unless forced
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 110d9c5..bba24e5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1339,7 +1339,8 @@ skip_inplace_map:
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush &&
+		    __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
 			assert(!priv->shm);
 			sna_pixmap_free_cpu(sna, priv);
 		}
@@ -1439,8 +1440,11 @@ done:
 			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		}
-		if (flags & MOVE_WRITE)
+		if (flags & MOVE_WRITE) {
+			DBG(("%s: discarding GPU bo in favour of CPU bo\n", __FUNCTION__));
 			sna_pixmap_free_gpu(sna, priv);
+			priv->undamaged = false;
+		}
 	}
 	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
@@ -2371,6 +2375,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	struct sna *sna;
 	RegionRec region;
 	int16_t dx, dy;
 	int ret;
@@ -2565,8 +2570,13 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
-	if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
+	sna = to_sna_from_pixmap(pixmap);
+	if ((flags & FORCE_GPU) == 0 &&
+	    !__kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
+		DBG(("%s: has CPU bo, but is idle and acceleration not forced\n",
+		     __FUNCTION__));
 		return NULL;
+	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -2590,11 +2600,11 @@ use_cpu_bo:
 		if (priv->cpu_bo->pitch >= 4096)
 			goto move_to_gpu;
 
-		if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
+		if (!sna->kgem.can_blt_cpu)
 			goto move_to_gpu;
 	}
 
-	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
+	if (!sna->kgem.can_blt_cpu)
 		return NULL;
 
 	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region,
@@ -2613,7 +2623,11 @@ use_cpu_bo:
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		sna_add_flush_pixmap(to_sna_from_pixmap(pixmap), priv);
+		sna_add_flush_pixmap(sna, priv);
+
+		/* As we may have flushed and retired, recheck for busy bo */
+		if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
+			return NULL;
 	}
 
 	DBG(("%s: using CPU bo with damage? %d\n",
commit 8032f51859ce1928922edf6892f493a84d9c39f0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 10:40:53 2012 +0100

    sna: If we cannot use the CPU bo along a render pathway, promote to GPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5521c83..110d9c5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2589,6 +2589,9 @@ use_cpu_bo:
 
 		if (priv->cpu_bo->pitch >= 4096)
 			goto move_to_gpu;
+
+		if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
+			goto move_to_gpu;
 	}
 
 	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
commit cc967507bbf357e1d5ec3cd0f3c0c5ecfa8b867a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 10:36:04 2012 +0100

    sna: Convert to using IGNORE_CPU flag rather than complicating the CPU damage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9731a0a..5521c83 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4113,6 +4113,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	struct sna *sna = to_sna_from_pixmap(src_pixmap);
 	struct sna_damage **damage;
 	struct kgem_bo *bo;
+	unsigned hint;
 	int16_t src_dx, src_dy;
 	int16_t dst_dx, dst_dy;
 	BoxPtr box = RegionRects(region);
@@ -4171,20 +4172,20 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst_priv == NULL)
 		goto fallback;
 
+	hint = source_prefer_gpu(src_priv) ?:
+		region_inplace(sna, dst_pixmap, region,
+			       dst_priv, alu_overwrites(alu));
 	if (dst_priv->cpu_damage && alu_overwrites(alu)) {
 		DBG(("%s: overwritting CPU damage\n", __FUNCTION__));
-		sna_damage_subtract(&dst_priv->cpu_damage, region);
-		if (dst_priv->cpu_damage == NULL) {
+		if (region_subsumes_damage(region, dst_priv->cpu_damage)) {
+			DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
+			sna_damage_destroy(&dst_priv->cpu_damage);
 			list_del(&dst_priv->list);
-			dst_priv->undamaged = false;
-			dst_priv->cpu = false;
 		}
+		hint |= IGNORE_CPU;
 	}
 
-	bo = sna_drawable_use_bo(&dst_pixmap->drawable,
-				 source_prefer_gpu(src_priv) ?:
-				 region_inplace(sna, dst_pixmap, region,
-						dst_priv, alu_overwrites(alu)),
+	bo = sna_drawable_use_bo(&dst_pixmap->drawable, hint,
 				 &region->extents, &damage);
 	if (bo) {
 		if (src_priv && src_priv->clear) {
commit 5aa59ce7c012eb309c4f5a362ccf531c065bd7ff
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 10:10:33 2012 +0100

    sna: Assert that the CPU bo is not used if the GPU is clear
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ba8c93d..9731a0a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2561,6 +2561,7 @@ use_gpu_bo:
 	return priv->gpu_bo;
 
 use_cpu_bo:
+	assert(!priv->clear);
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
commit 273765033223024ff6a662195e0e4b96f8960463
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 00:48:08 2012 +0100

    sna: Make sure the opposite damage is destroyed after reducing to all
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ee23b43..ba8c93d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2536,9 +2536,10 @@ done:
 	assert(priv->gpu_bo != NULL);
 	if (sna_damage_is_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
-			      pixmap->drawable.height))
+			      pixmap->drawable.height)) {
+		sna_damage_destroy(&priv->cpu_damage);
 		*damage = NULL;
-	else
+	} else
 		*damage = &priv->gpu_damage;
 
 	DBG(("%s: using GPU bo with damage? %d\n",
@@ -2579,6 +2580,8 @@ use_cpu_bo:
 	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
 		goto move_to_gpu;
 
+	assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+
 	if (flags & RENDER_GPU) {
 		if (priv->gpu_bo && priv->gpu_bo->tiling)
 			goto move_to_gpu;
@@ -2598,9 +2601,10 @@ use_cpu_bo:
 
 	if (sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
-			      pixmap->drawable.height))
+			      pixmap->drawable.height)) {
+		sna_damage_destroy(&priv->gpu_damage);
 		*damage = NULL;
-	else
+	} else
 		*damage = &priv->cpu_damage;
 
 	if (priv->shm) {
@@ -2726,6 +2730,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
 		DBG(("%s: already all-damaged\n", __FUNCTION__));
+		sna_damage_destroy(&priv->cpu_damage);
+		priv->undamaged = true;
 		goto active;
 	}
 
commit 31e398c9dbd8539e6fd2cc7398e97ee1df2f7f23
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 00:38:17 2012 +0100

    sna: Discard a no-longer-used GPU bo after moving to the CPU domain
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 59ed0d7..ee23b43 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1426,6 +1426,12 @@ done:
 	if (flags & MOVE_WRITE) {
 		priv->source_count = SOURCE_BIAS;
 		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+		if (priv->gpu_bo && priv->gpu_bo->domain != DOMAIN_GPU) {
+			DBG(("%s: discarding inactive GPU bo\n", __FUNCTION__));
+			assert(DAMAGE_IS_ALL(priv->cpu_damage));
+			sna_pixmap_free_gpu(sna, priv);
+			priv->undamaged = false;
+		}
 	}
 
 	if (priv->cpu_bo) {
commit 8be15c37df9d9b34e0f21700673212bd19c772b2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 22 00:27:00 2012 +0100

    sna: Balance CPU bo accounting for SHM pixmaps
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1c77104..59ed0d7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -473,8 +473,8 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
 #endif
 		if (priv->cpu_bo->flush) {
+			assert(priv->cpu_bo->reusable == false);
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
-			priv->cpu_bo->reusable = false;
 			sna_accel_watch_flush(sna, -1);
 		}
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
@@ -819,6 +819,10 @@ fallback:
 	priv->cpu_bo->pitch = pitch;
 	priv->cpu_bo->reusable = false;
 	sna_accel_watch_flush(sna, 1);
+#ifdef DEBUG_MEMORY
+	sna->debug_memory.cpu_bo_allocs++;
+	sna->debug_memory.cpu_bo_bytes += kgem_bo_size(priv->cpu_bo);
+#endif
 
 	priv->cpu = true;
 	priv->shm = true;
commit 262d57a5aaac46508d8e29860c7a567bcd5bc4d8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 21 23:36:12 2012 +0100

    sna: Display still resident memory in inactive/snoop caches under DEBUG_MEMORY
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0942917..077f3e1 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2343,6 +2343,16 @@ bool kgem_expire_cache(struct kgem *kgem)
 			kgem_bo_free(kgem, bo);
 		}
 	}
+#ifdef DEBUG_MEMORY
+	{
+		long size = 0;
+		int count = 0;
+		list_for_each_entry(bo, &kgem->snoop, list)
+			count++, size += bytes(bo);
+		ErrorF("%s: still allocated %d bo, %ld bytes, in snoop cache\n",
+		       __FUNCTION__, count, size);
+	}
+#endif
 
 	kgem_retire(kgem);
 	if (kgem->wedged)
@@ -2408,6 +2418,18 @@ bool kgem_expire_cache(struct kgem *kgem)
 		}
 	}
 
+#ifdef DEBUG_MEMORY
+	{
+		long size = 0;
+		int count = 0;
+		for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
+			list_for_each_entry(bo, &kgem->inactive[i], list)
+				count++, size += bytes(bo);
+		ErrorF("%s: still allocated %d bo, %ld bytes, in inactive cache\n",
+		       __FUNCTION__, count, size);
+	}
+#endif
+
 	DBG(("%s: expired %d objects, %d bytes, idle? %d\n",
 	     __FUNCTION__, count, size, idle));
 
commit 304581cc02adeb865a24edde934b5b9ceba68d96
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 21 23:29:12 2012 +0100

    sna: Add a DBG to log pixmap destruction
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9fda8ca..1c77104 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1059,6 +1059,8 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 		return TRUE;
 
 	priv = sna_pixmap(pixmap);
+	DBG(("%s: pixmap=%ld, attached?=%d\n",
+	     __FUNCTION__, pixmap->drawable.serialNumber, priv != NULL));
 	if (priv == NULL) {
 		FreePixmap(pixmap);
 		return TRUE;
commit 946d54238d90f5e31772f0df336ac9a5e7f2d62b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 21 21:02:00 2012 +0100

    sna: Fix the assertion for tracking proxies in the batch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 7498e73..0942917 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1773,6 +1773,7 @@ static void kgem_commit(struct kgem *kgem)
 
 		assert(!bo->purged);
 		assert(bo->exec);
+		assert(bo->proxy == NULL || bo->exec == &_kgem_dummy_exec);
 		assert(bo->rq == rq || (bo->proxy->rq == rq));
 
 		bo->presumed_offset = bo->exec->offset;
@@ -1789,7 +1790,6 @@ static void kgem_commit(struct kgem *kgem)
 		bo->dirty = false;
 
 		if (bo->proxy) {
-			assert(bo->exec == &_kgem_dummy_exec);
 			/* proxies are not used for domain tracking */
 			list_del(&bo->request);
 			bo->rq = NULL;
commit 4ee2e227ddf61c87f08f55d4922d2562b563ca87
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 21 00:28:40 2012 +0100

    sna: Mark all levels of a proxy as dirty
    
    So that if we write to a surface through one view, we make sure that the
    sample cache is invalidated for all future views.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 49e27d0..7498e73 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1789,10 +1789,11 @@ static void kgem_commit(struct kgem *kgem)
 		bo->dirty = false;
 
 		if (bo->proxy) {
+			assert(bo->exec == &_kgem_dummy_exec);
 			/* proxies are not used for domain tracking */
 			list_del(&bo->request);
 			bo->rq = NULL;
-			bo->exec = &_kgem_dummy_exec;
+			bo->exec = NULL;
 		}
 	}
 
@@ -3546,14 +3547,18 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			delta += bo->delta;
 			assert(bo->handle == bo->proxy->handle);
 			/* need to release the cache upon batch submit */
-			list_move_tail(&bo->request,
-				       &kgem->next_request->buffers);
-			bo->exec = &_kgem_dummy_exec;
+			if (bo->exec == NULL) {
+				list_move_tail(&bo->request,
+					       &kgem->next_request->buffers);
+				bo->rq = kgem->next_request;
+				bo->exec = &_kgem_dummy_exec;
+			}
+
 			bo = bo->proxy;
+			assert(bo->refcnt);
+			assert(!bo->purged);
 		}
 
-		assert(!bo->purged);
-
 		if (bo->exec == NULL)
 			_kgem_add_bo(kgem, bo);
 		assert(bo->rq == kgem->next_request);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index cf7cf70..583bafc 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -536,15 +536,17 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 
 static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
 {
-	if (bo->dirty)
-		return;
+	do {
+		if (bo->dirty)
+			return;
 
-	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+		DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+		assert(bo->exec);
+		assert(bo->rq);
 
-	assert(bo->exec);
-	assert(bo->rq);
-	bo->needs_flush = bo->dirty = true;
-	list_move(&bo->request, &bo->rq->buffers);
+		bo->needs_flush = bo->dirty = true;
+		list_move(&bo->request, &bo->rq->buffers);
+	} while ((bo = bo->proxy));
 }
 
 #define KGEM_BUFFER_WRITE	0x1
commit eee181e9d71273d94fe735805ed84e8f7b7b2180
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 21 00:19:10 2012 +0100

    sna/gen6+: Only mark the dst as dirty again if it already is in the batch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index af8899e..257dbc8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -903,7 +903,8 @@ gen6_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen6_emit_flush(sna);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		if (op->dst.bo->exec)
+			kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall) {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index d34cdfa..99296fb 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1047,7 +1047,8 @@ gen7_emit_state(struct sna *sna,
 			need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
 		gen7_emit_pipe_invalidate(sna, need_stall);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		if (op->dst.bo->exec)
+			kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall)
commit c86df17c1455a53cb52f33a25c8c362e5331621e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 22:54:06 2012 +0100

    sna/gen3: Fix assertion to check the freshly allocated vertex bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 7fb5a08..ab94bdb 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1713,7 +1713,7 @@ static void gen3_vertex_close(struct sna *sna)
 			bo = kgem_create_linear(&sna->kgem,
 						4*sna->render.vertex_used, 0);
 			if (bo) {
-				assert(sna->render.vbo->snoop == false);
+				assert(bo->snoop == false);
 				kgem_bo_write(&sna->kgem, bo,
 					      sna->render.vertex_data,
 					      4*sna->render.vertex_used);
commit 6aabe90587f4916a01a1cd2bbc577a1e7fa20eca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 22:09:54 2012 +0100

    sna: Allow target bo promotion to GPU even on old architectures
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f530603..9fda8ca 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2551,9 +2551,6 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
-	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
-		return NULL;
-
 	if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
@@ -2578,6 +2575,9 @@ use_cpu_bo:
 			goto move_to_gpu;
 	}
 
+	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
+		return NULL;
+
 	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region,
 					     MOVE_READ | MOVE_ASYNC_HINT)) {
 		DBG(("%s: failed to move-to-cpu, fallback\n", __FUNCTION__));
commit 1a4b6fea7b1516de35e6800efa5b85f8401a5b2a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 22:08:23 2012 +0100

    sna: Assign a unique id to snoopable CPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c5b88ff..49e27d0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3309,6 +3309,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		assert(bo->snoop);
 		bo->refcnt = 1;
 		bo->pitch = stride;
+		bo->unique_id = kgem_get_unique_id(kgem);
 		return bo;
 	}
 
@@ -3331,6 +3332,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		}
 
 		bo->pitch = stride;
+		bo->unique_id = kgem_get_unique_id(kgem);
 		return bo;
 	}
 
@@ -3350,6 +3352,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 		bo->map = MAKE_USER_MAP(ptr);
 		bo->pitch = stride;
+		bo->unique_id = kgem_get_unique_id(kgem);
 		return bo;
 	}
 
commit 892b1a1e431e8f27133825f8a27dde4955da0054
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 22:07:05 2012 +0100

    sna/gen3: Convert to sna_drawable_use_bo()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 81d2c95..7fb5a08 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2483,37 +2483,48 @@ gen3_align_vertex(struct sna *sna,
 static bool
 gen3_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
-			  PicturePtr dst)
+			  PicturePtr dst,
+			  int x, int y, int w, int h)
 {
-	struct sna_pixmap *priv;
+	BoxRec box;
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	op->dst.format = dst->format;
 	op->dst.width = op->dst.pixmap->drawable.width;
 	op->dst.height = op->dst.pixmap->drawable.height;
 
-	op->dst.bo = NULL;
-	priv = sna_pixmap(op->dst.pixmap);
-	if (priv &&
-	    priv->gpu_bo == NULL &&
-	    priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU) {
-		op->dst.bo = priv->cpu_bo;
-		op->damage = &priv->cpu_damage;
+	if (w && h) {
+		box.x1 = x;
+		box.y1 = y;
+		box.x2 = x + w;
+		box.y2 = y + h;
+	} else {
+		box.x1 = dst->pDrawable->x;
+		box.y1 = dst->pDrawable->y;
+		box.x2 = box.x1 + dst->pDrawable->width;
+		box.y2 = box.y1 + dst->pDrawable->height;
 	}
-	if (op->dst.bo == NULL) {
-		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-		if (priv == NULL)
+
+	op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+					  PREFER_GPU | FORCE_GPU | RENDER_GPU,
+					  &box, &op->damage);
+	if (op->dst.bo == NULL)
+		return false;
+
+	/* For single-stream mode there should be no minimum alignment
+	 * required, except that the width must be at least 2 elements.
+	 */
+	if (op->dst.bo->pitch < 2*op->dst.pixmap->drawable.bitsPerPixel) {
+		struct sna_pixmap *priv;
+
+		priv = sna_pixmap_move_to_gpu (op->dst.pixmap,
+					       MOVE_READ | MOVE_WRITE);
+		if (priv == NULL || priv->pinned)
 			return false;
 
-		/* For single-stream mode there should be no minimum alignment
-		 * required, except that the width must be at least 2 elements.
-		 */
 		if (priv->gpu_bo->pitch < 2*op->dst.pixmap->drawable.bitsPerPixel) {
 			struct kgem_bo *bo;
 
-			if (priv->pinned)
-				return false;
-
 			bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 					     op->dst.width, op->dst.height,
 					     2*op->dst.pixmap->drawable.bitsPerPixel,
@@ -2524,13 +2535,13 @@ gen3_composite_set_target(struct sna *sna,
 			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 			priv->gpu_bo = bo;
 		}
-		assert(priv->gpu_bo->proxy == NULL);
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
+		if (sna_damage_is_all(op->damage,
+				      op->dst.width, op->dst.height))
+			op->damage = NULL;
 	}
-	if (sna_damage_is_all(op->damage, op->dst.width, op->dst.height))
-		op->damage = NULL;
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
@@ -2543,6 +2554,7 @@ gen3_composite_set_target(struct sna *sna,
 	     op->dst.x, op->dst.y,
 	     op->damage ? *op->damage : (void *)-1));
 
+	assert(op->dst.bo->proxy == NULL);
 	return true;
 }
 
@@ -2832,14 +2844,13 @@ gen3_render_composite(struct sna *sna,
 					    width,  height,
 					    tmp);
 
-	if (!gen3_composite_set_target(sna, tmp, dst)) {
+	if (!gen3_composite_set_target(sna, tmp, dst,
+				       dst_x, dst_y, width, height)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
 		return false;
 	}
 
-	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
 	tmp->op = op;
 	tmp->rb_reversed = gen3_dst_rb_reversed(tmp->dst.format);
 	if (too_large(tmp->dst.width, tmp->dst.height) ||
@@ -3421,12 +3432,12 @@ gen3_render_composite_spans(struct sna *sna,
 						  width, height, flags, tmp);
 	}
 
-	if (!gen3_composite_set_target(sna, &tmp->base, dst)) {
+	if (!gen3_composite_set_target(sna, &tmp->base, dst,
+				       dst_x, dst_y, width, height)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
 		return false;
 	}
-	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	tmp->base.op = op;
 	tmp->base.rb_reversed = gen3_dst_rb_reversed(tmp->base.dst.format);
diff --git a/src/sna/sna.h b/src/sna/sna.h
index cd55afd..157830b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -464,7 +464,8 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 
 #define PREFER_GPU	0x1
 #define FORCE_GPU	0x2
-#define IGNORE_CPU	0x4
+#define RENDER_GPU	0x4
+#define IGNORE_CPU	0x8
 struct kgem_bo *
 sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 		    struct sna_damage ***damage);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e592b10..f530603 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2570,6 +2570,14 @@ use_cpu_bo:
 	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
 		goto move_to_gpu;
 
+	if (flags & RENDER_GPU) {
+		if (priv->gpu_bo && priv->gpu_bo->tiling)
+			goto move_to_gpu;
+
+		if (priv->cpu_bo->pitch >= 4096)
+			goto move_to_gpu;
+	}
+
 	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region,
 					     MOVE_READ | MOVE_ASYNC_HINT)) {
 		DBG(("%s: failed to move-to-cpu, fallback\n", __FUNCTION__));
commit 3ca1bfb51ba522454433d58131e7dab7fcbe7e34
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 21:28:47 2012 +0100

    sna: Trim a parameter from kgem_bo_mark_dirty() and add some assertions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index fea1791..9e51cb7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -547,7 +547,7 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
 	assert(sna->render_state.gen2.vertex_offset == 0);
 
 	if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
-		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+		kgem_bo_mark_dirty(op->dst.bo);
 		return;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 72a2575..81d2c95 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1342,6 +1342,7 @@ static void gen3_emit_target(struct sna *sna,
 	struct gen3_render_state *state = &sna->render_state.gen3;
 
 	/* BUF_INFO is an implicit flush, so skip if the target is unchanged. */
+	assert(bo->unique_id != 0);
 	if (bo->unique_id != state->current_dst) {
 		uint32_t v;
 
@@ -1373,7 +1374,7 @@ static void gen3_emit_target(struct sna *sna,
 
 		state->current_dst = bo->unique_id;
 	}
-	kgem_bo_mark_dirty(&sna->kgem, bo);
+	kgem_bo_mark_dirty(bo);
 }
 
 static void gen3_emit_composite_state(struct sna *sna,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d72a2fd..632793f 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -654,15 +654,12 @@ gen4_bind_bo(struct sna *sna,
 	assert(!kgem_bo_is_snoop(bo));
 
 	/* After the first bind, we manage the cache domains within the batch */
-	if (is_dst) {
-		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(&sna->kgem, bo);
-	} else
-		domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
 	offset = kgem_bo_get_binding(bo, format);
-	if (offset)
+	if (offset) {
+		if (is_dst)
+			kgem_bo_mark_dirty(bo);
 		return offset * sizeof(uint32_t);
+	}
 
 	offset = sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
@@ -671,6 +668,11 @@ gen4_bind_bo(struct sna *sna,
 	ss->ss0.surface_type = GEN4_SURFACE_2D;
 	ss->ss0.surface_format = format;
 
+	if (is_dst)
+		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
+	else
+		domains = I915_GEM_DOMAIN_SAMPLER << 16;
+
 	ss->ss0.data_return_format = GEN4_SURFACERETURNFORMAT_FLOAT32;
 	ss->ss0.color_blend = 1;
 	ss->ss1.base_addr =
@@ -1385,7 +1387,7 @@ gen4_emit_state(struct sna *sna,
 		     kgem_bo_is_dirty(op->mask.bo)));
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+		kgem_bo_mark_dirty(op->dst.bo);
 	}
 }
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 9b976c8..2894c58 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -639,16 +639,13 @@ gen5_bind_bo(struct sna *sna,
 	uint32_t *ss;
 
 	/* After the first bind, we manage the cache domains within the batch */
-	if (is_dst) {
-		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(&sna->kgem, bo);
-	} else
-		domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
 	if (!DBG_NO_SURFACE_CACHE) {
 		offset = kgem_bo_get_binding(bo, format);
-		if (offset)
+		if (offset) {
+			if (is_dst)
+				kgem_bo_mark_dirty(bo);
 			return offset * sizeof(uint32_t);
+		}
 	}
 
 	offset = sna->kgem.surface -=
@@ -659,6 +656,10 @@ gen5_bind_bo(struct sna *sna,
 		 GEN5_SURFACE_BLEND_ENABLED |
 		 format << GEN5_SURFACE_FORMAT_SHIFT);
 
+	if (is_dst)
+		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
+	else
+		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
 	ss[2] = ((width - 1)  << GEN5_SURFACE_WIDTH_SHIFT |
@@ -1387,7 +1388,7 @@ gen5_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+		kgem_bo_mark_dirty(op->dst.bo);
 	}
 }
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 710a35e..af8899e 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -903,7 +903,7 @@ gen6_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen6_emit_flush(sna);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+		kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall) {
@@ -1230,17 +1230,13 @@ gen6_bind_bo(struct sna *sna,
 	uint16_t offset;
 
 	/* After the first bind, we manage the cache domains within the batch */
-	if (is_dst) {
-		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(&sna->kgem, bo);
-	} else
-		domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
 	offset = kgem_bo_get_binding(bo, format);
 	if (offset) {
 		DBG(("[%x]  bo(handle=%d), format=%d, reuse %s binding\n",
 		     offset, bo->handle, format,
 		     domains & 0xffff ? "render" : "sampler"));
+		if (is_dst)
+			kgem_bo_mark_dirty(bo);
 		return offset * sizeof(uint32_t);
 	}
 
@@ -1250,6 +1246,10 @@ gen6_bind_bo(struct sna *sna,
 	ss[0] = (GEN6_SURFACE_2D << GEN6_SURFACE_TYPE_SHIFT |
 		 GEN6_SURFACE_BLEND_ENABLED |
 		 format << GEN6_SURFACE_FORMAT_SHIFT);
+	if (is_dst)
+		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
+	else
+		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN6_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN6_SURFACE_HEIGHT_SHIFT);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 8a281e5..d34cdfa 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1047,7 +1047,7 @@ gen7_emit_state(struct sna *sna,
 			need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
 		gen7_emit_pipe_invalidate(sna, need_stall);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
+		kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall)
@@ -1348,15 +1348,12 @@ gen7_bind_bo(struct sna *sna,
 	COMPILE_TIME_ASSERT(sizeof(struct gen7_surface_state) == 32);
 
 	/* After the first bind, we manage the cache domains within the batch */
-	if (is_dst) {
-		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(&sna->kgem, bo);
-	} else
-		domains = I915_GEM_DOMAIN_SAMPLER << 16;
-
 	offset = kgem_bo_get_binding(bo, format);
-	if (offset)
+	if (offset) {
+		if (is_dst)
+			kgem_bo_mark_dirty(bo);
 		return offset * sizeof(uint32_t);
+	}
 
 	offset = sna->kgem.surface -=
 		sizeof(struct gen7_surface_state) / sizeof(uint32_t);
@@ -1364,6 +1361,10 @@ gen7_bind_bo(struct sna *sna,
 	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
 		 gen7_tiling_bits(bo->tiling) |
 		 format << GEN7_SURFACE_FORMAT_SHIFT);
+	if (is_dst)
+		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
+	else
+		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN7_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 11cd6a4..c5b88ff 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1811,6 +1811,8 @@ static void kgem_commit(struct kgem *kgem)
 		}
 
 		kgem_retire(kgem);
+		assert(list_is_empty(&rq->buffers));
+
 		gem_close(kgem->fd, rq->bo->handle);
 	} else {
 		list_add_tail(&rq->list, &kgem->requests);
@@ -3551,6 +3553,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 
 		if (bo->exec == NULL)
 			_kgem_add_bo(kgem, bo);
+		assert(bo->rq == kgem->next_request);
 
 		if (kgem->gen < 40 && read_write_domain & KGEM_RELOC_FENCED) {
 			if (bo->tiling &&
@@ -3568,7 +3571,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 		kgem->reloc[index].presumed_offset = bo->presumed_offset;
 
 		if (read_write_domain & 0x7ff)
-			kgem_bo_mark_dirty(kgem, bo);
+			kgem_bo_mark_dirty(bo);
 
 		delta += bo->presumed_offset;
 	} else {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8227538..cf7cf70 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -534,15 +534,17 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 	return bo->dirty;
 }
 
-static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
+static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
 {
 	if (bo->dirty)
 		return;
 
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 
+	assert(bo->exec);
+	assert(bo->rq);
 	bo->needs_flush = bo->dirty = true;
-	list_move(&bo->request, &kgem->next_request->buffers);
+	list_move(&bo->request, &bo->rq->buffers);
 }
 
 #define KGEM_BUFFER_WRITE	0x1
commit 16f3d3a9ae145a3af51d2c0c42c6c585d676a863
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 19:42:22 2012 +0100

    sna: Keep a stash of the most recently allocated requests
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5441eed..11cd6a4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -125,6 +125,7 @@ struct kgem_buffer {
 };
 
 static struct kgem_bo *__kgem_freed_bo;
+static struct kgem_request *__kgem_freed_request;
 static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
 
 static inline int bytes(struct kgem_bo *bo)
@@ -538,15 +539,28 @@ static struct kgem_request *__kgem_request_alloc(void)
 {
 	struct kgem_request *rq;
 
-	rq = malloc(sizeof(*rq));
-	if (rq == NULL)
-		rq = &_kgem_static_request;
+	rq = __kgem_freed_request;
+	if (rq) {
+		__kgem_freed_request = *(struct kgem_request **)rq;
+	} else {
+		rq = malloc(sizeof(*rq));
+		if (rq == NULL)
+			rq = &_kgem_static_request;
+	}
 
 	list_init(&rq->buffers);
+	rq->bo = NULL;
 
 	return rq;
 }
 
+static void __kgem_request_free(struct kgem_request *rq)
+{
+	_list_del(&rq->list);
+	*(struct kgem_request **)rq = __kgem_freed_request;
+	__kgem_freed_request = rq;
+}
+
 static struct list *inactive(struct kgem *kgem, int num_pages)
 {
 	return &kgem->inactive[cache_bucket(num_pages)];
@@ -1699,8 +1713,7 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			}
 		}
 
-		_list_del(&rq->list);
-		free(rq);
+		__kgem_request_free(rq);
 	}
 
 #if HAS_DEBUG_FULL
@@ -1963,8 +1976,7 @@ static void kgem_cleanup(struct kgem *kgem)
 				kgem_bo_free(kgem, bo);
 		}
 
-		_list_del(&rq->list);
-		free(rq);
+		__kgem_request_free(rq);
 	}
 
 	kgem_close_inactive(kgem);
@@ -2303,6 +2315,11 @@ bool kgem_expire_cache(struct kgem *kgem)
 		free(bo);
 	}
 
+	while (__kgem_freed_request) {
+		struct kgem_request *rq = __kgem_freed_request;
+		__kgem_freed_request = *(struct kgem_request **)rq;
+		free(rq);
+	}
 
 	expire = 0;
 	list_for_each_entry(bo, &kgem->snoop, list) {
commit fb349ced91e15ecaa025321bd37d1fe3cfdd2f44
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 17:35:44 2012 +0100

    sna: A few more buffer cache management assertions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3cb9eb6..5441eed 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1333,6 +1333,9 @@ static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 
 static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 {
+	assert(bo->refcnt == 0);
+	assert(bo->exec == NULL);
+
 	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
 		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
 		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
@@ -1953,7 +1956,9 @@ static void kgem_cleanup(struct kgem *kgem)
 
 			list_del(&bo->request);
 			bo->rq = NULL;
+			bo->exec = NULL;
 			bo->domain = DOMAIN_NONE;
+			bo->dirty = false;
 			if (bo->refcnt == 0)
 				kgem_bo_free(kgem, bo);
 		}
commit ae6b3c7508d570af94f172d6a91d62d2987d654b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 17:03:16 2012 +0100

    Check that the module is indeed i915 before using custom ioctls
    
    Thanks to Adam Jackson for pointing me towards drmGetVersion() and
    Julien Cristau for saying "Yuck!"
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index edea48d..e5f98d4 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -392,11 +392,19 @@ static Bool has_kernel_mode_setting(struct pci_device *dev)
 	ret = FALSE;
 	fd = drmOpen(NULL, id);
 	if (fd != -1) {
-		struct drm_i915_getparam gp;
+		drmVersionPtr version = drmGetVersion(fd);
+		if (version) {
+			ret = strcmp ("i915", version->name) == 0;
+			drmFreeVersion(version);
+		}
+		if (ret) {
+			struct drm_i915_getparam gp;
+			gp.param = I915_PARAM_HAS_GEM;
+			gp.value = &ret;
+			if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+				ret = FALSE;
+		}
 
-		gp.param = I915_PARAM_HAS_GEM;
-		gp.value = &ret;
-		(void)drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 		close(fd);
 	}
 
commit 3c611087e8ae09fc3fe4271f16d912ac8ae89f6b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 16:09:25 2012 +0100

    Only open the matching BusID and not the first named
    
    If you pass a name to drmOpen() it will attempt to open any device
    corresponding to that name if it first fails to open the device
    corresponding to the BusID. Obviously we want the failure from opening
    the specified device in order to prevent wrongly opening the first found
    device multiple times.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 1ef06fb..c5be679 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -249,7 +249,7 @@ static Bool intel_open_drm_master(ScrnInfoPtr scrn)
 	snprintf(busid, sizeof(busid), "pci:%04x:%02x:%02x.%d",
 		 dev->domain, dev->bus, dev->dev, dev->func);
 
-	intel->drmSubFD = drmOpen("i915", busid);
+	intel->drmSubFD = drmOpen(NULL, busid);
 	if (intel->drmSubFD == -1) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "[drm] Failed to open DRM device for %s: %s\n",
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 6810c7a..a0beb4c 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -259,7 +259,7 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	snprintf(busid, sizeof(busid), "pci:%04x:%02x:%02x.%d",
 		 pci->domain, pci->bus, pci->dev, pci->func);
 
-	fd = drmOpen("i915", busid);
+	fd = drmOpen(NULL, busid);
 	if (fd == -1) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "[drm] Failed to open DRM device for %s: %s\n",
commit dd6c67b32f726b6ad7f12f3b83f6d8c868ff4dc1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 15:58:03 2012 +0100

    sna: Add a couple of buffer cache management assertions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a3df628..3cb9eb6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1280,6 +1280,7 @@ inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
 
 	list_del(&bo->list);
 	assert(bo->rq == NULL);
+	assert(bo->exec == NULL);
 	if (bo->map) {
 		assert(!list_is_empty(&bo->vma));
 		list_del(&bo->vma);
@@ -1293,6 +1294,7 @@ inline static void kgem_bo_remove_from_active(struct kgem *kgem,
 	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
 
 	list_del(&bo->list);
+	assert(bo->rq != NULL);
 	if (bo->rq == &_kgem_static_request)
 		list_del(&bo->request);
 	assert(list_is_empty(&bo->vma));
@@ -1366,6 +1368,7 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		assert(bo->proxy == NULL);
 		assert(bo->tiling == I915_TILING_NONE);
 		assert(bo->rq == NULL);
+		assert(bo->exec == NULL);
 
 		if (num_pages > num_pages(bo))
 			continue;
@@ -1753,6 +1756,7 @@ static void kgem_commit(struct kgem *kgem)
 		     (unsigned)bo->exec->offset));
 
 		assert(!bo->purged);
+		assert(bo->exec);
 		assert(bo->rq == rq || (bo->proxy->rq == rq));
 
 		bo->presumed_offset = bo->exec->offset;
@@ -2485,6 +2489,8 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			assert(IS_CPU_MAP(bo->map) == for_cpu);
 			assert(bucket(bo) == cache_bucket(num_pages));
 			assert(bo->proxy == NULL);
+			assert(bo->rq == NULL);
+			assert(bo->exec == NULL);
 
 			if (num_pages > num_pages(bo)) {
 				DBG(("inactive too small: %d < %d\n",
commit 01ebdb4d7a8bb751167153554f9122d996e4ea91
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 15:33:26 2012 +0100

    sna: Remove confusing is_cpu()
    
    The only real user now has its own heuristics, so convert the remaining
    users over to !is_gpu().
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 31859b4..fea1791 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1479,7 +1479,8 @@ try_blt(struct sna *sna,
 		     src->pDrawable->width, src->pDrawable->height));
 		return true;
 	}
-	return is_cpu(src->pDrawable);
+
+	return !is_gpu(src->pDrawable);
 }
 
 static bool
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 48f104e..72a2575 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2428,7 +2428,7 @@ source_use_blt(struct sna *sna, PicturePtr picture)
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
 		return true;
 
-	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
+	return !is_gpu(picture->pDrawable);
 }
 
 static bool
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index a56ef79..d72a2fd 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1996,7 +1996,7 @@ picture_is_cpu(PicturePtr picture)
 	if (!picture->pDrawable)
 		return false;
 
-	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
+	return !is_gpu(picture->pDrawable);
 }
 
 static inline bool prefer_blt(struct sna *sna)
@@ -2063,7 +2063,7 @@ untransformed(PicturePtr p)
 static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && untransformed(p) && is_cpu(p->pDrawable);
+	return p->pDrawable && untransformed(p) && !is_gpu(p->pDrawable);
 }
 
 static bool
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 71e77a4..9b976c8 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2012,8 +2012,7 @@ picture_is_cpu(PicturePtr picture)
 	if (!picture->pDrawable)
 		return false;
 
-
-	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
+	return !is_gpu(picture->pDrawable);
 }
 
 static bool
@@ -2073,7 +2072,7 @@ untransformed(PicturePtr p)
 static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && untransformed(p) && is_cpu(p->pDrawable);
+	return p->pDrawable && untransformed(p) && !is_gpu(p->pDrawable);
 }
 
 static bool
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 3849a88..64f8a46 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -88,32 +88,6 @@ is_gpu(DrawablePtr drawable)
 }
 
 static inline bool
-is_cpu(DrawablePtr drawable)
-{
-	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	if (priv == NULL || priv->clear)
-		return true;
-
-	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
-		return false;
-
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return true;
-
-	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo))
-		return false;
-
-	return true;
-}
-
-static inline bool
-is_dirty(DrawablePtr drawable)
-{
-	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	return priv == NULL || kgem_bo_is_dirty(priv->gpu_bo);
-}
-
-static inline bool
 too_small(struct sna_pixmap *priv)
 {
 	assert(priv);
commit 410316d20299b9ed3447d1d897f904af786ed097
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 14:56:12 2012 +0100

    sna: Refine decision making for maybe-inplace trapezoids
    
    In particular, we want to avoid preferentially taking the CPU paths
    when it may force any migration (including clear).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 6cc03f9..dd438de 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4660,6 +4660,8 @@ static bool
 trapezoid_spans_maybe_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 			      PictFormatPtr maskFormat)
 {
+	struct sna_pixmap *priv;
+
 	if (NO_SCAN_CONVERTER)
 		return false;
 
@@ -4712,7 +4714,26 @@ trapezoid_spans_maybe_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 out:
-	return is_cpu(dst->pDrawable) ? true : dst->pDrawable->width <= TOR_INPLACE_SIZE;
+	priv = sna_pixmap_from_drawable(dst->pDrawable);
+	if (priv == NULL)
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return false;
+
+	if (DAMAGE_IS_ALL(priv->cpu_damage) || priv->gpu_damage == NULL)
+		return true;
+
+	if (priv->clear)
+		return dst->pDrawable->width <= TOR_INPLACE_SIZE;
+
+	if (kgem_bo_is_busy(priv->gpu_bo))
+		return false;
+
+	if (priv->cpu_damage)
+		return true;
+
+	return dst->pDrawable->width <= TOR_INPLACE_SIZE;
 }
 
 static bool
commit 91f1bf971f9cdc6498f513a5ddec1ad7a4e24b3d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 15:27:27 2012 +0100

    sna: Correct ordering of calls to memcpy for BLT cpu composite paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index 1ad5eee..853eb20 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -153,6 +153,8 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	assert(dst);
 	assert(width && height);
 	assert(bpp >= 8);
+	assert(width*bpp <= 8*src_stride);
+	assert(width*bpp <= 8*dst_stride);
 
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
 	     __FUNCTION__, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
@@ -245,6 +247,7 @@ memmove_box(const void *src, void *dst,
 
 	width = (box->x2 - box->x1) * bpp;
 	height = (box->y2 - box->y1);
+	assert(width <= 8*stride);
 	if (width == stride) {
 		width *= height;
 		height = 1;
@@ -372,6 +375,8 @@ memcpy_xor(const void *src, void *dst, int bpp,
 
 	assert(width && height);
 	assert(bpp >= 8);
+	assert(width*bpp <= 8*src_stride);
+	assert(width*bpp <= 8*dst_stride);
 
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d, bpp=%d, and=%x, xor=%x\n",
 	     __FUNCTION__,
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 750d749..656e979 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1278,8 +1278,8 @@ blt_put_composite__cpu(struct sna *sna,
 	memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
 		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 		   r->src.x + op->u.blt.sx, r->src.y + op->u.blt.sy,
-		   r->width, r->height,
-		   r->dst.x + op->dst.x, r->dst.y + op->dst.y);
+		   r->dst.x + op->dst.x, r->dst.y + op->dst.y,
+		   r->width, r->height);
 }
 
 fastcall static void
@@ -1292,8 +1292,8 @@ blt_put_composite_box__cpu(struct sna *sna,
 	memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
 		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 		   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
-		   box->x2-box->x1, box->y2-box->y1,
-		   box->x1 + op->dst.x, box->y1 + op->dst.y);
+		   box->x1 + op->dst.x, box->y1 + op->dst.y,
+		   box->x2-box->x1, box->y2-box->y1);
 }
 
 static void
@@ -1307,8 +1307,8 @@ blt_put_composite_boxes__cpu(struct sna *sna,
 		memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
 			   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 			   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
-			   box->x2-box->x1, box->y2-box->y1,
-			   box->x1 + op->dst.x, box->y1 + op->dst.y);
+			   box->x1 + op->dst.x, box->y1 + op->dst.y,
+			   box->x2-box->x1, box->y2-box->y1);
 		box++;
 	} while (--n);
 }
@@ -1323,8 +1323,8 @@ blt_put_composite_with_alpha__cpu(struct sna *sna,
 	memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
 		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 		   r->src.x + op->u.blt.sx, r->src.y + op->u.blt.sy,
-		   r->width, r->height,
 		   r->dst.x + op->dst.x, r->dst.y + op->dst.y,
+		   r->width, r->height,
 		   0xffffffff, op->u.blt.pixel);
 
 }
@@ -1339,8 +1339,8 @@ blt_put_composite_box_with_alpha__cpu(struct sna *sna,
 	memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
 		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 		   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
-		   box->x2-box->x1, box->y2-box->y1,
 		   box->x1 + op->dst.x, box->y1 + op->dst.y,
+		   box->x2-box->x1, box->y2-box->y1,
 		   0xffffffff, op->u.blt.pixel);
 }
 
@@ -1355,8 +1355,8 @@ blt_put_composite_boxes_with_alpha__cpu(struct sna *sna,
 		memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
 			   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
 			   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
-			   box->x2-box->x1, box->y2-box->y1,
 			   box->x1 + op->dst.x, box->y1 + op->dst.y,
+			   box->x2-box->x1, box->y2-box->y1,
 			   0xffffffff, op->u.blt.pixel);
 		box++;
 	} while (--n);
commit 027569bf83fc6d67dca7cfd65fdfa37ef6b47204
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 12:55:20 2012 +0100

    Missing includes for b5b76ad849b
    
    The warnings of implicit function declarations were lost amongst the
    noise.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index 0a70b24..edea48d 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -28,8 +28,10 @@
 #include "config.h"
 #endif
 
+#include <unistd.h>
 #include <xf86_OSproc.h>
 #include <xf86Parser.h>
+#include <xf86drm.h>
 #include <xf86drmMode.h>
 #include <i915_drm.h>
 
@@ -394,7 +396,7 @@ static Bool has_kernel_mode_setting(struct pci_device *dev)
 
 		gp.param = I915_PARAM_HAS_GEM;
 		gp.value = &ret;
-		(void)drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
+		(void)drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 		close(fd);
 	}
 
commit f1b259f627814c765ea93d8c839ee7533249974f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 12:51:51 2012 +0100

    sna: Add a modicum of DBG for PolyFillRect
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8941be9..e592b10 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11512,11 +11512,10 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	     region.extents.x2, region.extents.y2,
 	     flags));
 
-	if (FORCE_FALLBACK)
-		goto fallback;
-
-	if (!ACCEL_POLY_FILL_RECT)
+	if (FORCE_FALLBACK || !ACCEL_POLY_FILL_RECT) {
+		DBG(("%s: fallback forced\n", __FUNCTION__));
 		goto fallback;
+	}
 
 	if (priv == NULL) {
 		DBG(("%s: fallback -- unattached\n", __FUNCTION__));
@@ -11568,13 +11567,17 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 
 	/* If the source is already on the GPU, keep the operation on the GPU */
 	if (gc->fillStyle == FillTiled) {
-		if (!gc->tileIsPixel && sna_pixmap_is_gpu(gc->tile.pixmap))
+		if (!gc->tileIsPixel && sna_pixmap_is_gpu(gc->tile.pixmap)) {
+			DBG(("%s: source is already on the gpu\n"));
 			hint |= PREFER_GPU | FORCE_GPU;
+		}
 	}
 
 	bo = sna_drawable_use_bo(draw, hint, &region.extents, &damage);
-	if (bo == NULL)
+	if (bo == NULL) {
+		DBG(("%s: not using GPU, hint=%x\n", __FUNCTION__, hint));
 		goto fallback;
+	}
 
 	if (gc_is_solid(gc, &color)) {
 		DBG(("%s: solid fill [%08x], testing for blt\n",
commit b5b76ad849bfda1e75192d1cb3c6c0fcc623bb91
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 12:01:39 2012 +0100

    Sanity check that the driver is an i915.ko GEM device before claiming it
    
    This fixes an issue with us claiming Poulsbo and friends even though we
    do not speak their language.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index c0403ca..0a70b24 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -31,6 +31,7 @@
 #include <xf86_OSproc.h>
 #include <xf86Parser.h>
 #include <xf86drmMode.h>
+#include <i915_drm.h>
 
 #include <xorgVersion.h>
 
@@ -369,20 +370,35 @@ static Bool intel_driver_func(ScrnInfoPtr pScrn,
 static Bool has_kernel_mode_setting(struct pci_device *dev)
 {
 	char id[20];
-	int ret;
+	int ret, fd;
 
 	snprintf(id, sizeof(id),
 		 "pci:%04x:%02x:%02x.%d",
 		 dev->domain, dev->bus, dev->dev, dev->func);
 
 	ret = drmCheckModesettingSupported(id);
-	if (ret && xf86LoadKernelModule("i915"))
-		ret = drmCheckModesettingSupported(id);
-	/* Be nice to the user and load fbcon too */
-	if (!ret)
+	if (ret) {
+		if (xf86LoadKernelModule("i915"))
+			ret = drmCheckModesettingSupported(id);
+		if (ret)
+			return FALSE;
+		/* Be nice to the user and load fbcon too */
 		(void)xf86LoadKernelModule("fbcon");
+	}
+
+	/* Confirm that this is a i915.ko device with GEM/KMS enabled */
+	ret = FALSE;
+	fd = drmOpen(NULL, id);
+	if (fd != -1) {
+		struct drm_i915_getparam gp;
+
+		gp.param = I915_PARAM_HAS_GEM;
+		gp.value = &ret;
+		(void)drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp, sizeof(gp));
+		close(fd);
+	}
 
-	return ret == 0;
+	return ret;
 }
 
 extern XF86ConfigPtr xf86configptr;
commit 74f998136bac441d782faf779616af6f7bdcadad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 11:07:55 2012 +0100

    sna: Avoid migrating the BLT composite src to the GPU if the dst is not
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 99671a9..750d749 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1641,6 +1641,15 @@ prepare_blt_put(struct sna *sna,
 	return true;
 }
 
+static bool source_is_gpu(PixmapPtr pixmap, const BoxRec *box)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return false;
+	return sna_damage_contains_box(priv->gpu_damage,
+				       box) != PIXMAN_REGION_OUT;
+}
+
 #define alphaless(format) PICT_FORMAT(PICT_FORMAT_BPP(format),		\
 				      PICT_FORMAT_TYPE(format),		\
 				      0,				\
@@ -1839,7 +1848,9 @@ clear:
 	src_box.y1 = y;
 	src_box.x2 = x + width;
 	src_box.y2 = y + height;
-	bo = __sna_render_pixmap_bo(sna, src_pixmap, &src_box, true);
+	bo = NULL;
+	if (tmp->dst.bo || source_is_gpu(src_pixmap, &src_box))
+		bo = __sna_render_pixmap_bo(sna, src_pixmap, &src_box, true);
 	if (bo) {
 		if (!tmp->dst.bo)
 			tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
commit a05643eb670e91ab102465df799301def88faaf9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 10:46:14 2012 +0100

    sna: Remove unneeded source bo unref after __sna_render_pixmap_bo()
    
    As __sna_render_pixmap_bo() deliberately does not reference its returned
    bo, we need to avoid unreferencing it else we cause explosions later.
    
    Fixes regression from commit a13781d19defc97af6a279c11a85e33ef825020e
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Sun Aug 19 09:45:12 2012 +0100
    
        sna: Enable BLT composite functions to target CPU buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index f050669..99671a9 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1850,11 +1850,9 @@ clear:
 		if (!tmp->dst.bo) {
 			DBG(("%s: fallback -- unaccelerated read back\n",
 			     __FUNCTION__));
-			kgem_bo_destroy(&sna->kgem, bo);
 		} else if (bo->snoop && tmp->dst.bo->snoop) {
 			DBG(("%s: fallback -- can not copy between snooped bo\n",
 			     __FUNCTION__));
-			kgem_bo_destroy(&sna->kgem, bo);
 		} else {
 			ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
 			if (fallback)
commit c80db9bc2ef0a37a4abb78c9ef667c8b36ab6fba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Aug 20 10:45:36 2012 +0100

    sna: Flush the batch before preparing for a FlushCallback
    
    As we end up submitting and maybe synchronising upon a batch within
    FlushCallback (or our client will) it is important that we start that
    serialized batch as early as possible to minimise the impending stalls.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3c83ac5..cd55afd 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -458,6 +458,8 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 	return sna_pixmap_move_to_gpu(get_drawable_pixmap(drawable), flags) != NULL;
 }
 
+void sna_add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv);
+
 struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 
 #define PREFER_GPU	0x1
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 04dacf1..8941be9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1012,11 +1012,19 @@ fallback:
 	return create_pixmap(sna, screen, width, height, depth, usage);
 }
 
-static inline void add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
+void sna_add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
 {
 	DBG(("%s: marking pixmap=%ld for flushing\n",
 	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
 	list_move(&priv->list, &sna->flush_pixmaps);
+
+	if (sna->kgem.need_retire)
+		kgem_retire(&sna->kgem);
+	if (!sna->kgem.need_retire || !sna->kgem.flush) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+		kgem_submit(&sna->kgem);
+	}
+
 	sna->kgem.flush = true;
 }
 
@@ -1065,7 +1073,7 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 
 	if (priv->shm && kgem_bo_is_busy(priv->cpu_bo)) {
 		kgem_bo_submit(&sna->kgem, priv->cpu_bo); /* XXX ShmDetach */
-		add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv);
 	} else
 		__sna_free_pixmap(sna, pixmap, priv);
 	return TRUE;
@@ -1404,7 +1412,7 @@ skip_inplace_map:
 
 		if (priv->flush) {
 			assert(!priv->shm);
-			add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv);
 		}
 	}
 
@@ -2006,7 +2014,7 @@ done:
 		}
 		if (priv->flush) {
 			assert(!priv->shm);
-			add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv);
 		}
 	}
 
@@ -2220,7 +2228,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 							    box, n, 0);
 				if (ok && priv->shm) {
 					assert(!priv->flush);
-					add_flush_pixmap(sna, priv);
+					sna_add_flush_pixmap(sna, priv);
 				}
 			}
 			if (!ok) {
@@ -2264,7 +2272,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    box, 1, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv);
 			}
 		}
 		if (!ok) {
@@ -2299,7 +2307,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    box, n, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv);
 			}
 		}
 		if (!ok) {
@@ -2325,7 +2333,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv);
 	}
 
 done:
@@ -2576,11 +2584,8 @@ use_cpu_bo:
 		*damage = &priv->cpu_damage;
 
 	if (priv->shm) {
-		struct sna *sna = to_sna_from_pixmap(pixmap);
 		assert(!priv->flush);
-		add_flush_pixmap(sna, priv);
-		if (!kgem_bo_is_busy(priv->cpu_bo))
-			kgem_submit(&sna->kgem);
+		sna_add_flush_pixmap(to_sna_from_pixmap(pixmap), priv);
 	}
 
 	DBG(("%s: using CPU bo with damage? %d\n",
@@ -2802,7 +2807,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    box, n, 0);
 			if (ok && priv->shm) {
 				assert(!priv->flush);
-				add_flush_pixmap(sna, priv);
+				sna_add_flush_pixmap(sna, priv);
 			}
 		}
 		if (!ok) {
@@ -2837,7 +2842,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		add_flush_pixmap(sna, priv);
+		sna_add_flush_pixmap(sna, priv);
 	}
 
 	/* For large bo, try to keep only a single copy around */
@@ -3392,7 +3397,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		}
 		if (priv->flush) {
 			assert(!priv->shm);
-			add_flush_pixmap(sna, priv);
+			sna_add_flush_pixmap(sna, priv);
 		}
 	}
 	priv->cpu = true;
@@ -4270,7 +4275,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (src_priv->shm) {
 				assert(!src_priv->flush);
-				add_flush_pixmap(sna, src_priv);
+				sna_add_flush_pixmap(sna, src_priv);
 			}
 
 			if (damage)
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 7b54f23..13822b8 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -378,8 +378,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 
 	if (priv->shm) {
 		assert(!priv->flush);
-		list_move(&priv->list, &sna->flush_pixmaps);
-		sna->kgem.flush |= 1;
+		sna_add_flush_pixmap(sna, priv);
 	}
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
commit c39fe0253847f5a86e16b47ba420c8ba819c9110
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 20:42:55 2012 +0100

    sna: Do not use the GPU to migrate to the CPU whilst wedged!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4596a7a..04dacf1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1126,6 +1126,9 @@ static inline bool use_cpu_bo_for_download(struct sna *sna,
 	if (DBG_NO_CPU_DOWNLOAD)
 		return false;
 
+	if (wedged(sna))
+		return false;
+
 	if (priv->cpu_bo == NULL || !sna->kgem.can_blt_cpu)
 		return false;
 
@@ -2144,6 +2147,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 	assert_pixmap_damage(pixmap);
 	assert_pixmap_contains_box(pixmap, box);
+	assert(!wedged(sna));
 
 	if (sna_damage_is_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
commit fe05268d70088c8cad5f4b5ef756e1ffe2069fca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 19:28:42 2012 +0100

    sna: Experiment with flushing the batch prior to rendering to a ShmPixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0cd4b77..4596a7a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2572,8 +2572,11 @@ use_cpu_bo:
 		*damage = &priv->cpu_damage;
 
 	if (priv->shm) {
+		struct sna *sna = to_sna_from_pixmap(pixmap);
 		assert(!priv->flush);
-		add_flush_pixmap(to_sna_from_pixmap(pixmap), priv);
+		add_flush_pixmap(sna, priv);
+		if (!kgem_bo_is_busy(priv->cpu_bo))
+			kgem_submit(&sna->kgem);
 	}
 
 	DBG(("%s: using CPU bo with damage? %d\n",
commit bbd7a825810cc9772e6d613df449cb5ecb0be3f6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 19:28:20 2012 +0100

    sna: Don't promote a ShmPixmap to GPU for a CopyArea
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2a022e6..0cd4b77 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3836,6 +3836,8 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	} else {
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 			return false;
+		if (priv->shm)
+			return false;
 	}
 
 	count = priv->source_count++;
@@ -3947,7 +3949,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))
 		goto fallback;
 
-	if (priv->gpu_bo) {
+	if (priv->gpu_damage) {
 		if (alu == GXcopy && priv->clear)
 			goto out;
 
commit 7bf7a5ad1057f1aeb5b261da6dc501323c022287
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 19:17:35 2012 +0100

    sna/gen3: Tidy vbo discard
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 5b0894e..48f104e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1652,6 +1652,7 @@ static int gen3_vertex_finish(struct sna *sna)
 		sna->render.vbo = NULL;
 		return 0;
 	}
+	assert(sna->render.vbo->snoop == false);
 
 	if (sna->render.vertex_used) {
 		memcpy(sna->render.vertices,
@@ -1710,10 +1711,12 @@ static void gen3_vertex_close(struct sna *sna)
 			     sna->render.vertex_used));
 			bo = kgem_create_linear(&sna->kgem,
 						4*sna->render.vertex_used, 0);
-			if (bo)
+			if (bo) {
+				assert(sna->render.vbo->snoop == false);
 				kgem_bo_write(&sna->kgem, bo,
 					      sna->render.vertex_data,
 					      4*sna->render.vertex_used);
+			}
 			free_bo = bo;
 		}
 	}
@@ -1967,9 +1970,10 @@ gen3_render_reset(struct sna *sna)
 	state->last_vertex_offset = 0;
 	state->vertex_offset = 0;
 
-	if (sna->render.vbo &&
+	if (sna->render.vbo != NULL &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
-		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
+		DBG(("%s: discarding vbo as next access will stall: %d\n",
+		     __FUNCTION__, sna->render.vbo->presumed_offset));
 		discard_vbo(sna);
 	}
 }
@@ -1980,7 +1984,8 @@ gen3_render_retire(struct kgem *kgem)
 	struct sna *sna;
 
 	sna = container_of(kgem, struct sna, kgem);
-	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+	if (sna->render.vertex_reloc[0] == 0 &&
+	    sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
 		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
commit d1b808fd72b477bde96f7c6737a993bd1a20baf2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 17:53:24 2012 +0100

    sna: Tweak is_cpu/is_gpu heuristics
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 0024f99..3849a88 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -75,24 +75,32 @@ is_gpu(DrawablePtr drawable)
 	if (priv == NULL || priv->clear)
 		return false;
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage) ||
-	    (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
+	if (priv->cpu_damage == NULL)
 		return true;
 
-	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
+	if (priv->gpu_damage && !priv->gpu_bo->proxy)
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo);
 }
 
 static inline bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	if (priv == NULL || priv->gpu_bo == NULL || priv->clear)
+	if (priv == NULL || priv->clear)
 		return true;
 
-	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo))
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
 		return false;
 
-	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+	if (DAMAGE_IS_ALL(priv->cpu_damage))
+		return true;
+
+	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo))
 		return false;
 
 	return true;
commit c682c1a37692021cbd9bfc1e3f1ccf2b648c73f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 17:28:25 2012 +0100

    sna: Discard GPU (and damage) after applying clear on migration to CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1723757..2a022e6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1326,7 +1326,6 @@ skip_inplace_map:
 			assert(!priv->shm);
 			sna_pixmap_free_cpu(sna, priv);
 		}
-		sna_damage_destroy(&priv->gpu_damage);
 	}
 
 	if (pixmap->devPrivate.ptr == NULL &&
@@ -1355,6 +1354,11 @@ skip_inplace_map:
 				    priv->clear_color);
 		}
 
+		sna_damage_all(&priv->cpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+		sna_pixmap_free_gpu(sna, priv);
+		priv->undamaged = false;
 		priv->clear = false;
 	}
 
commit dc83ef49f9e8ff94ab77aa9d54fe29e32b9d7a3d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 17:20:45 2012 +0100

    sna/trapezoids: Accept more operators for maybe-inplace
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 0dcb971..6cc03f9 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4671,22 +4671,41 @@ trapezoid_spans_maybe_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (is_mono(dst, maskFormat))
 		goto out;
 
-	if (!sna_picture_is_solid(src, NULL))
-		return false;
-
 	switch ((int)dst->format) {
 	case PICT_a8:
+		if (!sna_picture_is_solid(src, NULL))
+			return false;
+
+		switch (op) {
+		case PictOpIn:
+		case PictOpAdd:
+		case PictOpSrc:
+			break;
+		default:
+			return false;
+		}
+		break;
+
 	case PICT_x8r8g8b8:
 	case PICT_a8r8g8b8:
-		break;
-	default:
-		return false;
-	}
+		if (picture_is_gpu(src))
+			return false;
 
-	switch (op) {
-	case PictOpIn:
-	case PictOpAdd:
-	case PictOpSrc:
+		switch (op) {
+		case PictOpOver:
+		case PictOpAdd:
+		case PictOpOutReverse:
+			break;
+		case PictOpSrc:
+			if (sna_picture_is_solid(src, NULL))
+				break;
+
+			if (!sna_drawable_is_clear(dst->pDrawable))
+				return false;
+			break;
+		default:
+			return false;
+		}
 		break;
 	default:
 		return false;
commit 47d948f5df0cd0e975cfe5183b6ce79cd3fd27ee
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 17:09:21 2012 +0100

    sna: Update maybe_inplace to recognise more types of handled pixel formats
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index a1141f1..0dcb971 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4674,8 +4674,14 @@ trapezoid_spans_maybe_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (!sna_picture_is_solid(src, NULL))
 		return false;
 
-	if (dst->format != PICT_a8)
+	switch ((int)dst->format) {
+	case PICT_a8:
+	case PICT_x8r8g8b8:
+	case PICT_a8r8g8b8:
+		break;
+	default:
 		return false;
+	}
 
 	switch (op) {
 	case PictOpIn:
commit 02963f489b177d0085006753e91e240545933387
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 15:45:35 2012 +0100

    sna: Only submit the batch if flushing a DRI client bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 31a9d3d..1723757 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13362,13 +13362,13 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		} else {
 			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber));
-			ret = sna_pixmap_move_to_gpu(priv->pixmap,
-						     MOVE_READ | __MOVE_FORCE);
+			if (sna_pixmap_move_to_gpu(priv->pixmap,
+						     MOVE_READ | __MOVE_FORCE))
+				kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 		}
 		(void)ret;
 	}
 
-	kgem_submit(&sna->kgem);
 	sna->kgem.flush = false;
 }
 
commit 13d1a105159222518800d3c5ad5660725864ec6b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 09:48:05 2012 +0100

    sna: compare the correct trailing dword when skipping identical bitmap lines
    
    Fixes regression in 2.20.4 from
    
    commit 85192f00e345830541e3715e211b1f98154bbef4
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Wed Aug 8 12:11:50 2012 +0100
    
        sna: Ignore trailing bits when comparing lines inside the bitmap
    
    Reported-by: Edward Sheldrake <ejsheldrake at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=53699
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index bba6ea3..7c037fe 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -88,9 +88,7 @@ fbBitmapToRegion(PixmapPtr pixmap)
 		line += stride;
 		while (y2 < pixmap->drawable.height &&
 		       memcmp(bits, line, width >> 3) == 0 &&
-		       (maskw == 0 ||
-			(bits[width >> (FB_SHIFT - 3)] & maskw) ==
-			(line[width >> (FB_SHIFT - 3)] & maskw)))
+		       (maskw == 0 || (bits[width >> FB_SHIFT] & maskw) == (line[width >> FB_SHIFT] & maskw)))
 			line += stride, y2++;
 
 		if (READ(bits) & MASK_0)
commit a13781d19defc97af6a279c11a85e33ef825020e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 19 09:45:12 2012 +0100

    sna: Enable BLT composite functions to target CPU buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 9824202..f050669 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -760,6 +760,65 @@ static void blt_composite_fill(struct sna *sna,
 	sna_blt_fill_one(sna, &op->u.blt, x1, y1, x2-x1, y2-y1);
 }
 
+fastcall
+static void blt_composite_fill__cpu(struct sna *sna,
+				    const struct sna_composite_op *op,
+				    const struct sna_composite_rectangles *r)
+{
+	int x1, x2, y1, y2;
+
+	x1 = r->dst.x + op->dst.x;
+	y1 = r->dst.y + op->dst.y;
+	x2 = x1 + r->width;
+	y2 = y1 + r->height;
+
+	if (x1 < 0)
+		x1 = 0;
+	if (y1 < 0)
+		y1 = 0;
+
+	if (x2 > op->dst.width)
+		x2 = op->dst.width;
+	if (y2 > op->dst.height)
+		y2 = op->dst.height;
+
+	if (x2 <= x1 || y2 <= y1)
+		return;
+
+	pixman_fill(op->dst.pixmap->devPrivate.ptr,
+		    op->dst.pixmap->devKind / sizeof(uint32_t),
+		    op->dst.pixmap->drawable.bitsPerPixel,
+		    x1, y1, x2-x1, y2-y1,
+		    op->u.blt.pixel);
+}
+
+fastcall static void
+blt_composite_fill_box__cpu(struct sna *sna,
+			    const struct sna_composite_op *op,
+			    const BoxRec *box)
+{
+	pixman_fill(op->dst.pixmap->devPrivate.ptr,
+		    op->dst.pixmap->devKind / sizeof(uint32_t),
+		    op->dst.pixmap->drawable.bitsPerPixel,
+		    box->x1, box->y1, box->x2-box->x1, box->y2-box->y1,
+		    op->u.blt.pixel);
+}
+
+static void
+blt_composite_fill_boxes__cpu(struct sna *sna,
+			      const struct sna_composite_op *op,
+			      const BoxRec *box, int n)
+{
+	do {
+		pixman_fill(op->dst.pixmap->devPrivate.ptr,
+			    op->dst.pixmap->devKind / sizeof(uint32_t),
+			    op->dst.pixmap->drawable.bitsPerPixel,
+			    box->x1, box->y1, box->x2-box->x1, box->y2-box->y1,
+			    op->u.blt.pixel);
+		box++;
+	} while (--n);
+}
+
 inline static void _sna_blt_fill_box(struct sna *sna,
 				     const struct sna_blt_state *blt,
 				     const BoxRec *box)
@@ -932,6 +991,15 @@ prepare_blt_clear(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
+	if (op->dst.bo == NULL) {
+		op->blt   = blt_composite_fill__cpu;
+		op->box   = blt_composite_fill_box__cpu;
+		op->boxes = blt_composite_fill_boxes__cpu;
+		op->done  = nop_done;
+		op->u.blt.pixel = 0;
+		return true;
+	}
+
 	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
@@ -958,6 +1026,15 @@ prepare_blt_fill(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
+	if (op->dst.bo == NULL) {
+		op->u.blt.pixel = get_solid_color(source, op->dst.format);
+		op->blt = blt_composite_fill__cpu;
+		op->box   = blt_composite_fill_box__cpu;
+		op->boxes = blt_composite_fill_boxes__cpu;
+		op->done = nop_done;
+		return true;
+	}
+
 	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
@@ -1143,6 +1220,9 @@ prepare_blt_copy(struct sna *sna,
 {
 	PixmapPtr src = op->u.blt.src_pixmap;
 
+	assert(op->dst.bo);
+	assert(kgem_bo_can_blt(&sna->kgem, op->dst.bo));
+
 	if (!kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
 		return false;
@@ -1189,6 +1269,100 @@ prepare_blt_copy(struct sna *sna,
 }
 
 fastcall static void
+blt_put_composite__cpu(struct sna *sna,
+		       const struct sna_composite_op *op,
+		       const struct sna_composite_rectangles *r)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
+		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+		   r->src.x + op->u.blt.sx, r->src.y + op->u.blt.sy,
+		   r->width, r->height,
+		   r->dst.x + op->dst.x, r->dst.y + op->dst.y);
+}
+
+fastcall static void
+blt_put_composite_box__cpu(struct sna *sna,
+			   const struct sna_composite_op *op,
+			   const BoxRec *box)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
+		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+		   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
+		   box->x2-box->x1, box->y2-box->y1,
+		   box->x1 + op->dst.x, box->y1 + op->dst.y);
+}
+
+static void
+blt_put_composite_boxes__cpu(struct sna *sna,
+			     const struct sna_composite_op *op,
+			     const BoxRec *box, int n)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	do {
+		memcpy_blt(src->devPrivate.ptr, dst->devPrivate.ptr,
+			   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+			   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
+			   box->x2-box->x1, box->y2-box->y1,
+			   box->x1 + op->dst.x, box->y1 + op->dst.y);
+		box++;
+	} while (--n);
+}
+
+fastcall static void
+blt_put_composite_with_alpha__cpu(struct sna *sna,
+				  const struct sna_composite_op *op,
+				  const struct sna_composite_rectangles *r)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
+		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+		   r->src.x + op->u.blt.sx, r->src.y + op->u.blt.sy,
+		   r->width, r->height,
+		   r->dst.x + op->dst.x, r->dst.y + op->dst.y,
+		   0xffffffff, op->u.blt.pixel);
+
+}
+
+fastcall static void
+blt_put_composite_box_with_alpha__cpu(struct sna *sna,
+				      const struct sna_composite_op *op,
+				      const BoxRec *box)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
+		   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+		   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
+		   box->x2-box->x1, box->y2-box->y1,
+		   box->x1 + op->dst.x, box->y1 + op->dst.y,
+		   0xffffffff, op->u.blt.pixel);
+}
+
+static void
+blt_put_composite_boxes_with_alpha__cpu(struct sna *sna,
+					const struct sna_composite_op *op,
+					const BoxRec *box, int n)
+{
+	PixmapPtr dst = op->dst.pixmap;
+	PixmapPtr src = op->u.blt.src_pixmap;
+	do {
+		memcpy_xor(src->devPrivate.ptr, dst->devPrivate.ptr,
+			   src->drawable.bitsPerPixel, src->devKind, dst->devKind,
+			   box->x1 + op->u.blt.sx, box->y1 + op->u.blt.sy,
+			   box->x2-box->x1, box->y2-box->y1,
+			   box->x1 + op->dst.x, box->y1 + op->dst.y,
+			   0xffffffff, op->u.blt.pixel);
+		box++;
+	} while (--n);
+}
+
+fastcall static void
 blt_put_composite(struct sna *sna,
 		  const struct sna_composite_op *op,
 		  const struct sna_composite_rectangles *r)
@@ -1439,46 +1613,34 @@ prepare_blt_put(struct sna *sna,
 	assert(src->devKind);
 	assert(src->devPrivate.ptr);
 
-	if (alpha_fixup) {
-		op->u.blt.pixel = alpha_fixup;
-		op->blt   = blt_put_composite_with_alpha;
-		op->box   = blt_put_composite_box_with_alpha;
-		op->boxes = blt_put_composite_boxes_with_alpha;
+	if (op->dst.bo) {
+		if (alpha_fixup) {
+			op->u.blt.pixel = alpha_fixup;
+			op->blt   = blt_put_composite_with_alpha;
+			op->box   = blt_put_composite_box_with_alpha;
+			op->boxes = blt_put_composite_boxes_with_alpha;
+		} else {
+			op->blt   = blt_put_composite;
+			op->box   = blt_put_composite_box;
+			op->boxes = blt_put_composite_boxes;
+		}
 	} else {
-		op->blt   = blt_put_composite;
-		op->box   = blt_put_composite_box;
-		op->boxes = blt_put_composite_boxes;
+		if (alpha_fixup) {
+			op->u.blt.pixel = alpha_fixup;
+			op->blt   = blt_put_composite_with_alpha__cpu;
+			op->box   = blt_put_composite_box_with_alpha__cpu;
+			op->boxes = blt_put_composite_boxes_with_alpha__cpu;
+		} else {
+			op->blt   = blt_put_composite__cpu;
+			op->box   = blt_put_composite_box__cpu;
+			op->boxes = blt_put_composite_boxes__cpu;
+		}
 	}
 	op->done = nop_done;
 
 	return true;
 }
 
-static void
-reduce_damage(struct sna_composite_op *op,
-	      int dst_x, int dst_y,
-	      int width, int height)
-{
-	BoxRec r;
-
-	if (op->damage == NULL || *op->damage == NULL)
-		return;
-
-	if ((*op->damage)->mode == DAMAGE_ALL) {
-		op->damage = NULL;
-		return;
-	}
-
-	r.x1 = dst_x + op->dst.x;
-	r.x2 = r.x1 + width;
-
-	r.y1 = dst_y + op->dst.y;
-	r.y2 = r.y1 + height;
-
-	if (sna_damage_contains_box__no_reduce(*op->damage, &r))
-		op->damage = NULL;
-}
-
 #define alphaless(format) PICT_FORMAT(PICT_FORMAT_BPP(format),		\
 				      PICT_FORMAT_TYPE(format),		\
 				      0,				\
@@ -1499,10 +1661,9 @@ sna_blt_composite(struct sna *sna,
 {
 	PictFormat src_format = src->format;
 	PixmapPtr src_pixmap;
-	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
 	int16_t tx, ty;
-	BoxRec box;
+	BoxRec dst_box, src_box;
 	uint32_t alpha_fixup;
 	bool was_clear;
 	bool ret;
@@ -1527,14 +1688,13 @@ sna_blt_composite(struct sna *sna,
 
 	was_clear = sna_drawable_is_clear(dst->pDrawable);
 	tmp->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
-	priv = sna_pixmap_move_to_gpu(tmp->dst.pixmap, MOVE_WRITE | MOVE_READ);
-	if (priv == NULL) {
-		DBG(("%s: dst not attached\n", __FUNCTION__));
-		return false;
-	}
-	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
+
+	dst_box.x1 = dst_x; dst_box.x2 = dst_x + width;
+	dst_box.y1 = dst_y; dst_box.y2 = dst_y + height;
+	bo = sna_drawable_use_bo(dst->pDrawable, PREFER_GPU, &dst_box, &tmp->damage);
+	if (bo && !kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
-		     __FUNCTION__, priv->gpu_bo->tiling, priv->gpu_bo->pitch));
+		     __FUNCTION__, bo->tiling, bo->pitch));
 		return false;
 	}
 
@@ -1543,19 +1703,24 @@ sna_blt_composite(struct sna *sna,
 	tmp->dst.height = tmp->dst.pixmap->drawable.height;
 	get_drawable_deltas(dst->pDrawable, tmp->dst.pixmap,
 			    &tmp->dst.x, &tmp->dst.y);
-	tmp->dst.bo = priv->gpu_bo;
-	if (!sna_damage_is_all(&priv->gpu_damage,
-			       tmp->dst.width, tmp->dst.height))
-		tmp->damage = &priv->gpu_damage;
-	if (width && height)
-		reduce_damage(tmp, dst_x, dst_y, width, height);
+	tmp->dst.bo = bo;
 
 	if (op == PictOpClear) {
 clear:
 		if (was_clear)
 			return prepare_blt_nop(sna, tmp);
-		else
-			return prepare_blt_clear(sna, tmp);
+
+		if (!tmp->dst.bo) {
+			RegionRec region;
+
+			region.extents = dst_box;
+			region.data = NULL;
+
+			if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+							     MOVE_INPLACE_HINT | MOVE_WRITE))
+				return false;
+		}
+		return prepare_blt_clear(sna, tmp);
 	}
 
 	if (is_solid(src)) {
@@ -1574,6 +1739,17 @@ clear:
 			return false;
 		}
 
+		if (!tmp->dst.bo) {
+			RegionRec region;
+
+			region.extents = dst_box;
+			region.data = NULL;
+
+			if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+							MOVE_INPLACE_HINT | MOVE_WRITE))
+				return false;
+		}
+
 		return prepare_blt_fill(sna, tmp, src);
 	}
 
@@ -1659,15 +1835,44 @@ clear:
 	     tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
 
 	ret = false;
-	box.x1 = x;
-	box.y1 = y;
-	box.x2 = x + width;
-	box.y2 = y + height;
-	bo = __sna_render_pixmap_bo(sna, src_pixmap, &box, true);
-	if (bo)
-		ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
-	if (!ret && (bo == NULL || fallback))
+	src_box.x1 = x;
+	src_box.y1 = y;
+	src_box.x2 = x + width;
+	src_box.y2 = y + height;
+	bo = __sna_render_pixmap_bo(sna, src_pixmap, &src_box, true);
+	if (bo) {
+		if (!tmp->dst.bo)
+			tmp->dst.bo = sna_drawable_use_bo(dst->pDrawable,
+							  FORCE_GPU | PREFER_GPU,
+							  &dst_box,
+							  &tmp->damage);
+
+		if (!tmp->dst.bo) {
+			DBG(("%s: fallback -- unaccelerated read back\n",
+			     __FUNCTION__));
+			kgem_bo_destroy(&sna->kgem, bo);
+		} else if (bo->snoop && tmp->dst.bo->snoop) {
+			DBG(("%s: fallback -- can not copy between snooped bo\n",
+			     __FUNCTION__));
+			kgem_bo_destroy(&sna->kgem, bo);
+		} else {
+			ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
+			if (fallback)
+				ret = prepare_blt_put(sna, tmp, alpha_fixup);
+		}
+	} else {
+		if (!tmp->dst.bo) {
+			RegionRec region;
+
+			region.extents = dst_box;
+			region.data = NULL;
+
+			if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+							MOVE_INPLACE_HINT | MOVE_WRITE))
+				return false;
+		}
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
+	}
 
 	return ret;
 }
@@ -2264,7 +2469,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 				assert(box->y1 >= 0);
 
 				*(uint64_t *)&b[0] = hdr;
-				*(uint64_t *)&b[2] = *(uint64_t *)box;
+				*(uint64_t *)&b[2] = *(const uint64_t *)box;
 				b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
 						      I915_GEM_DOMAIN_RENDER << 16 |
 						      I915_GEM_DOMAIN_RENDER |
commit be940856c74fbedd27997dd61e2a85959b321193
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 21:23:03 2012 +0100

    sna: Consider sample wraparound in each direction independently
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 0f1fa4b..7b54f23 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -592,14 +592,10 @@ sna_render_pixmap_bo(struct sna *sna,
 			if (box.y2 > pixmap->drawable.height)
 				box.y2 = pixmap->drawable.height;
 		} else {
-			if (box.x1 < 0 ||
-			    box.y1 < 0 ||
-			    box.x2 > pixmap->drawable.width ||
-			    box.y2 > pixmap->drawable.height) {
-				box.x1 = box.y1 = 0;
-				box.x2 = pixmap->drawable.width;
-				box.y2 = pixmap->drawable.height;
-			}
+			if (box.x1 < 0 || box.x2 > pixmap->drawable.width)
+				box.x1 = 0, box.x2 = pixmap->drawable.width;
+			if (box.y1 < 0 || box.y2 > pixmap->drawable.height)
+				box.y1 = 0, box.y2 = pixmap->drawable.height;
 		}
 	}
 
@@ -681,25 +677,12 @@ static int sna_render_picture_downsample(struct sna *sna,
 		if (box.y2 > pixmap->drawable.height)
 			box.y2 = pixmap->drawable.height;
 	} else {
-		if (box.x1 < 0 ||
-		    box.y1 < 0 ||
-		    box.x2 > pixmap->drawable.width ||
-		    box.y2 > pixmap->drawable.height) {
-			/* XXX tiled repeats? */
-			box.x1 = box.y1 = 0;
-			box.x2 = pixmap->drawable.width;
-			box.y2 = pixmap->drawable.height;
+		/* XXX tiled repeats? */
+		if (box.x1 < 0 || box.x2 > pixmap->drawable.width)
+			box.x1 = 0, box.x2 = pixmap->drawable.width;
+		if (box.y1 < 0 || box.y2 > pixmap->drawable.height)
+			box.y1 = 0, box.y2 = pixmap->drawable.height;
 
-			if (!channel->is_affine) {
-				DBG(("%s: fallback -- repeating project transform too large for texture\n",
-				     __FUNCTION__));
-				return sna_render_picture_fixup(sna,
-								picture,
-								channel,
-								x, y, w, h,
-								dst_x, dst_y);
-			}
-		}
 	}
 
 	sw = box.x2 - box.x1;
@@ -964,17 +947,10 @@ sna_render_picture_partial(struct sna *sna,
 		if (box.y2 > pixmap->drawable.height)
 			box.y2 = pixmap->drawable.height;
 	} else {
-		if (box.x1 < 0 ||
-		    box.y1 < 0 ||
-		    box.x2 > pixmap->drawable.width ||
-		    box.y2 > pixmap->drawable.height) {
-			box.x1 = box.y1 = 0;
-			box.x2 = pixmap->drawable.width;
-			box.y2 = pixmap->drawable.height;
-
-			if (!channel->is_affine)
-				return 0;
-		}
+		if (box.x1 < 0 || box.x2 > pixmap->drawable.width)
+			box.x1 = 0, box.x2 = pixmap->drawable.width;
+		if (box.y1 < 0 || box.y2 > pixmap->drawable.height)
+			box.y1 = 0, box.y2 = pixmap->drawable.height;
 	}
 
 	if (use_cpu_bo(sna, pixmap, &box, false)) {
@@ -1124,25 +1100,11 @@ sna_render_picture_extract(struct sna *sna,
 		if (box.y2 > pixmap->drawable.height)
 			box.y2 = pixmap->drawable.height;
 	} else {
-		if (box.x1 < 0 ||
-		    box.y1 < 0 ||
-		    box.x2 > pixmap->drawable.width ||
-		    box.y2 > pixmap->drawable.height) {
-			/* XXX tiled repeats? */
-			box.x1 = box.y1 = 0;
-			box.x2 = pixmap->drawable.width;
-			box.y2 = pixmap->drawable.height;
-
-			if (!channel->is_affine) {
-				DBG(("%s: fallback -- repeating project transform too large for texture\n",
-				     __FUNCTION__));
-				return sna_render_picture_fixup(sna,
-								picture,
-								channel,
-								x, y, ow, oh,
-								dst_x, dst_y);
-			}
-		}
+		/* XXX tiled repeats? */
+		if (box.x1 < 0 || box.x2 > pixmap->drawable.width)
+			box.x1 = 0, box.x2 = pixmap->drawable.width;
+		if (box.y1 < 0 || box.y2 > pixmap->drawable.height)
+			box.y1 = 0, box.y2 = pixmap->drawable.height;
 	}
 
 	w = box.x2 - box.x1;
commit 110c7ef7f6c31929affa038918e6ce087bccddc6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 17:42:51 2012 +0100

    sna/damage: Replace the damage with a larger box if subsumed
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index ff76688..0390bf9 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -562,6 +562,12 @@ static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
 	pixman_region_union(region, region, &u);
 }
 
+static bool box_contains_region(const BoxRec *b, const RegionRec *r)
+{
+	return (b->x1 <= r->extents.x1 && b->x2 >= r->extents.x2 &&
+		b->y1 <= r->extents.y1 && b->y2 >= r->extents.y2);
+}
+
 static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 					       const BoxRec *box)
 {
@@ -581,7 +587,8 @@ static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 		break;
 	}
 
-	if (REGION_NUM_RECTS(&damage->region) <= 1) {
+	if (REGION_NUM_RECTS(&damage->region) <= 1 ||
+	    box_contains_region(box, &damage->region)) {
 		_pixman_region_union_box(&damage->region, box);
 		assert(damage->region.extents.x2 > damage->region.extents.x1);
 		assert(damage->region.extents.y2 > damage->region.extents.y1);
commit 75a2fab766d8aed180ef795919e503db22c0e0fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 17:28:08 2012 +0100

    sna: Avoid forcing an upload for an unblittable bo unless on a fallback path
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index ecccbbb..31859b4 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1722,7 +1722,7 @@ gen2_render_composite(struct sna *sna,
 				 src, dst,
 				 src_x, src_y,
 				 dst_x, dst_y,
-				 width, height, tmp);
+				 width, height, tmp, true);
 #endif
 
 	/* Try to use the BLT engine unless it implies a
@@ -1735,7 +1735,7 @@ gen2_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height,
-			      tmp))
+			      tmp, false))
 		return true;
 
 	if (gen2_composite_fallback(sna, src, mask, dst))
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index c44359f..5b0894e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2812,7 +2812,7 @@ gen3_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height,
-			      tmp))
+			      tmp, false))
 		return true;
 
 	if (gen3_composite_fallback(sna, op, src, mask, dst))
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ab06295..a56ef79 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2271,7 +2271,8 @@ gen4_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	if (gen4_composite_fallback(sna, src, mask, dst))
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 488ac34..71e77a4 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2284,7 +2284,8 @@ gen5_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	if (gen5_composite_fallback(sna, src, mask, dst))
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index f8b1e71..710a35e 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2654,7 +2654,8 @@ gen6_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	if (gen6_composite_fallback(sna, src, mask, dst))
@@ -2679,7 +2680,8 @@ gen6_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 318cbef..8a281e5 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2756,7 +2756,8 @@ gen7_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	if (gen7_composite_fallback(sna, src, mask, dst))
@@ -2782,7 +2783,8 @@ gen7_render_composite(struct sna *sna,
 			      src, dst,
 			      src_x, src_y,
 			      dst_x, dst_y,
-			      width, height, tmp))
+			      width, height,
+			      tmp, false))
 		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 819d24a..9824202 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1494,7 +1494,8 @@ sna_blt_composite(struct sna *sna,
 		  int16_t x, int16_t y,
 		  int16_t dst_x, int16_t dst_y,
 		  int16_t width, int16_t height,
-		  struct sna_composite_op *tmp)
+		  struct sna_composite_op *tmp,
+		  bool fallback)
 {
 	PictFormat src_format = src->format;
 	PixmapPtr src_pixmap;
@@ -1665,7 +1666,7 @@ clear:
 	bo = __sna_render_pixmap_bo(sna, src_pixmap, &box, true);
 	if (bo)
 		ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
-	if (!ret)
+	if (!ret && (bo == NULL || fallback))
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
 
 	return ret;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 757b2f4..0f1fa4b 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -84,20 +84,19 @@ no_render_composite(struct sna *sna,
 {
 	DBG(("%s (op=%d, mask? %d)\n", __FUNCTION__, op, mask != NULL));
 
+	if (mask)
+		return false;
+
 	if (!is_gpu(dst->pDrawable) &&
 	    (src->pDrawable == NULL || !is_gpu(src->pDrawable)))
 		return false;
 
-	if (mask == NULL &&
-	    sna_blt_composite(sna,
-			      op, src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height,
-			      tmp))
-		return true;
-
-	return false;
+	return sna_blt_composite(sna,
+				 op, src, dst,
+				 src_x, src_y,
+				 dst_x, dst_y,
+				 width, height,
+				 tmp, true);
 	(void)mask_x;
 	(void)mask_y;
 }
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index b079178..442c78d 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -569,7 +569,8 @@ bool sna_blt_composite(struct sna *sna,
 		       int16_t src_x, int16_t src_y,
 		       int16_t dst_x, int16_t dst_y,
 		       int16_t width, int16_t height,
-		       struct sna_composite_op *tmp);
+		       struct sna_composite_op *tmp,
+		       bool fallback);
 bool sna_blt_composite__convert(struct sna *sna,
 				int x, int y,
 				int width, int height,
commit 110d5519f3523b1e2c50db637cdc4c5bc44c960a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 16:39:52 2012 +0100

    sna: Reduce subtracted damage earlier
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index c7fe4c6..ff76688 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1019,6 +1019,7 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 		return NULL;
 
 	if (!RegionNotEmpty(&damage->region)) {
+no_damage:
 		__sna_damage_destroy(damage);
 		return NULL;
 	}
@@ -1029,15 +1030,17 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 		return damage;
 
 	if (region_is_singular(region) &&
-	    box_contains(&region->extents, &damage->extents)) {
-		__sna_damage_destroy(damage);
-		return NULL;
-	}
+	    box_contains(&region->extents, &damage->extents))
+		goto no_damage;
 
 	if (damage->mode == DAMAGE_ALL) {
 		pixman_region_subtract(&damage->region,
 				       &damage->region,
 				       region);
+		if (damage->region.extents.x2 <= damage->region.extents.x1 ||
+		    damage->region.extents.y2 <= damage->region.extents.y1)
+			goto no_damage;
+
 		damage->extents = damage->region.extents;
 		damage->mode = DAMAGE_ADD;
 		return damage;
@@ -1049,16 +1052,18 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 			assert(RegionNotEmpty(&damage->region));
 		}
 
-		if (pixman_region_equal(region, &damage->region)) {
-			__sna_damage_destroy(damage);
-			return NULL;
-		}
+		if (pixman_region_equal(region, &damage->region))
+			goto no_damage;
 
 		if (region_is_singular(&damage->region) &&
 		    region_is_singular(region)) {
 			pixman_region_subtract(&damage->region,
 					       &damage->region,
 					       region);
+			if (damage->region.extents.x2 <= damage->region.extents.x1 ||
+			    damage->region.extents.y2 <= damage->region.extents.y1)
+				goto no_damage;
+
 			damage->extents = damage->region.extents;
 			assert(pixman_region_not_empty(&damage->region));
 			return damage;
commit 8812e8b6e89e6432a6a768a0566ce4c153e9b256
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 16:21:13 2012 +0100

    sna: Reduce damage after a large composite operation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 72a5a94..d47479b 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -412,7 +412,12 @@ static void apply_damage(struct sna_composite_op *op, RegionPtr region)
 		RegionTranslate(region, op->dst.x, op->dst.y);
 
 	assert_pixmap_contains_box(op->dst.pixmap, RegionExtents(region));
-	sna_damage_add(op->damage, region);
+	if (region->data == NULL &&
+	    region->extents.x2 - region->extents.x1 == op->dst.width &&
+	    region->extents.y2 - region->extents.y1 == op->dst.height)
+		sna_damage_all(op->damage, op->dst.width, op->dst.height);
+	else
+		sna_damage_add(op->damage, region);
 }
 
 static inline bool use_cpu(PixmapPtr pixmap, struct sna_pixmap *priv,
commit e361627b90ea6bf2f9a8c46cf8debe562fdf4f09
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 16:16:17 2012 +0100

    sna/damage: Add some more sanity checks for creating empty regions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 7d78372..c7fe4c6 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -552,6 +552,8 @@ static void damage_union(struct sna_damage *damage, const BoxRec *box)
 		if (damage->extents.y2 < box->y2)
 			damage->extents.y2 = box->y2;
 	}
+	assert(damage->extents.x2 > damage->extents.x1);
+	assert(damage->extents.y2 > damage->extents.y1);
 }
 
 static void _pixman_region_union_box(RegionRec *region, const BoxRec *box)
@@ -581,6 +583,8 @@ static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		_pixman_region_union_box(&damage->region, box);
+		assert(damage->region.extents.x2 > damage->region.extents.x1);
+		assert(damage->region.extents.y2 > damage->region.extents.y1);
 		damage_union(damage, box);
 		return damage;
 	}
@@ -616,6 +620,8 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		pixman_region_union(&damage->region, &damage->region, region);
+		assert(damage->region.extents.x2 > damage->region.extents.x1);
+		assert(damage->region.extents.y2 > damage->region.extents.y1);
 		damage_union(damage, &region->extents);
 		return damage;
 	}
@@ -645,6 +651,8 @@ fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(damage->region.extents.x2 > damage->region.extents.x1);
+	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
 	return damage;
 }
@@ -726,6 +734,8 @@ struct sna_damage *_sna_damage_add_boxes(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(damage->region.extents.x2 > damage->region.extents.x1);
+	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
 	return damage;
 }
@@ -811,6 +821,8 @@ struct sna_damage *_sna_damage_add_rectangles(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(damage->region.extents.x2 > damage->region.extents.x1);
+	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
 	return damage;
 }
@@ -893,6 +905,8 @@ struct sna_damage *_sna_damage_add_points(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(damage->region.extents.x2 > damage->region.extents.x1);
+	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
 	return damage;
 }
@@ -919,6 +933,8 @@ fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage,
 
 	ErrorF("  = %s\n",
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage));
+	assert(damage->region.extents.x2 > damage->region.extents.x1);
+	assert(damage->region.extents.y2 > damage->region.extents.y1);
 
 	return damage;
 }
commit 10f334872e9dd190e18c768219e60815acabe4d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 15:49:02 2012 +0100

    sna: Add damage for the whole unaligned trapezoid not per component
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e63981f..a1141f1 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2739,18 +2739,14 @@ composite_unaligned_box(struct sna *sna,
 
 		pixman_region_init_rects(&region, box, 1);
 		RegionIntersect(&region, &region, clip);
-		if (REGION_NUM_RECTS(&region)) {
+		if (REGION_NUM_RECTS(&region))
 			tmp->boxes(sna, tmp,
 				   REGION_RECTS(&region),
 				   REGION_NUM_RECTS(&region),
 				   opacity);
-			apply_damage(&tmp->base, &region);
-		}
 		pixman_region_fini(&region);
-	} else {
+	} else
 		tmp->box(sna, tmp, box, opacity);
-		apply_damage_box(&tmp->base, box);
-	}
 }
 
 static void
@@ -2869,6 +2865,26 @@ composite_unaligned_trap(struct sna *sna,
 						     grid_coverage(SAMPLES_Y, trap->bottom),
 						     clip);
 	}
+
+	if (tmp->base.damage) {
+		BoxRec box;
+
+		box.x1 = dx + pixman_fixed_to_int(trap->left.p1.x);
+		box.x2 = dx + pixman_fixed_to_int(trap->right.p1.x);
+		box.y1 = y1;
+		box.y2 = y2 + (pixman_fixed_frac(trap->bottom) != 0);
+
+		if (clip) {
+			pixman_region16_t region;
+
+			pixman_region_init_rects(&region, &box, 1);
+			RegionIntersect(&region, &region, clip);
+			if (REGION_NUM_RECTS(&region))
+				apply_damage(&tmp->base, &region);
+			RegionUninit(&region);
+		} else
+			apply_damage_box(&tmp->base, &box);
+	}
 }
 
 inline static void
commit 5ff6198c9346d84717bac28980329b048f4406e8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 11:59:56 2012 +0100

    2.20.4 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 5a9c495..1ad769e 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,33 @@
+Release 2.20.4 (2012-08-18)
+===========================
+Continuing the small bugfix releases, the only real feature is initial
+enabling for Haswell for the purpose of rendering verification and
+validation - by no means is it complete!
+
+Bugs fixed since 2.20.3:
+
+  * Some potential errors along failure paths found by a static analyser
+  with the help of Zdenek Kabelac.
+
+  * Eliminate zero-sized rectangles from PolyFillRectangles as the code
+  assumes that they did not exist and so caused corruption.
+
+  * Remove the UXA warning for failing to tile the front buffer if it is
+  disallowed by hardware, and so expected.
+
+  * Fix the validation of the XV pipe parameter.
+
+  * Fix 8x8 tiled pattern fills
+  https://bugs.freedesktop.org/show_bug.cgi?id=53353
+
+  * Fix compile failure when using --with-builderstring
+
+  * Restore w/a flush for gen4 fill/copy/video, fortunately rare
+  operations as at least for fill/copy we prefer to use the BLT.
+  https://bugs.freedesktop.org/show_bug.cgi?id=53119
+
+  * Restore preferred use of the RENDER ring for SNB+ DRI copies.
+
 Release 2.20.3 (2012-08-04)
 ===========================
 Just a minor bugfix for gen4 chipsets (965gm, gm45 and friends) that
diff --git a/configure.ac b/configure.ac
index 7ffbb75..a9c6336 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.3],
+        [2.20.4],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit bc77a466531fdbdd21e9354af567e5215a66edf9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 18 11:09:23 2012 +0100

    sna: Silence a couple of recent compiler warnings
    
    sna_accel.c: In function 'sna_poly_fill_rect_extents':
    sna_accel.c:11438:11: warning: unused variable 'v' [-Wunused-variable]
    
    sna_blt.c: In function 'sna_blt_composite__convert':
    sna_blt.c:1712:3: warning: format '%s' expects a matching 'char *' argument [-Wformat]
    sna_blt.c:1738:3: warning: format '%x' expects argument of type 'unsigned int', but argument 4 has type 'CARD32' [-Wformat]
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cf82cc5..31a9d3d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11435,8 +11435,6 @@ sna_poly_fill_rect_extents(DrawablePtr drawable, GCPtr gc,
 	r++;
 
 	while (--n) {
-		int32_t v;
-
 		if (r->width == 0 || r->height == 0)
 			goto slow;
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 2d7a4aa..819d24a 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1709,7 +1709,7 @@ sna_blt_composite__convert(struct sna *sna,
 	}
 
 	if (tmp->src.transform) {
-		DBG(("%s: transforms not handled by the BLT\n"));
+		DBG(("%s: transforms not handled by the BLT\n", __FUNCTION__));
 		return false;
 	}
 
@@ -1738,7 +1738,7 @@ sna_blt_composite__convert(struct sna *sna,
 		DBG(("%s: incompatible src/dst formats src=%08x, dst=%08x\n",
 		     __FUNCTION__,
 		     (unsigned)tmp->src.pict_format,
-		     tmp->dst.format));
+		     (unsigned)tmp->dst.format));
 		return false;
 	}
 
commit d1da2c71461798bb027a260ff8480b71ebffde26
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 21:15:54 2012 +0100

    sna: Correct assertion for StoreColors
    
    Fixing the assertion reveals that it was bogus!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 31c3c68..cf82cc5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13899,7 +13899,7 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->UninstallColormap = miUninstallColormap;
 	screen->ListInstalledColormaps = miListInstalledColormaps;
 	screen->ResolveColor = miResolveColor;
-	assert(screen->StoreColors == PictureStoreColors);
+	assert(screen->StoreColors == NULL);
 	screen->StoreColors = sna_store_colors;
 	screen->BitmapToRegion = fbBitmapToRegion;
 
commit 31ddaa0cfbde083aef42d9fdb5ed13bb4edc36c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 21:13:52 2012 +0100

    sna: Fixup DBG after 7f5a9e3
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b403597..31c3c68 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11416,7 +11416,7 @@ sna_poly_fill_rect_extents(DrawablePtr drawable, GCPtr gc,
 		return 0;
 
 	DBG(("%s: [0] = (%d, %d)x(%d, %d)\n",
-	     __FUNCTION__, r->x, r->y, r->width, r->height));
+	     __FUNCTION__, (*_r)->x, (*_r)->y, (*_r)->width, (*_r)->height));
 
 	/* Remove any zero-size rectangles from the array */
 	while (*_n && ((*_r)->width == 0 || (*_r)->height == 0))
commit b8bc56c12c6fdc559354fbbf96b6e043a491aa8f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 21:00:57 2012 +0100

    uxa/gen3: Simplify simple fill shader generation
    
    Suggested-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index acbd82c..6d3400e 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -779,12 +779,8 @@ i915_composite_emit_shader(intel_screen_private *intel, CARD8 op)
 		else
 			i915_fs_texldp(src_reg, FS_S0, FS_T0);
 
-		if (src_reg != FS_OC) {
-			if (dest_is_alpha)
-				i915_fs_mov(FS_OC, i915_fs_operand(src_reg, W, W, W, W));
-			else
-				i915_fs_mov(FS_OC, i915_fs_operand_reg(src_reg));
-		}
+		if (src_reg != FS_OC)
+			i915_fs_mov(FS_OC, i915_fs_operand(src_reg, W, W, W, W));
 	} else {
 		i915_fs_dcl(FS_T1);
 		i915_fs_dcl(FS_S1);
commit 182581edde356a21a0681de526f83884e1a0de5d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:59:09 2012 +0100

    uxa/glamor: Check for failed prepares
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-accel.c b/uxa/uxa-accel.c
index 4e5bac3..76425fe 100644
--- a/uxa/uxa-accel.c
+++ b/uxa/uxa-accel.c
@@ -52,12 +52,13 @@ uxa_fill_spans(DrawablePtr pDrawable, GCPtr pGC, int n,
 	int off_x, off_y;
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_fill_spans_nf(pDrawable,
-					  pGC, n, ppt, pwidth, fSorted);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_fill_spans_nf(pDrawable,
+						  pGC, n, ppt, pwidth, fSorted);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (!ok)
 			goto fallback;
@@ -213,14 +214,14 @@ uxa_put_image(DrawablePtr pDrawable, GCPtr pGC, int depth, int x, int y,
 	uxa_screen_t *uxa_screen = uxa_get_screen(pDrawable->pScreen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
-
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_put_image_nf(pDrawable,
-					 pGC, depth, x, y, w, h,
-					 leftPad, format, bits);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		int ok = 0;
 
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_put_image_nf(pDrawable,
+						 pGC, depth, x, y, w, h,
+						 leftPad, format, bits);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 		if (!ok)
 			goto fallback;
 
@@ -375,16 +376,18 @@ uxa_copy_n_to_n(DrawablePtr pSrcDrawable,
 	PixmapPtr pSrcPixmap, pDstPixmap;
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
-
-		uxa_prepare_access(pSrcDrawable, UXA_GLAMOR_ACCESS_RO);
-		uxa_prepare_access(pDstDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_copy_n_to_n_nf(pSrcDrawable, pDstDrawable,
-					   pGC, pbox, nbox, dx, dy,
-					   reverse, upsidedown, bitplane,
-					   closure);
-		uxa_finish_access(pDstDrawable, UXA_GLAMOR_ACCESS_RW);
-		uxa_finish_access(pSrcDrawable, UXA_GLAMOR_ACCESS_RO);
+		int ok = 0;
+
+		if (uxa_prepare_access(pSrcDrawable, UXA_GLAMOR_ACCESS_RO)) {
+			if (uxa_prepare_access(pDstDrawable, UXA_GLAMOR_ACCESS_RW)) {
+				ok = glamor_copy_n_to_n_nf(pSrcDrawable, pDstDrawable,
+							   pGC, pbox, nbox, dx, dy,
+							   reverse, upsidedown, bitplane,
+							   closure);
+				uxa_finish_access(pDstDrawable, UXA_GLAMOR_ACCESS_RW);
+			}
+			uxa_finish_access(pSrcDrawable, UXA_GLAMOR_ACCESS_RO);
+		}
 
 		if (!ok)
 			goto fallback;
@@ -561,11 +564,12 @@ uxa_poly_point(DrawablePtr pDrawable, GCPtr pGC, int mode, int npt,
 	uxa_screen_t *uxa_screen = uxa_get_screen(pDrawable->pScreen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_poly_point_nf(pDrawable, pGC, mode, npt, ppt);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_poly_point_nf(pDrawable, pGC, mode, npt, ppt);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (ok)
 			return;
@@ -611,11 +615,12 @@ uxa_poly_lines(DrawablePtr pDrawable, GCPtr pGC, int mode, int npt,
 	uxa_screen_t *uxa_screen = uxa_get_screen(pDrawable->pScreen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_poly_lines_nf(pDrawable, pGC, mode, npt, ppt);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_poly_lines_nf(pDrawable, pGC, mode, npt, ppt);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (ok)
 			return;
@@ -684,11 +689,12 @@ uxa_poly_segment(DrawablePtr pDrawable, GCPtr pGC, int nseg, xSegment * pSeg)
 	uxa_screen_t *uxa_screen = uxa_get_screen(pDrawable->pScreen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_poly_segment_nf(pDrawable, pGC, nseg, pSeg);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_poly_segment_nf(pDrawable, pGC, nseg, pSeg);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (ok)
 			return;
@@ -758,11 +764,12 @@ uxa_poly_fill_rect(DrawablePtr pDrawable,
 	int n;
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_poly_fill_rect_nf(pDrawable, pGC, nrect, prect);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_poly_fill_rect_nf(pDrawable, pGC, nrect, prect);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (!ok)
 			uxa_check_poly_fill_rect(pDrawable, pGC, nrect, prect);
@@ -883,12 +890,13 @@ uxa_get_spans(DrawablePtr pDrawable,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_get_spans_nf(pDrawable, wMax, ppt,
-					 pwidth, nspans, pdstStart);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_get_spans_nf(pDrawable, wMax, ppt,
+						 pwidth, nspans, pdstStart);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (!ok)
 			goto fallback;
@@ -908,12 +916,13 @@ uxa_set_spans(DrawablePtr pDrawable, GCPtr gc, char *src,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_set_spans_nf(pDrawable, gc, src,
-					 points, widths, n, sorted);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_set_spans_nf(pDrawable, gc, src,
+						 points, widths, n, sorted);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (!ok)
 			goto fallback;
@@ -934,15 +943,17 @@ uxa_copy_plane(DrawablePtr pSrc, DrawablePtr pDst, GCPtr pGC,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 		RegionPtr region;
 
-		uxa_prepare_access(pDst, UXA_GLAMOR_ACCESS_RW);
-		uxa_prepare_access(pSrc, UXA_GLAMOR_ACCESS_RO);
-		ok = glamor_copy_plane_nf(pSrc, pDst, pGC, srcx, srcy, w, h,
-					   dstx, dsty, bitPlane, &region);
-		uxa_finish_access(pSrc, UXA_GLAMOR_ACCESS_RO);
-		uxa_finish_access(pDst, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDst, UXA_GLAMOR_ACCESS_RW)) {
+			if (uxa_prepare_access(pSrc, UXA_GLAMOR_ACCESS_RO)) {
+				ok = glamor_copy_plane_nf(pSrc, pDst, pGC, srcx, srcy, w, h,
+							  dstx, dsty, bitPlane, &region);
+				uxa_finish_access(pSrc, UXA_GLAMOR_ACCESS_RO);
+			}
+			uxa_finish_access(pDst, UXA_GLAMOR_ACCESS_RW);
+		}
 		if (!ok)
 			goto fallback;
 		return region;
@@ -962,11 +973,12 @@ uxa_image_glyph_blt(DrawablePtr pDrawable, GCPtr pGC,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_image_glyph_blt_nf(pDrawable, pGC, x, y, nglyph, ppci, pglyphBase);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_image_glyph_blt_nf(pDrawable, pGC, x, y, nglyph, ppci, pglyphBase);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 		if (!ok)
 			goto fallback;
 		return;
@@ -985,11 +997,12 @@ uxa_poly_glyph_blt(DrawablePtr pDrawable, GCPtr pGC,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_poly_glyph_blt_nf(pDrawable, pGC, x, y, nglyph, ppci, pglyphBase);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_poly_glyph_blt_nf(pDrawable, pGC, x, y, nglyph, ppci, pglyphBase);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 		if (!ok)
 			goto fallback;
 		return;
@@ -1007,13 +1020,15 @@ uxa_push_pixels(GCPtr pGC, PixmapPtr pBitmap,
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
+		int ok = 0;
 
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		uxa_prepare_access(&pBitmap->drawable, UXA_GLAMOR_ACCESS_RO);
-		ok = glamor_push_pixels_nf(pGC, pBitmap, pDrawable, w, h, x, y);
-		uxa_finish_access(&pBitmap->drawable, UXA_GLAMOR_ACCESS_RO);
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			if (uxa_prepare_access(&pBitmap->drawable, UXA_GLAMOR_ACCESS_RO)) {
+				ok = glamor_push_pixels_nf(pGC, pBitmap, pDrawable, w, h, x, y);
+				uxa_finish_access(&pBitmap->drawable, UXA_GLAMOR_ACCESS_RO);
+			}
+			uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 		if (!ok)
 			goto fallback;
 		return;
@@ -1231,10 +1246,12 @@ uxa_get_image(DrawablePtr pDrawable, int x, int y, int w, int h,
 	Box.y2 = Box.y1 + h;
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_get_image_nf(pDrawable, x, y, w, h,
-					format, planeMask, d);
-		uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		ok = 0;
+		if (uxa_prepare_access(pDrawable, UXA_GLAMOR_ACCESS_RW)) {
+			ok = glamor_get_image_nf(pDrawable, x, y, w, h,
+						 format, planeMask, d);
+			uxa_finish_access(pDrawable, UXA_GLAMOR_ACCESS_RW);
+		}
 
 		if (!ok)
 			goto fallback;
commit c289b607d1526d8d3625fa84c093552a9f3ea168
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:54:40 2012 +0100

    uxa: Check for failed mmapping of the scanout
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 5aad062..3745ff0 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -1145,7 +1145,8 @@ Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 	if (!uxa_resources_init(screen))
 		return FALSE;
 
-	drm_intel_gem_bo_map_gtt(bo);
+	if (drm_intel_gem_bo_map_gtt(bo))
+		return FALSE;
 
 	pixmap = screen->GetScreenPixmap(screen);
 	intel_set_pixmap_bo(pixmap, bo);
commit 32ec8b979bc8cf7d8ce351ca752b806d42bc1c0f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:53:08 2012 +0100

    test/dri2: Discard error returns from _XReply
    
    This is only test code, so keep the static analyser quiet
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/dri2.c b/test/dri2.c
index 86a0a74..0e2003c 100644
--- a/test/dri2.c
+++ b/test/dri2.c
@@ -475,7 +475,7 @@ DRI2CopyRegion(Display * dpy, XID drawable, XserverRegion region,
    req->dest = dest;
    req->src = src;
 
-   _XReply(dpy, (xReply *) & rep, 0, xFalse);
+   (void) _XReply(dpy, (xReply *) & rep, 0, xFalse);
 
    UnlockDisplay(dpy);
    SyncHandle();
@@ -517,7 +517,7 @@ uint64_t DRI2SwapBuffers(Display *dpy, XID drawable,
     req->drawable = drawable;
     load_swap_req(req, target_msc, divisor, remainder);
 
-    _XReply(dpy, (xReply *)&rep, 0, xFalse);
+    (void) _XReply(dpy, (xReply *)&rep, 0, xFalse);
 
     count = vals_to_card64(rep.swap_lo, rep.swap_hi);
 
commit 5675c36e0dca92b8d24235d82cd0e87f82fdc7b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:44:32 2012 +0100

    sna: Check for failure to pin the front buffer
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 431b5de..b35a7cc 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -964,6 +964,9 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 		DBG(("%s: attaching to framebuffer\n", __FUNCTION__));
 		sna_crtc_disable_shadow(sna, sna_crtc);
 		bo = sna_pixmap_pin(sna->front);
+		if (bo == NULL)
+			return NULL;
+
 		if (!get_fb(sna, bo, scrn->virtualX, scrn->virtualY))
 			return NULL;
 
commit c9db2c74ab3b482489bec63b11df80bb35e21aca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:42:50 2012 +0100

    sna/gen2: Add break rather than fallthrough
    
    The fall-through in this instance is irrelevant, and the static
    analysers complain about the uncommented fall-through. Silence the
    analyser by removing the fall-through.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 153c7ae..ecccbbb 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1774,6 +1774,7 @@ gen2_render_composite(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen2_composite_solid_init(sna, &tmp->src, 0);
+		break;
 	case 1:
 		if (mask == NULL && tmp->src.bo &&
 		    sna_blt_composite__convert(sna,
commit e0abbc3ccafb51d6b2114e1c7d6dc2eda3a8c1f4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:40:17 2012 +0100

    i810: Query PortPriv for the surface properties
    
    Avoid the NULL dereferences.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 4ebad66..bbaa3ce 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -1283,7 +1283,7 @@ I810GetSurfaceAttribute(
     Atom attribute,
     INT32 *value
 ){
-    return I810GetPortAttribute(pScrn, attribute, value, NULL);
+    return I810GetPortAttribute(pScrn, attribute, value, GET_PORT_PRIVATE(pScrn));
 }
 
 static int
@@ -1292,7 +1292,7 @@ I810SetSurfaceAttribute(
     Atom attribute,
     INT32 value
 ){
-    return I810SetPortAttribute(pScrn, attribute, value, NULL);
+    return I810SetPortAttribute(pScrn, attribute, value, GET_PORT_PRIVATE(pScrn));
 }
 
 
commit c09518f98d5c533860b084506d9ee2c819cca52f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:32:53 2012 +0100

    sna: Fix assignment inside assertion (should be an equality check)
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 661dde0..b403597 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13899,7 +13899,7 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->UninstallColormap = miUninstallColormap;
 	screen->ListInstalledColormaps = miListInstalledColormaps;
 	screen->ResolveColor = miResolveColor;
-	assert(screen->StoreColors = PictureStoreColors);
+	assert(screen->StoreColors == PictureStoreColors);
 	screen->StoreColors = sna_store_colors;
 	screen->BitmapToRegion = fbBitmapToRegion;
 
commit 7d3e4328d4f4421178377933ca98fb80daa85c76
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:32:11 2012 +0100

    sna/glyphs: Fix potential leak of glyph extents (for many lists of many formats)
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index db9af14..8cbe39c 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1173,8 +1173,10 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 		extents.x2 = 0;
 		extents.y2 = 0;
 
-		if (format->format != list->format->format)
-			return NULL;
+		if (format->format != list->format->format) {
+			format = NULL;
+			goto out;
+		}
 
 		x += list->xOff;
 		y += list->yOff;
commit 9b00c82df78cb8820c0e32fbecc7c933eed81d52
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 20:30:52 2012 +0100

    sna: Remember alpha-fixup for BLT upload
    
    Still dubious whether this is truly beneficial...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 77fca3d..2d7a4aa 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1439,9 +1439,6 @@ prepare_blt_put(struct sna *sna,
 	assert(src->devKind);
 	assert(src->devPrivate.ptr);
 
-	if (alpha_fixup)
-		return false; /* XXX */
-
 	if (alpha_fixup) {
 		op->u.blt.pixel = alpha_fixup;
 		op->blt   = blt_put_composite_with_alpha;
commit 94871944a0e1351273d6029df7bf0300f31a8571
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Aug 16 00:07:58 2012 +0100

    sna: Avoid conflating tearfree and randr shadow modes
    
    For ordinary TearFree we do not want to adjust the crtc offsets.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index c36dda9..431b5de 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -64,6 +64,7 @@ struct sna_crtc {
 	uint32_t cursor;
 	bool shadow;
 	bool fallback_shadow;
+	bool transform;
 	uint8_t id;
 	uint8_t pipe;
 	uint8_t plane;
@@ -536,7 +537,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	VG_CLEAR(arg);
 	arg.crtc_id = sna_crtc->id;
 	arg.fb_id = fb_id(sna_crtc->bo);
-	if (sna_crtc->shadow) {
+	if (sna_crtc->transform) {
+		assert(sna_crtc->shadow);
 		arg.x = 0;
 		arg.y = 0;
 	} else {
@@ -548,7 +550,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	arg.mode = sna_crtc->kmode;
 	arg.mode_valid = 1;
 
-	DBG(("%s: applying crtc [%d] mode=%dx%d+%d+%d@%d, fb=%d%s update to %d outputs\n",
+	DBG(("%s: applying crtc [%d] mode=%dx%d+%d+%d@%d, fb=%d%s%s update to %d outputs\n",
 	     __FUNCTION__, sna_crtc->id,
 	     arg.mode.hdisplay,
 	     arg.mode.vdisplay,
@@ -556,6 +558,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     arg.mode.clock,
 	     arg.fb_id,
 	     sna_crtc->shadow ? " [shadow]" : "",
+	     sna_crtc->transform ? " [transformed]" : "",
 	     output_count));
 
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg))
@@ -907,6 +910,7 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 	struct sna *sna = to_sna(scrn);
 	struct kgem_bo *bo;
 
+	sna_crtc->transform = false;
 	if (use_shadow(sna, crtc)) {
 		if (!sna_crtc_enable_shadow(sna, sna_crtc))
 			return NULL;
@@ -926,6 +930,7 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 			return NULL;
 		}
 
+		sna_crtc->transform = true;
 		return bo;
 	} else if (sna->flags & SNA_TEAR_FREE) {
 		DBG(("%s: tear-free updates requested\n", __FUNCTION__));
@@ -962,6 +967,7 @@ static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
 		if (!get_fb(sna, bo, scrn->virtualX, scrn->virtualY))
 			return NULL;
 
+		assert(!sna_crtc->shadow);
 		return kgem_bo_reference(bo);
 	}
 }
@@ -990,7 +996,7 @@ static void sna_crtc_randr(xf86CrtcPtr crtc)
 	filter = NULL;
 	params = NULL;
 	nparams = 0;
-	if (sna_crtc->shadow) {
+	if (sna_crtc->transform) {
 #ifdef RANDR_12_INTERFACE
 		if (transform) {
 			if (transform->nparams) {
@@ -1076,6 +1082,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct kgem_bo *saved_bo, *bo;
 	struct drm_mode_modeinfo saved_kmode;
+	bool saved_transform;
 
 	xf86DrvMsg(crtc->scrn->scrnIndex, X_INFO,
 		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
@@ -1097,6 +1104,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 
 	saved_kmode = sna_crtc->kmode;
 	saved_bo = sna_crtc->bo;
+	saved_transform = sna_crtc->transform;
 
 	sna_crtc->fallback_shadow = false;
 retry: /* Attach per-crtc pixmap or direct */
@@ -1117,6 +1125,7 @@ retry: /* Attach per-crtc pixmap or direct */
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(errno));
 
+		sna_crtc->transform = saved_transform;
 		sna_crtc->bo = saved_bo;
 		sna_crtc->kmode = saved_kmode;
 		return FALSE;
commit 1a389842347bab9d91240444e161589071eb9a47
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 15 23:07:16 2012 +0100

    sna: Avoid queuing a pageflip on a DPMS off pipe
    
    If the pipe is not running, attempting to queue a pageflip will result
    in an error and us disabling the output in retaliation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 841d841..c36dda9 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -141,6 +141,7 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	     __FUNCTION__, width, height, scrn->depth, scrn->bitsPerPixel));
 
 	assert(bo->tiling != I915_TILING_Y);
+	assert((bo->pitch & 63) == 0);
 
 	VG_CLEAR(arg);
 	arg.width = width;
@@ -2943,38 +2944,42 @@ void sna_mode_redisplay(struct sna *sna)
 
 		for (i = 0; i < config->num_crtc; i++) {
 			struct sna_crtc *crtc = config->crtc[i]->driver_private;
-			struct drm_mode_crtc_page_flip arg;
 
-			DBG(("%s: crtc %d active? %d\n",
-			     __FUNCTION__, i, crtc->bo != NULL));
+			DBG(("%s: crtc %d [%d, pipe=%d] active? %d\n",
+			     __FUNCTION__, i, crtc->id, crtc->pipe, crtc->bo != NULL));
 			if (crtc->bo != old)
 				continue;
 
-			arg.crtc_id = crtc->id;
-			arg.fb_id = get_fb(sna, new,
-					   sna->scrn->virtualX,
-					   sna->scrn->virtualY);
-			if (arg.fb_id == 0)
-				goto disable;
-
-			/* Only the reference crtc will finally deliver its page flip
-			 * completion event. All other crtc's events will be discarded.
-			 */
-			arg.user_data = 0;
-			arg.flags = DRM_MODE_PAGE_FLIP_EVENT;
-			arg.reserved = 0;
-
-			if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_PAGE_FLIP, &arg)) {
-				DBG(("%s: flip [fb=%d] on crtc %d [%d] failed - %d\n",
-				     __FUNCTION__, arg.fb_id, i, crtc->id, errno));
+			assert(config->crtc[i]->enabled);
+
+			if (crtc->dpms_mode == DPMSModeOn) {
+				struct drm_mode_crtc_page_flip arg;
+				arg.crtc_id = crtc->id;
+				arg.fb_id = get_fb(sna, new,
+						   sna->scrn->virtualX,
+						   sna->scrn->virtualY);
+				if (arg.fb_id == 0)
+					goto disable;
+
+				/* Only the reference crtc will finally deliver its page flip
+				 * completion event. All other crtc's events will be discarded.
+				 */
+				arg.user_data = 0;
+				arg.flags = DRM_MODE_PAGE_FLIP_EVENT;
+				arg.reserved = 0;
+
+				if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_PAGE_FLIP, &arg)) {
+					DBG(("%s: flip [fb=%d] on crtc %d [%d, pipe=%d] failed - %d\n",
+					     __FUNCTION__, arg.fb_id, i, crtc->id, crtc->pipe, errno));
 disable:
-				sna_crtc_disable(config->crtc[i]);
-				continue;
+					sna_crtc_disable(config->crtc[i]);
+					continue;
+				}
+				sna->mode.shadow_flip++;
 			}
 
 			kgem_bo_destroy(&sna->kgem, old);
 			crtc->bo = kgem_bo_reference(new);
-			sna->mode.shadow_flip++;
 		}
 
 		if (sna->mode.shadow) {
commit 615739556dd1cc4565eb1c47f93fe8abd697802f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 15 20:10:52 2012 +0100

    sna: Only continue to update the shadow buffer if the flips succeeded
    
    If the flip fail, we disable the crtc and may end up disabling the
    output, removing the shadow (and old scanout bo) in the process.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 7754efa..841d841 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2977,23 +2977,25 @@ disable:
 			sna->mode.shadow_flip++;
 		}
 
-		/* XXX only works if the kernel stalls fwrites to the current
-		 * scanout whilst the flip is pending
-		 */
-		while (sna->mode.shadow_flip)
-			sna_mode_wakeup(sna);
-		(void)sna->render.copy_boxes(sna, GXcopy,
-					     sna->front, new, 0, 0,
-					     sna->front, old, 0, 0,
-					     REGION_RECTS(region),
-					     REGION_NUM_RECTS(region),
-					     COPY_LAST);
-		kgem_submit(&sna->kgem);
-
-		sna_pixmap(sna->front)->gpu_bo = old;
-		sna->mode.shadow = new;
-
-		new->flush = old->flush;
+		if (sna->mode.shadow) {
+			/* XXX only works if the kernel stalls fwrites to the current
+			 * scanout whilst the flip is pending
+			 */
+			while (sna->mode.shadow_flip)
+				sna_mode_wakeup(sna);
+			(void)sna->render.copy_boxes(sna, GXcopy,
+						     sna->front, new, 0, 0,
+						     sna->front, old, 0, 0,
+						     REGION_RECTS(region),
+						     REGION_NUM_RECTS(region),
+						     COPY_LAST);
+			kgem_submit(&sna->kgem);
+
+			sna_pixmap(sna->front)->gpu_bo = old;
+			sna->mode.shadow = new;
+
+			new->flush = old->flush;
+		}
 
 		RegionEmpty(region);
 	}
commit 2554d0d76e0fcae6e324938c28bb50deeb8814dc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 15 18:04:18 2012 +0100

    sna: Submit the batch upon destroying a ShmPixmap
    
    The midlayer has already detached the pixmap from the segment (possibly
    destroying that segment in the process, thanks midlayer!) so we need to
    submit the batch asap before the segment disappears.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6db986f..a3df628 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2186,13 +2186,14 @@ void _kgem_submit(struct kgem *kgem)
 							break;
 						}
 					}
-					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, deleted %d\n",
+					ErrorF("exec[%d] = handle:%d, presumed offset: %x, size: %d, tiling %d, fenced %d, snooped %d, deleted %d\n",
 					       i,
 					       kgem->exec[i].handle,
 					       (int)kgem->exec[i].offset,
 					       found ? kgem_bo_size(found) : -1,
 					       found ? found->tiling : -1,
 					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
+					       found ? found->snoop : -1,
 					       found ? found->purged : -1);
 				}
 				for (i = 0; i < kgem->nreloc; i++) {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 95d2b1a..661dde0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1033,6 +1033,7 @@ static void __sna_free_pixmap(struct sna *sna,
 	sna_pixmap_free_cpu(sna, priv);
 
 	if (priv->header) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = sna->freed_pixmap;
 		sna->freed_pixmap = pixmap;
 	} else {
@@ -1062,9 +1063,10 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 	if (priv->gpu_bo)
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 
-	if (priv->shm && priv->cpu_bo->rq)
+	if (priv->shm && kgem_bo_is_busy(priv->cpu_bo)) {
+		kgem_bo_submit(&sna->kgem, priv->cpu_bo); /* XXX ShmDetach */
 		add_flush_pixmap(sna, priv);
-	else
+	} else
 		__sna_free_pixmap(sna, pixmap, priv);
 	return TRUE;
 }
commit 4e604d721be5a1c0f8eaf91e3a5cf8c01a609a69
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 15 16:53:20 2012 +0100

    sna: Flush shm pixmaps after upload
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0e99aa6..95d2b1a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1264,8 +1264,7 @@ skip_inplace_map:
 
 			if (kgem_bo_is_busy(priv->cpu_bo)) {
 				DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
-				assert(priv->gpu_bo);
-				assert(priv->gpu_damage == NULL);
+				assert(priv->gpu_bo == NULL || priv->gpu_damage == NULL);
 
 				sna_damage_destroy(&priv->cpu_damage);
 				priv->undamaged = false;
@@ -2209,6 +2208,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    box, n, 0);
+				if (ok && priv->shm) {
+					assert(!priv->flush);
+					add_flush_pixmap(sna, priv);
+				}
 			}
 			if (!ok) {
 				if (pixmap->devPrivate.ptr == NULL) {
@@ -2249,6 +2252,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, 1, 0);
+			if (ok && priv->shm) {
+				assert(!priv->flush);
+				add_flush_pixmap(sna, priv);
+			}
 		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
@@ -2280,6 +2287,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n, 0);
+			if (ok && priv->shm) {
+				assert(!priv->flush);
+				add_flush_pixmap(sna, priv);
+			}
 		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
@@ -2776,6 +2787,10 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n, 0);
+			if (ok && priv->shm) {
+				assert(!priv->flush);
+				add_flush_pixmap(sna, priv);
+			}
 		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
commit 7f5a9e30c953c174a4d6bc83e0b0a5e068cbda52
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 15 09:34:24 2012 +0100

    sna: Remove zero-size rectangles from PolyFillRectangle
    
    Some applications like to waste precious bw by attempting to fill
    zero-sized rectangles; remove them upfront rather than process them.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3f886b0..0e99aa6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11387,23 +11387,60 @@ sna_poly_fill_rect_stippled_blt(DrawablePtr drawable,
 
 static unsigned
 sna_poly_fill_rect_extents(DrawablePtr drawable, GCPtr gc,
-			   int n, xRectangle *rect,
+			   int *_n, xRectangle **_r,
 			   BoxPtr out)
 {
+	int n;
+	xRectangle *r;
 	Box32Rec box;
 	bool clipped;
 
-	if (n == 0)
+	if (*_n == 0)
 		return 0;
 
 	DBG(("%s: [0] = (%d, %d)x(%d, %d)\n",
-	     __FUNCTION__, rect->x, rect->y, rect->width, rect->height));
-	box.x1 = rect->x;
-	box.x2 = box.x1 + rect->width;
-	box.y1 = rect->y;
-	box.y2 = box.y1 + rect->height;
-	while (--n)
-		box32_add_rect(&box, ++rect);
+	     __FUNCTION__, r->x, r->y, r->width, r->height));
+
+	/* Remove any zero-size rectangles from the array */
+	while (*_n && ((*_r)->width == 0 || (*_r)->height == 0))
+		--*_n, ++*_r;
+
+	if (*_n == 0)
+		return 0;
+
+	n = *_n;
+	r = *_r;
+
+	box.x1 = r->x;
+	box.x2 = box.x1 + r->width;
+	box.y1 = r->y;
+	box.y2 = box.y1 + r->height;
+	r++;
+
+	while (--n) {
+		int32_t v;
+
+		if (r->width == 0 || r->height == 0)
+			goto slow;
+
+		box32_add_rect(&box, r++);
+	}
+	goto done;
+slow:
+	{
+		xRectangle *rr = r;
+		do {
+			do {
+				--*_n, r++;
+			} while (--n && (r->width == 0 || r->height == 0));
+			while (n && r->width && r->height) {
+				box32_add_rect(&box, r);
+				*rr++ = *r++;
+				n--;
+			}
+		} while (n);
+	}
+done:
 
 	clipped = box32_trim_and_translate(&box, drawable, gc);
 	if (!box32_to_box16(&box, out))
@@ -11431,7 +11468,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	     gc->fillStyle, gc->tileIsPixel,
 	     gc->alu));
 
-	flags = sna_poly_fill_rect_extents(draw, gc, n, rect, &region.extents);
+	flags = sna_poly_fill_rect_extents(draw, gc, &n, &rect, &region.extents);
 	if (flags == 0) {
 		DBG(("%s, nothing to do\n", __FUNCTION__));
 		return;
commit 0c882f4fa991ef379e416628eb41cd2178818ad1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 14 16:02:38 2012 +0100

    sna/gen2,3: Prevent dereferencing a NULL bo with solid render fills
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 431b972..153c7ae 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1775,7 +1775,7 @@ gen2_render_composite(struct sna *sna,
 	case 0:
 		gen2_composite_solid_init(sna, &tmp->src, 0);
 	case 1:
-		if (mask == NULL &&
+		if (mask == NULL && tmp->src.bo &&
 		    sna_blt_composite__convert(sna,
 					       src_x, src_y,
 					       width, height,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ce71d82..c44359f 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2858,7 +2858,7 @@ gen3_render_composite(struct sna *sna,
 		tmp->src.u.gen3.type = SHADER_ZERO;
 		break;
 	case 1:
-		if (mask == NULL &&
+		if (mask == NULL && tmp->src.bo &&
 		    sna_blt_composite__convert(sna,
 					       src_x, src_y,
 					       width, height,
commit 48f436d3705059711097a222aac65f862b4a5cfd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 14 15:05:33 2012 +0100

    sna: Defer the release of a busy SHM pixmap until the next flush callback
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bd07662..3f886b0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1012,6 +1012,35 @@ fallback:
 	return create_pixmap(sna, screen, width, height, depth, usage);
 }
 
+static inline void add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
+{
+	DBG(("%s: marking pixmap=%ld for flushing\n",
+	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
+	list_move(&priv->list, &sna->flush_pixmaps);
+	sna->kgem.flush = true;
+}
+
+static void __sna_free_pixmap(struct sna *sna,
+			      PixmapPtr pixmap,
+			      struct sna_pixmap *priv)
+{
+	list_del(&priv->list);
+	list_del(&priv->inactive);
+
+	sna_damage_destroy(&priv->gpu_damage);
+	sna_damage_destroy(&priv->cpu_damage);
+
+	sna_pixmap_free_cpu(sna, priv);
+
+	if (priv->header) {
+		pixmap->devPrivate.ptr = sna->freed_pixmap;
+		sna->freed_pixmap = pixmap;
+	} else {
+		free(priv);
+		FreePixmap(pixmap);
+	}
+}
+
 static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 {
 	struct sna *sna;
@@ -1027,29 +1056,16 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 	}
 
 	assert_pixmap_damage(pixmap);
-
-	list_del(&priv->list);
-	list_del(&priv->inactive);
-
-	sna_damage_destroy(&priv->gpu_damage);
-	sna_damage_destroy(&priv->cpu_damage);
-
 	sna = to_sna_from_pixmap(pixmap);
 
 	/* Always release the gpu bo back to the lower levels of caching */
 	if (priv->gpu_bo)
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 
-	sna_pixmap_free_cpu(sna, priv);
-
-	if (priv->header) {
-		pixmap->devPrivate.ptr = sna->freed_pixmap;
-		sna->freed_pixmap = pixmap;
-	} else {
-		free(priv);
-		FreePixmap(pixmap);
-	}
-
+	if (priv->shm && priv->cpu_bo->rq)
+		add_flush_pixmap(sna, priv);
+	else
+		__sna_free_pixmap(sna, pixmap, priv);
 	return TRUE;
 }
 
@@ -1160,14 +1176,6 @@ static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
 	return priv->stride != 0;
 }
 
-static inline void add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
-{
-	DBG(("%s: marking pixmap=%ld for flushing\n",
-	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
-	list_move(&priv->list, &sna->flush_pixmaps);
-	sna->kgem.flush = true;
-}
-
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -13297,6 +13305,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			     priv->pixmap->drawable.serialNumber));
 			ret = sna_pixmap_move_to_cpu(priv->pixmap,
 						     MOVE_READ | MOVE_WRITE);
+			if (priv->pixmap->refcnt == 0)
+				__sna_free_pixmap(sna, priv->pixmap, priv);
 		} else {
 			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber));
commit 9f07d1fc0181f22da36837337241f228626e0d88
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 14 12:03:24 2012 +0100

    sna: Enable use of shm pixmap for readback
    
    A little too overzealous in the degradation of writing to the CPU bo
    if we need to read from a GPU bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 136aceb..bd07662 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2341,6 +2341,8 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	}
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
+		DBG(("%s: cached upload proxy, discard and revert to GPU\n",
+		     __FUNCTION__));
 		kgem_bo_destroy(&to_sna_from_pixmap(pixmap)->kgem,
 				priv->gpu_bo);
 		priv->gpu_bo = NULL;
@@ -2350,19 +2352,32 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	if (priv->flush)
 		flags |= PREFER_GPU;
 	if (priv->shm)
-		flags = 0;
+		flags &= ~PREFER_GPU;
 	if (priv->cpu && (flags & FORCE_GPU) == 0)
-		flags = 0;
+		flags &= ~PREFER_GPU;
+
+	DBG(("%s: flush=%d, shm=%d, cpu=%d => flags=%x\n",
+	     __FUNCTION__, priv->flush, priv->shm, priv->cpu, flags));
 
-	if (!flags && (!priv->gpu_damage || !kgem_bo_is_busy(priv->gpu_bo)))
+	if ((flags & PREFER_GPU) == 0 &&
+	    (!priv->gpu_damage || !kgem_bo_is_busy(priv->gpu_bo))) {
+		DBG(("%s: try cpu as GPU bo is idle\n", __FUNCTION__));
 		goto use_cpu_bo;
+	}
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+		DBG(("%s: use GPU fast path (all-damaged)\n", __FUNCTION__));
 		goto use_gpu_bo;
+	}
 
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
+	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
+		DBG(("%s: use CPU fast path (all-damaged)\n", __FUNCTION__));
 		goto use_cpu_bo;
+	}
 
+	DBG(("%s: gpu? %d, damaged? %d; cpu? %d, damaged? %d\n", __FUNCTION__,
+	     priv->gpu_bo ? priv->gpu_bo->handle : 0, priv->gpu_damage != NULL,
+	     priv->cpu_bo ? priv->cpu_bo->handle : 0, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
 		unsigned int move;
 
@@ -2502,7 +2517,7 @@ use_cpu_bo:
 	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
 		return NULL;
 
-	if (flags == 0 && !kgem_bo_is_busy(priv->cpu_bo))
+	if ((flags & FORCE_GPU) == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -4008,6 +4023,9 @@ source_prefer_gpu(struct sna_pixmap *priv)
 		return PREFER_GPU | FORCE_GPU;
 	}
 
+	if (DAMAGE_IS_ALL(priv->cpu_damage))
+		return 0;
+
 	DBG(("%s: source has GPU bo? %d\n",
 	     __FUNCTION__, priv->gpu_bo != NULL));
 	return priv->gpu_bo != NULL;
commit 9b016d2e466f342cc6649504fa48ab6a810c7e94
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 12 21:36:30 2012 +0100

    sna: do not bother attaching to a minuscule ShmPixmap
    
    If the pixmap is less than a page in size we are unlikely to recover the
    cost of tracking it via the GPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b27e7fb..136aceb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -757,7 +757,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 	DBG(("%s(%dx%d, depth=%d, bpp=%d, pitch=%d)\n",
 	     __FUNCTION__, width, height, depth, bpp, pitch));
 
-	if (wedged(sna) || bpp == 0) {
+	if (wedged(sna) || bpp == 0 || pitch*height <= 4096) {
 fallback:
 		pixmap = sna_pixmap_create_unattached(screen, 0, 0, depth);
 		if (pixmap == NULL)
@@ -809,9 +809,7 @@ fallback:
 		}
 	}
 
-	priv->cpu_bo = kgem_create_map(&sna->kgem,
-				       addr, pitch*(height-1)+width*bpp/8,
-				       false);
+	priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false);
 	if (priv->cpu_bo == NULL) {
 		priv->header = true;
 		sna_pixmap_destroy(pixmap);
commit 2f4de90709264ad19a3e3f5f0f79f4bba78a760a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 12 13:01:24 2012 +0100

    uxa: Only emit a warning if we fail to tile the frontbuffer and the hw allows
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_memory.c b/src/intel_memory.c
index bb7710f..f08ebdd 100644
--- a/src/intel_memory.c
+++ b/src/intel_memory.c
@@ -264,10 +264,12 @@ retry:
 		return NULL;
 	}
 
-	if ((intel->tiling & INTEL_TILING_FB) && tiling_mode != I915_TILING_X) {
+	/* If we could have used tiling but failed, warn */
+	if (intel->tiling & INTEL_TILING_FB &&
+	    tiling_mode != I915_TILING_X &&
+	    intel_check_display_stride(scrn, pitch, I915_TILING_X))
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "Failed to set tiling on frontbuffer.\n");
-	}
 
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "Allocated new frame buffer %dx%d stride %ld, %s\n",
commit 4d3cafc99ab49da6744046bc2004981144cb064d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 12 13:02:43 2012 +0100

    sna: Fix compilation without pixman glyphs
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 54ad510..db9af14 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1298,15 +1298,11 @@ glyphs_fallback(CARD8 op,
 		int nlist, GlyphListPtr list, GlyphPtr *glyphs)
 {
 	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
-	pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
-	pixman_glyph_t *pglyphs = stack_glyphs;
 	pixman_image_t *src_image, *dst_image;
-	pixman_glyph_cache_t *cache;
-	int dst_x = list->xOff, dst_y = list->yOff;
-	int src_dx, src_dy, dst_dx, dst_dy;
+	int src_dx, src_dy;
 	ScreenPtr screen = dst->pDrawable->pScreen;
 	RegionRec region;
-	int x, y, count, n;
+	int x, y, n;
 
 	glyph_extents(nlist, list, glyphs, &region.extents);
 	if (region.extents.x2 <= region.extents.x1 ||
@@ -1356,7 +1352,12 @@ glyphs_fallback(CARD8 op,
 
 #if HAS_PIXMAN_GLYPHS
 	if (sna->render.glyph_cache) {
-		cache = sna->render.glyph_cache;
+		pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
+		pixman_glyph_t *pglyphs = stack_glyphs;
+		pixman_glyph_cache_t *cache = sna->render.glyph_cache;
+		int dst_x = list->xOff, dst_y = list->yOff;
+		int dst_dx, dst_dy, count;
+
 		pixman_glyph_cache_freeze(cache);
 
 		count = 0;
commit dc18eaa585c36c8e5f5b4ec405a976a835fd2ac3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 12 10:34:10 2012 +0100

    sna: Make the failure to create render caches non-fatal
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 7295bef..3c83ac5 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -629,13 +629,13 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 }
 
 bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
+void sna_accel_create(struct sna *sna);
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
 void sna_accel_wakeup_handler(struct sna *sna);
 void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
-bool sna_accel_create(ScreenPtr screen, struct sna *sna);
 void sna_copy_fbcon(struct sna *sna);
 
 bool sna_composite_create(struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 660c17a..b27e7fb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13874,18 +13874,24 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	return true;
 }
 
-bool sna_accel_create(ScreenPtr screen, struct sna *sna)
+void sna_accel_create(struct sna *sna)
 {
 	if (!sna_glyphs_create(sna))
-		return false;
+		goto fail;
 
 	if (!sna_gradients_create(sna))
-		return false;
+		goto fail;
 
 	if (!sna_composite_create(sna))
-		return false;
+		goto fail;
 
-	return true;
+	return;
+
+fail:
+	xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+		   "Failed to allocate caches, disabling RENDER acceleration\n");
+	sna->have_render = false;
+	no_render_init(sna);
 }
 
 void sna_accel_watch_flush(struct sna *sna, int enable)
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 48b6836..72a5a94 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -46,6 +46,9 @@ bool sna_composite_create(struct sna *sna)
 	xRenderColor color ={ 0 };
 	int error;
 
+	if (!can_render(sna))
+		return true;
+
 	sna->clear = CreateSolidPicture(0, &color, &error);
 	return sna->clear != NULL;
 }
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 5d2915f..6810c7a 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -168,11 +168,7 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 	free(screen->devPrivate);
 	screen->devPrivate = NULL;
 
-	if (!sna_accel_create(screen, sna)) {
-		xf86DrvMsg(screen->myNum, X_ERROR,
-			   "[intel] Failed to initialise acceleration routines\n");
-		goto cleanup_front;
-	}
+	sna_accel_create(sna);
 
 	sna->front = screen->CreatePixmap(screen,
 					  screen->width,
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index e299ac5..54ad510 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -881,7 +881,7 @@ glyphs_via_mask(struct sna *sna,
 
 		memset(pixmap->devPrivate.ptr, 0, pixmap->devKind*height);
 #if HAS_PIXMAN_GLYPHS
-		{
+		if (sna->render.glyph_cache) {
 			pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
 			pixman_glyph_t *pglyphs = stack_glyphs;
 			pixman_glyph_cache_t *cache;
@@ -909,7 +909,7 @@ glyphs_via_mask(struct sna *sna,
 					const void *ptr;
 
 					if (g->info.width == 0 || g->info.height == 0)
-						goto next_image;
+						goto next_pglyph;
 
 					ptr = pixman_glyph_cache_lookup(cache, g, NULL);
 					if (ptr == NULL) {
@@ -917,14 +917,14 @@ glyphs_via_mask(struct sna *sna,
 
 						glyph_image = sna_glyph_get_image(g, screen);
 						if (glyph_image == NULL)
-							goto next_image;
+							goto next_pglyph;
 
 						ptr = pixman_glyph_cache_insert(cache, g, NULL,
 										g->info.x,
 										g->info.y,
 										glyph_image);
 						if (ptr == NULL)
-							goto next_image;
+							goto next_pglyph;
 					}
 
 					pglyphs[count].x = x;
@@ -932,7 +932,7 @@ glyphs_via_mask(struct sna *sna,
 					pglyphs[count].glyph = ptr;
 					count++;
 
-next_image:
+next_pglyph:
 					x += g->info.xOff;
 					y += g->info.yOff;
 				}
@@ -948,8 +948,8 @@ next_image:
 			pixman_glyph_cache_thaw(cache);
 			if (pglyphs != stack_glyphs)
 				free(pglyphs);
-		}
-#else
+		} else
+#endif
 		do {
 			int n = list->len;
 			x += list->xOff;
@@ -1011,7 +1011,6 @@ next_image:
 			}
 			list++;
 		} while (--nlist);
-#endif
 		pixman_image_unref(mask_image);
 
 		mask = CreatePicture(0, &pixmap->drawable,
@@ -1290,7 +1289,6 @@ static bool can_discard_mask(uint8_t op, PicturePtr src, PictFormatPtr mask,
 	return color >> 24 == 0xff;
 }
 
-#if HAS_PIXMAN_GLYPHS
 static void
 glyphs_fallback(CARD8 op,
 		PicturePtr src,
@@ -1356,312 +1354,254 @@ glyphs_fallback(CARD8 op,
 		mask_format = NULL;
 	}
 
-	cache = sna->render.glyph_cache;
-	pixman_glyph_cache_freeze(cache);
-
-	count = 0;
-	for (n = 0; n < nlist; ++n)
-		count += list[n].len;
-	if (count > N_STACK_GLYPHS) {
-		pglyphs = malloc (count * sizeof(pixman_glyph_t));
-		if (pglyphs == NULL)
-			goto out;
-	}
+#if HAS_PIXMAN_GLYPHS
+	if (sna->render.glyph_cache) {
+		cache = sna->render.glyph_cache;
+		pixman_glyph_cache_freeze(cache);
+
+		count = 0;
+		for (n = 0; n < nlist; ++n)
+			count += list[n].len;
+		if (count > N_STACK_GLYPHS) {
+			pglyphs = malloc (count * sizeof(pixman_glyph_t));
+			if (pglyphs == NULL)
+				goto out;
+		}
 
-	count = 0;
-	x = y = 0;
-	while (nlist--) {
-		n = list->len;
-		x += list->xOff;
-		y += list->yOff;
-		while (n--) {
-			GlyphPtr g = *glyphs++;
-			const void *ptr;
+		count = 0;
+		x = y = 0;
+		while (nlist--) {
+			n = list->len;
+			x += list->xOff;
+			y += list->yOff;
+			while (n--) {
+				GlyphPtr g = *glyphs++;
+				const void *ptr;
 
-			if (g->info.width == 0 || g->info.height == 0)
-				goto next;
+				if (g->info.width == 0 || g->info.height == 0)
+					goto next;
 
-			ptr = pixman_glyph_cache_lookup(cache, g, NULL);
-			if (ptr == NULL) {
-				pixman_image_t *glyph_image;
+				ptr = pixman_glyph_cache_lookup(cache, g, NULL);
+				if (ptr == NULL) {
+					pixman_image_t *glyph_image;
 
-				glyph_image = sna_glyph_get_image(g, screen);
-				if (glyph_image == NULL)
-					goto next;
+					glyph_image = sna_glyph_get_image(g, screen);
+					if (glyph_image == NULL)
+						goto next;
 
-				ptr = pixman_glyph_cache_insert(cache, g, NULL,
-								g->info.x,
-								g->info.y,
-								glyph_image);
-				if (ptr == NULL)
-					goto out;
-			}
+					ptr = pixman_glyph_cache_insert(cache, g, NULL,
+									g->info.x,
+									g->info.y,
+									glyph_image);
+					if (ptr == NULL)
+						goto out;
+				}
 
-			pglyphs[count].x = x;
-			pglyphs[count].y = y;
-			pglyphs[count].glyph = ptr;
-			count++;
+				pglyphs[count].x = x;
+				pglyphs[count].y = y;
+				pglyphs[count].glyph = ptr;
+				count++;
 
 next:
-			x += g->info.xOff;
-			y += g->info.yOff;
+				x += g->info.xOff;
+				y += g->info.yOff;
+			}
+			list++;
 		}
-		list++;
-	}
 
-	src_image = image_from_pict(src, FALSE, &src_dx, &src_dy);
-	if (src_image == NULL)
-		goto out;
-
-	dst_image = image_from_pict(dst, TRUE, &dst_dx, &dst_dy);
-	if (dst_image == NULL)
-		goto out_free_src;
-
-	if (mask_format) {
-		pixman_composite_glyphs(op, src_image, dst_image,
-					mask_format->format | (mask_format->depth << 24),
-					src_x + src_dx + region.extents.x1 - dst_x,
-					src_y + src_dy + region.extents.y1 - dst_y,
-					region.extents.x1, region.extents.y1,
-					region.extents.x1 + dst_dx, region.extents.y1 + dst_dy,
-					region.extents.x2 - region.extents.x1,
-					region.extents.y2 - region.extents.y1,
-					cache, count, pglyphs);
-	} else {
-		pixman_composite_glyphs_no_mask(op, src_image, dst_image,
-						src_x + src_dx - dst_x, src_y + src_dy - dst_y,
-						dst_dx, dst_dy,
+		src_image = image_from_pict(src, FALSE, &src_dx, &src_dy);
+		if (src_image == NULL)
+			goto out;
+
+		dst_image = image_from_pict(dst, TRUE, &dst_dx, &dst_dy);
+		if (dst_image == NULL)
+			goto out_free_src;
+
+		if (mask_format) {
+			pixman_composite_glyphs(op, src_image, dst_image,
+						mask_format->format | (mask_format->depth << 24),
+						src_x + src_dx + region.extents.x1 - dst_x,
+						src_y + src_dy + region.extents.y1 - dst_y,
+						region.extents.x1, region.extents.y1,
+						region.extents.x1 + dst_dx, region.extents.y1 + dst_dy,
+						region.extents.x2 - region.extents.x1,
+						region.extents.y2 - region.extents.y1,
 						cache, count, pglyphs);
-	}
+		} else {
+			pixman_composite_glyphs_no_mask(op, src_image, dst_image,
+							src_x + src_dx - dst_x, src_y + src_dy - dst_y,
+							dst_dx, dst_dy,
+							cache, count, pglyphs);
+		}
 
-	free_pixman_pict(dst, dst_image);
+		free_pixman_pict(dst, dst_image);
 
 out_free_src:
-	free_pixman_pict(src, src_image);
+		free_pixman_pict(src, src_image);
 
 out:
-	pixman_glyph_cache_thaw(cache);
-	if (pglyphs != stack_glyphs)
-		free(pglyphs);
-}
-#else
-static void
-glyphs_fallback(CARD8 op,
-		PicturePtr src,
-		PicturePtr dst,
-		PictFormatPtr mask_format,
-		int src_x,
-		int src_y,
-		int nlist,
-		GlyphListPtr list,
-		GlyphPtr *glyphs)
-{
-	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
-	ScreenPtr screen = dst->pDrawable->pScreen;
-	pixman_image_t *dst_image, *mask_image, *src_image;
-	int dx, dy, x, y;
-	RegionRec region;
-
-	glyph_extents(nlist, list, glyphs, &region.extents);
-	if (region.extents.x2 <= region.extents.x1 ||
-	    region.extents.y2 <= region.extents.y1)
-		return;
-
-	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     region.extents.x1, region.extents.y1,
-	     region.extents.x2, region.extents.y2));
-
-	region.data = NULL;
-	RegionTranslate(&region, dst->pDrawable->x, dst->pDrawable->y);
-	if (dst->pCompositeClip)
-		RegionIntersect(&region, &region, dst->pCompositeClip);
-	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
-	     __FUNCTION__,
-	     RegionExtents(&region)->x1, RegionExtents(&region)->y1,
-	     RegionExtents(&region)->x2, RegionExtents(&region)->y2));
-	if (!RegionNotEmpty(&region))
-		return;
-
-	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
-					     MOVE_READ | MOVE_WRITE))
-		return;
-	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
-				      MOVE_READ | MOVE_WRITE))
-		return;
-
-	if (src->pDrawable) {
-		if (!sna_drawable_move_to_cpu(src->pDrawable,
-					      MOVE_READ))
-			return;
-
-		if (src->alphaMap &&
-		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
-					      MOVE_READ))
-			return;
-	}
-	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
-
-	dst_image = image_from_pict(dst, TRUE, &x, &y);
-	if (dst_image == NULL)
-		goto cleanup_region;
-	DBG(("%s: dst offset (%d, %d)\n", __FUNCTION__, x, y));
-	if (x | y) {
-		region.extents.x1 += x;
-		region.extents.x2 += x;
-		region.extents.y1 += y;
-		region.extents.y2 += y;
-	}
-
-	src_image = image_from_pict(src, FALSE, &dx, &dy);
-	if (src_image == NULL)
-		goto cleanup_dst;
-	DBG(("%s: src offset (%d, %d)\n", __FUNCTION__, dx, dy));
-	src_x += dx - list->xOff;
-	src_y += dy - list->yOff;
-
-	if (mask_format &&
-	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
-	    mask_format == glyphs_format(nlist, list, glyphs))
-		mask_format = NULL;
+		pixman_glyph_cache_thaw(cache);
+		if (pglyphs != stack_glyphs)
+			free(pglyphs);
+	} else
+#endif
+	{
+		pixman_image_t *mask_image;
 
-	if (mask_format) {
-		DBG(("%s: create mask (%d, %d)x(%d,%d) + (%d,%d) + (%d,%d), depth=%d, format=%lx [%lx], ca? %d\n",
-		     __FUNCTION__,
-		     region.extents.x1, region.extents.y1,
-		     region.extents.x2 - region.extents.x1,
-		     region.extents.y2 - region.extents.y1,
-		     dst->pDrawable->x, dst->pDrawable->y,
-		     x, y,
-		     mask_format->depth,
-		     (long)mask_format->format,
-		     (long)(mask_format->depth << 24 | mask_format->format),
-		     NeedsComponent(mask_format->format)));
-		mask_image =
-			pixman_image_create_bits(mask_format->depth << 24 | mask_format->format,
-						 region.extents.x2 - region.extents.x1,
-						 region.extents.y2 - region.extents.y1,
-						 NULL, 0);
-		if (mask_image == NULL)
-			goto cleanup_src;
-		if (NeedsComponent(mask_format->format))
-			pixman_image_set_component_alpha(mask_image, TRUE);
+		dst_image = image_from_pict(dst, TRUE, &x, &y);
+		if (dst_image == NULL)
+			goto cleanup_region;
+		DBG(("%s: dst offset (%d, %d)\n", __FUNCTION__, x, y));
+		if (x | y) {
+			region.extents.x1 += x;
+			region.extents.x2 += x;
+			region.extents.y1 += y;
+			region.extents.y2 += y;
+		}
 
-		x -= region.extents.x1;
-		y -= region.extents.y1;
-	} else {
-		mask_image = dst_image;
-		src_x -= x - dst->pDrawable->x;
-		src_y -= y - dst->pDrawable->y;
-	}
+		src_image = image_from_pict(src, FALSE, &src_dx, &src_dy);
+		if (src_image == NULL)
+			goto cleanup_dst;
+		DBG(("%s: src offset (%d, %d)\n", __FUNCTION__, src_dx, src_dy));
+		src_x += src_dx - list->xOff;
+		src_y += src_dy - list->yOff;
 
-	do {
-		int n = list->len;
-		x += list->xOff;
-		y += list->yOff;
-		while (n--) {
-			GlyphPtr g = *glyphs++;
-			pixman_image_t *glyph_image;
+		if (mask_format) {
+			DBG(("%s: create mask (%d, %d)x(%d,%d) + (%d,%d) + (%d,%d), depth=%d, format=%lx [%lx], ca? %d\n",
+			     __FUNCTION__,
+			     region.extents.x1, region.extents.y1,
+			     region.extents.x2 - region.extents.x1,
+			     region.extents.y2 - region.extents.y1,
+			     dst->pDrawable->x, dst->pDrawable->y,
+			     x, y,
+			     mask_format->depth,
+			     (long)mask_format->format,
+			     (long)(mask_format->depth << 24 | mask_format->format),
+			     NeedsComponent(mask_format->format)));
+			mask_image =
+				pixman_image_create_bits(mask_format->depth << 24 | mask_format->format,
+							 region.extents.x2 - region.extents.x1,
+							 region.extents.y2 - region.extents.y1,
+							 NULL, 0);
+			if (mask_image == NULL)
+				goto cleanup_src;
+			if (NeedsComponent(mask_format->format))
+				pixman_image_set_component_alpha(mask_image, TRUE);
+
+			x -= region.extents.x1;
+			y -= region.extents.y1;
+		} else {
+			mask_image = dst_image;
+			src_x -= x - dst->pDrawable->x;
+			src_y -= y - dst->pDrawable->y;
+		}
 
-			if (g->info.width == 0 || g->info.height == 0)
-				goto next_glyph;
+		do {
+			n = list->len;
+			x += list->xOff;
+			y += list->yOff;
+			while (n--) {
+				GlyphPtr g = *glyphs++;
+				pixman_image_t *glyph_image;
 
-			glyph_image = sna_glyph_get_image(g, screen);
-			if (glyph_image == NULL)
-				goto next_glyph;
+				if (g->info.width == 0 || g->info.height == 0)
+					goto next_glyph;
 
-			if (mask_format) {
-				DBG(("%s: glyph+(%d,%d) to mask (%d, %d)x(%d, %d)\n",
-				     __FUNCTION__,
-				     dx, dy,
-				     x - g->info.x,
-				     y - g->info.y,
-				     g->info.width,
-				     g->info.height));
+				glyph_image = sna_glyph_get_image(g, screen);
+				if (glyph_image == NULL)
+					goto next_glyph;
 
-				if (list->format == mask_format) {
-					assert(pixman_image_get_format(glyph_image) == pixman_image_get_format(mask_image));
-					pixman_image_composite(PictOpAdd,
-							       glyph_image,
-							       NULL,
-							       mask_image,
-							       dx, dy,
-							       0, 0,
-							       x - g->info.x,
-							       y - g->info.y,
-							       g->info.width,
-							       g->info.height);
+				if (mask_format) {
+					DBG(("%s: glyph to mask (%d, %d)x(%d, %d)\n",
+					     __FUNCTION__,
+					     x - g->info.x,
+					     y - g->info.y,
+					     g->info.width,
+					     g->info.height));
+
+					if (list->format == mask_format) {
+						assert(pixman_image_get_format(glyph_image) == pixman_image_get_format(mask_image));
+						pixman_image_composite(PictOpAdd,
+								       glyph_image,
+								       NULL,
+								       mask_image,
+								       0, 0,
+								       0, 0,
+								       x - g->info.x,
+								       y - g->info.y,
+								       g->info.width,
+								       g->info.height);
+					} else {
+						pixman_image_composite(PictOpAdd,
+								       sna->render.white_image,
+								       glyph_image,
+								       mask_image,
+								       0, 0,
+								       0, 0,
+								       x - g->info.x,
+								       y - g->info.y,
+								       g->info.width,
+								       g->info.height);
+					}
 				} else {
-					pixman_image_composite(PictOpAdd,
-							       sna->render.white_image,
+					int xi = x - g->info.x;
+					int yi = y - g->info.y;
+
+					DBG(("%s: glyph to dst (%d, %d)x(%d, %d)/[(%d, %d)x(%d, %d)], src (%d, %d) [op=%d]\n",
+					     __FUNCTION__,
+					     xi, yi,
+					     g->info.width, g->info.height,
+					     dst->pDrawable->x,
+					     dst->pDrawable->y,
+					     dst->pDrawable->width,
+					     dst->pDrawable->height,
+					     src_x + xi,
+					     src_y + yi,
+					     op));
+
+					pixman_image_composite(op,
+							       src_image,
 							       glyph_image,
-							       mask_image,
-							       dx, dy,
+							       dst_image,
+							       src_x + xi,
+							       src_y + yi,
 							       0, 0,
-							       x - g->info.x,
-							       y - g->info.y,
+							       xi, yi,
 							       g->info.width,
 							       g->info.height);
 				}
-			} else {
-				int xi = x - g->info.x;
-				int yi = y - g->info.y;
-
-				DBG(("%s: glyph+(%d, %d) to dst (%d, %d)x(%d, %d)/[(%d, %d)x(%d, %d)], src (%d, %d) [op=%d]\n",
-				     __FUNCTION__,
-				     dx, dy,
-				     xi, yi,
-				     g->info.width, g->info.height,
-				     dst->pDrawable->x,
-				     dst->pDrawable->y,
-				     dst->pDrawable->width,
-				     dst->pDrawable->height,
-				     src_x + xi,
-				     src_y + yi,
-				     op));
-
-				pixman_image_composite(op,
-						       src_image,
-						       glyph_image,
-						       dst_image,
-						       src_x + xi,
-						       src_y + yi,
-						       dx, dy,
-						       xi, yi,
-						       g->info.width,
-						       g->info.height);
-			}
 next_glyph:
-			x += g->info.xOff;
-			y += g->info.yOff;
+				x += g->info.xOff;
+				y += g->info.yOff;
+			}
+			list++;
+		} while (--nlist);
+
+		if (mask_format) {
+			DBG(("%s: glyph mask composite src=(%d+%d,%d+%d) dst=(%d, %d)x(%d, %d)\n",
+			     __FUNCTION__,
+			     src_x, region.extents.x1, src_y, region.extents.y1,
+			     region.extents.x1, region.extents.y1,
+			     region.extents.x2 - region.extents.x1,
+			     region.extents.y2 - region.extents.y1));
+			pixman_image_composite(op, src_image, mask_image, dst_image,
+					       src_x, src_y,
+					       0, 0,
+					       region.extents.x1, region.extents.y1,
+					       region.extents.x2 - region.extents.x1,
+					       region.extents.y2 - region.extents.y1);
+			pixman_image_unref(mask_image);
 		}
-		list++;
-	} while (--nlist);
-
-	if (mask_format) {
-		DBG(("%s: glyph mask composite src=(%d+%d,%d+%d) dst=(%d, %d)x(%d, %d)\n",
-		     __FUNCTION__,
-		     src_x, region.extents.x1, src_y, region.extents.y1,
-		     region.extents.x1, region.extents.y1,
-		     region.extents.x2 - region.extents.x1,
-		     region.extents.y2 - region.extents.y1));
-		pixman_image_composite(op, src_image, mask_image, dst_image,
-				       src_x, src_y,
-				       0, 0,
-				       region.extents.x1, region.extents.y1,
-				       region.extents.x2 - region.extents.x1,
-				       region.extents.y2 - region.extents.y1);
-		pixman_image_unref(mask_image);
-	}
 
 cleanup_src:
-	free_pixman_pict(src, src_image);
+		free_pixman_pict(src, src_image);
 cleanup_dst:
-	free_pixman_pict(dst, dst_image);
+		free_pixman_pict(dst, dst_image);
+	}
+
 cleanup_region:
 	RegionUninit(&region);
 }
-#endif
 
 void
 sna_glyphs(CARD8 op,
@@ -1757,7 +1697,9 @@ sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph)
 	if (priv->image) {
 #if HAS_PIXMAN_GLYPHS
 		struct sna *sna = to_sna_from_screen(screen);
-		pixman_glyph_cache_remove(sna->render.glyph_cache, glyph, NULL);
+		if (sna->render.glyph_cache)
+			pixman_glyph_cache_remove(sna->render.glyph_cache,
+						  glyph, NULL);
 #endif
 		pixman_image_unref(priv->image);
 		priv->image = NULL;
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index a364c11..5f06fbc 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -382,6 +382,9 @@ bool sna_gradients_create(struct sna *sna)
 {
 	DBG(("%s\n", __FUNCTION__));
 
+	if (!can_render(sna))
+		return true;
+
 	if (!sna_alpha_cache_init(sna))
 		return false;
 
commit b580abdfa68108f3e63ee1f897b6ea83b9c47935
Author: Dave Airlie <airlied at gmail.com>
Date:   Sun Aug 12 20:52:12 2012 +1000

    intel: fix video xvPipe range check
    
    Pointed out by Russell King on irc.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/intel_video.c b/src/intel_video.c
index c8a9a92..0be72e2 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -675,7 +675,7 @@ I830SetPortAttributeOverlay(ScrnInfoPtr scrn,
 		adaptor_priv->saturation = value;
 	} else if (attribute == xvPipe) {
 		xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
-		if ((value < -1) || (value > xf86_config->num_crtc))
+		if ((value < -1) || (value >= xf86_config->num_crtc))
 			return BadValue;
 		if (value < 0)
 			adaptor_priv->desired_crtc = NULL;
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 068f234..babdfc6 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -194,7 +194,7 @@ sna_video_overlay_set_port_attribute(ScrnInfoPtr scrn,
 		video->saturation = value;
 	} else if (attribute == xvPipe) {
 		xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
-		if ((value < -1) || (value > xf86_config->num_crtc))
+		if ((value < -1) || (value >= xf86_config->num_crtc))
 			return BadValue;
 		if (value < 0)
 			video->desired_crtc = NULL;
commit 44f848f9b2f2a2dcd9087210ea46bc4fdb63c057
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 19:44:15 2012 +0100

    sna: Fix typo in computation of texel offsets for tiled 8x8 blts
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=53353
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4e3e9eb..660c17a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9458,10 +9458,10 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 		sna_damage_add_rectangles(damage, r, n, dx, dy);
 		if (n == 1) {
 			tx = (r->x - origin->x) % 8;
-			if (tx < 8)
+			if (tx < 0)
 				tx = 8 - tx;
 			ty = (r->y - origin->y) % 8;
-			if (ty < 8)
+			if (ty < 0)
 				ty = 8 - ty;
 
 			assert(r->x + dx >= 0);
@@ -9520,10 +9520,10 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 				assert(r->y + dy + r->height <= pixmap->drawable.height);
 
 				tx = (r->x - origin->x) % 8;
-				if (tx < 8)
+				if (tx < 0)
 					tx = 8 - tx;
 				ty = (r->y - origin->y) % 8;
-				if (ty < 8)
+				if (ty < 0)
 					ty = 8 - ty;
 
 				b[0] = br00 | tx << 12 | ty << 8;
commit b335d369bbcbfaef878795a5c8e72c2d40d3a536
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 12:59:49 2012 +0100

    sna: Discard the GPU bo after syncing to CPU bo for writing
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 227bb01..4e3e9eb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1400,9 +1400,13 @@ done:
 		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
 	}
 
-	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
-		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
-		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+	if (priv->cpu_bo) {
+		if ((flags & MOVE_ASYNC_HINT) == 0) {
+			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
+			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+		}
+		if (flags & MOVE_WRITE)
+			sna_pixmap_free_gpu(sna, priv);
 	}
 	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
commit 029934662e0bad6cf47baf4d7202656dd3e0ba08
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:47:44 2012 +0100

    sna: Add a little DBG for promotion of CPU source bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index f507f49..757b2f4 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -352,8 +352,11 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 			bool want_tiling;
 
 			if (priv->cpu_bo->pitch >= 4096) {
-				DBG(("%s: promoting snooped CPU bo due to TLB miss\n",
-				     __FUNCTION__));
+				DBG(("%s: size=%dx%d, promoting reused (%d) CPU bo due to TLB miss (%dx%d, pitch=%d)\n",
+				     __FUNCTION__, w, h, priv->source_count,
+				     pixmap->drawable.width,
+				     pixmap->drawable.height,
+				     priv->cpu_bo->pitch));
 				return NULL;
 			}
 
commit f20f8556bfc8b4df6ba5050cca656a4ff0fdf18f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:46:40 2012 +0100

    sna: Treat ShmPixmap as ordinary
    
    In theory the code handles the automatic promotion and demotion of the
    GPU buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ac88641..227bb01 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -776,7 +776,7 @@ fallback:
 		pixmap = sna->freed_pixmap;
 		sna->freed_pixmap = pixmap->devPrivate.ptr;
 
-		pixmap->usage_hint = -1;
+		pixmap->usage_hint = 0;
 		pixmap->refcnt = 1;
 
 		pixmap->drawable.width = width;
@@ -793,7 +793,7 @@ fallback:
 
 		priv = _sna_pixmap_reset(pixmap);
 	} else {
-		pixmap = create_pixmap(sna, screen, 0, 0, depth, -1);
+		pixmap = create_pixmap(sna, screen, 0, 0, depth, 0);
 		if (pixmap == NullPixmap)
 			return NullPixmap;
 
commit 16c751a06c503b85c5ab6588bae277af4348487d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:44:08 2012 +0100

    sna: Only mark userptr bo as snooped on non-LLC architectures
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1fdd89c..6db986f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3808,7 +3808,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 		return NULL;
 	}
 
-	bo->snoop = true;
+	bo->snoop = !kgem->has_llc;
 	debug_alloc__bo(kgem, bo);
 
 	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
@@ -4011,6 +4011,8 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	struct kgem_buffer *bo;
 	uint32_t handle;
 
+	assert(!kgem->has_llc);
+
 	if (kgem->has_cacheing) {
 		struct kgem_bo *old;
 
commit 59359dba6496229eff7c60dd04536c5add69cec8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:25:40 2012 +0100

    sna: Mark SHM userptr bo as unreusable
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 517c698..ac88641 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -819,6 +819,7 @@ fallback:
 	}
 	priv->cpu_bo->flush = true;
 	priv->cpu_bo->pitch = pitch;
+	priv->cpu_bo->reusable = false;
 	sna_accel_watch_flush(sna, 1);
 
 	priv->cpu = true;
commit 64488010504a6e76008bb3b3c1e61caeb025913c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:21:39 2012 +0100

    sna/gen2-5: Substitute an equivalent BLT composite operation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index b65454d..431b972 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1755,14 +1755,6 @@ gen2_render_composite(struct sna *sna,
 		return false;
 	}
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height, tmp))
-		return true;
-
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	tmp->op = op;
@@ -1783,6 +1775,13 @@ gen2_render_composite(struct sna *sna,
 	case 0:
 		gen2_composite_solid_init(sna, &tmp->src, 0);
 	case 1:
+		if (mask == NULL &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
 		break;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 18c5d85..ce71d82 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2832,14 +2832,6 @@ gen3_render_composite(struct sna *sna,
 		return false;
 	}
 
-	if (mask == NULL && sna->kgem.mode != KGEM_RENDER &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height, tmp))
-		return true;
-
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	tmp->op = op;
@@ -2866,6 +2858,14 @@ gen3_render_composite(struct sna *sna,
 		tmp->src.u.gen3.type = SHADER_ZERO;
 		break;
 	case 1:
+		if (mask == NULL &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
+
 		gen3_composite_channel_convert(&tmp->src);
 		break;
 	}
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 7dab92b..ab06295 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1913,6 +1913,7 @@ gen4_composite_picture(struct sna *sna,
 	} else
 		channel->transform = picture->transform;
 
+	channel->pict_format = picture->format;
 	channel->card_format = gen4_get_card_format(picture->format);
 	if (channel->card_format == -1)
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
@@ -2305,6 +2306,14 @@ gen4_render_composite(struct sna *sna,
 		gen4_composite_solid_init(sna, &tmp->src, 0);
 		/* fall through to fixup */
 	case 1:
+		if (mask == NULL &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
+
 		gen4_composite_channel_convert(&tmp->src);
 		break;
 	}
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index f50e785..488ac34 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1923,6 +1923,7 @@ gen5_composite_picture(struct sna *sna,
 	} else
 		channel->transform = picture->transform;
 
+	channel->pict_format = picture->format;
 	channel->card_format = gen5_get_card_format(picture->format);
 	if (channel->card_format == -1)
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
@@ -2302,14 +2303,6 @@ gen5_render_composite(struct sna *sna,
 		return false;
 	}
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height, tmp))
-		return true;
-
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
@@ -2330,6 +2323,14 @@ gen5_render_composite(struct sna *sna,
 		gen5_composite_solid_init(sna, &tmp->src, 0);
 		/* fall through to fixup */
 	case 1:
+		if (mask == NULL &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
+
 		gen5_composite_channel_convert(&tmp->src);
 		break;
 	}
commit f464d508c870293699616626d64bd64f16051467
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 10:10:32 2012 +0100

    sna/gen6+: Try to use the BLT to avoid TLB misses
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6d4d79e..f8b1e71 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2267,6 +2267,7 @@ gen6_composite_picture(struct sna *sna,
 	} else
 		channel->transform = picture->transform;
 
+	channel->pict_format = picture->format;
 	channel->card_format = gen6_get_card_format(picture->format);
 	if (channel->card_format == (unsigned)-1)
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
@@ -2369,6 +2370,16 @@ static bool can_switch_rings(struct sna *sna)
 	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
+static inline bool untiled_tlb_miss(struct kgem_bo *bo)
+{
+	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
+}
+
+static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
+{
+	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
+}
+
 static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
@@ -2607,6 +2618,19 @@ reuse_source(struct sna *sna,
 }
 
 static bool
+prefer_blt_composite(struct sna *sna, struct sna_composite_op *tmp)
+{
+	if (sna->kgem.ring == KGEM_BLT)
+		return true;
+
+	if (!prefer_blt_ring(sna))
+		return false;
+
+	return (prefer_blt_bo(sna, tmp->dst.bo) ||
+		prefer_blt_bo(sna, tmp->src.bo));
+}
+
+static bool
 gen6_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2677,23 +2701,20 @@ gen6_render_composite(struct sna *sna,
 		gen6_composite_solid_init(sna, &tmp->src, 0);
 		/* fall through to fixup */
 	case 1:
+		/* Did we just switch rings to prepare the source? */
+		if (mask == NULL &&
+		    prefer_blt_composite(sna, tmp) &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
+
 		gen6_composite_channel_convert(&tmp->src);
 		break;
 	}
 
-	/* Did we just switch rings to prepare the source? */
-	if (sna->kgem.ring == KGEM_BLT && mask == NULL &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height, tmp)) {
-		if (tmp->redirect.real_bo)
-			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
-		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return true;
-	}
-
 	tmp->is_affine = tmp->src.is_affine;
 	tmp->has_component_alpha = false;
 	tmp->need_magic_ca_pass = false;
@@ -3216,21 +3237,9 @@ gen6_emit_copy_state(struct sna *sna,
 	gen6_emit_state(sna, op, offset | dirty);
 }
 
-static inline bool untiled_tlb_miss(struct kgem_bo *bo)
-{
-	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
-}
-
-static bool prefer_blt_bo(struct sna *sna,
-			  PixmapPtr pixmap,
-			  struct kgem_bo *bo)
-{
-	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
-}
-
 static inline bool prefer_blt_copy(struct sna *sna,
-				   PixmapPtr src, struct kgem_bo *src_bo,
-				   PixmapPtr dst, struct kgem_bo *dst_bo,
+				   struct kgem_bo *src_bo,
+				   struct kgem_bo *dst_bo,
 				   unsigned flags)
 {
 	if (PREFER_RENDER)
@@ -3238,8 +3247,8 @@ static inline bool prefer_blt_copy(struct sna *sna,
 
 	return (sna->kgem.ring == KGEM_BLT ||
 		(flags & COPY_LAST && sna->kgem.mode == KGEM_NONE) ||
-		prefer_blt_bo(sna, src, src_bo) ||
-		prefer_blt_bo(sna, dst, dst_bo));
+		prefer_blt_bo(sna, src_bo) ||
+		prefer_blt_bo(sna, dst_bo));
 }
 
 static inline bool
@@ -3289,7 +3298,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n, &extents)));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
+	if (prefer_blt_copy(sna, src_bo, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3528,7 +3537,7 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, 0) &&
+	if (prefer_blt_copy(sna, src_bo, dst_bo, 0) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 954e42f..318cbef 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2367,6 +2367,7 @@ gen7_composite_picture(struct sna *sna,
 	} else
 		channel->transform = picture->transform;
 
+	channel->pict_format = picture->format;
 	channel->card_format = gen7_get_card_format(picture->format);
 	if (channel->card_format == (unsigned)-1)
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
@@ -2457,6 +2458,16 @@ inline static bool can_switch_rings(struct sna *sna)
 	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
+static inline bool untiled_tlb_miss(struct kgem_bo *bo)
+{
+	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
+}
+
+static bool prefer_blt_bo(struct sna *sna, struct kgem_bo *bo)
+{
+	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
+}
+
 inline static bool prefer_blt_ring(struct sna *sna)
 {
 	return sna->kgem.ring != KGEM_RENDER || can_switch_rings(sna);
@@ -2709,6 +2720,19 @@ reuse_source(struct sna *sna,
 }
 
 static bool
+prefer_blt_composite(struct sna *sna, struct sna_composite_op *tmp)
+{
+	if (sna->kgem.ring == KGEM_BLT)
+		return true;
+
+	if (!prefer_blt_ring(sna))
+		return false;
+
+	return (prefer_blt_bo(sna, tmp->dst.bo) ||
+		prefer_blt_bo(sna, tmp->src.bo));
+}
+
+static bool
 gen7_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2752,7 +2776,8 @@ gen7_render_composite(struct sna *sna,
 	if (!gen7_composite_set_target(sna, tmp, dst))
 		return false;
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
+	if (mask == NULL &&
+	    sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
 			      src, dst,
 			      src_x, src_y,
@@ -2779,23 +2804,20 @@ gen7_render_composite(struct sna *sna,
 		gen7_composite_solid_init(sna, &tmp->src, 0);
 		/* fall through to fixup */
 	case 1:
+		/* Did we just switch rings to prepare the source? */
+		if (mask == NULL &&
+		    prefer_blt_composite(sna, tmp) &&
+		    sna_blt_composite__convert(sna,
+					       src_x, src_y,
+					       width, height,
+					       dst_x, dst_y,
+					       tmp))
+			return true;
+
 		gen7_composite_channel_convert(&tmp->src);
 		break;
 	}
 
-	/* Did we just switch rings to prepare the source? */
-	if (sna->kgem.ring == KGEM_BLT && mask == NULL &&
-	    sna_blt_composite(sna, op,
-			      src, dst,
-			      src_x, src_y,
-			      dst_x, dst_y,
-			      width, height, tmp)) {
-		if (tmp->redirect.real_bo)
-			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
-		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return true;
-	}
-
 	tmp->is_affine = tmp->src.is_affine;
 	tmp->has_component_alpha = false;
 	tmp->need_magic_ca_pass = false;
@@ -3305,27 +3327,15 @@ gen7_emit_copy_state(struct sna *sna,
 	gen7_emit_state(sna, op, offset);
 }
 
-static inline bool untiled_tlb_miss(struct kgem_bo *bo)
-{
-	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
-}
-
-static bool prefer_blt_bo(struct sna *sna,
-			  PixmapPtr pixmap,
-			  struct kgem_bo *bo)
-{
-	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
-}
-
 static inline bool prefer_blt_copy(struct sna *sna,
-				   PixmapPtr src, struct kgem_bo *src_bo,
-				   PixmapPtr dst, struct kgem_bo *dst_bo,
+				   struct kgem_bo *src_bo,
+				   struct kgem_bo *dst_bo,
 				   unsigned flags)
 {
 	return (sna->kgem.ring == KGEM_BLT ||
 		(flags & COPY_LAST && sna->kgem.mode == KGEM_NONE) ||
-		prefer_blt_bo(sna, src, src_bo) ||
-		prefer_blt_bo(sna, dst, dst_bo));
+		prefer_blt_bo(sna, src_bo) ||
+		prefer_blt_bo(sna, dst_bo));
 }
 
 static inline bool
@@ -3375,7 +3385,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n, &extents)));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
+	if (prefer_blt_copy(sna, src_bo, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3604,7 +3614,7 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, 0) &&
+	if (prefer_blt_copy(sna, src_bo, dst_bo, 0) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 6bf223a..77fca3d 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1674,6 +1674,131 @@ clear:
 	return ret;
 }
 
+static void convert_done(struct sna *sna, const struct sna_composite_op *op)
+{
+	struct kgem *kgem = &sna->kgem;
+
+	if (kgem->gen >= 60 && kgem_check_batch(kgem, 3)) {
+		uint32_t *b = kgem->batch + kgem->nbatch;
+		b[0] = XY_SETUP_CLIP;
+		b[1] = b[2] = 0;
+		kgem->nbatch += 3;
+	}
+
+	kgem_bo_destroy(kgem, op->src.bo);
+	sna_render_composite_redirect_done(sna, op);
+}
+
+bool
+sna_blt_composite__convert(struct sna *sna,
+			   int x, int y,
+			   int width, int height,
+			   int dst_x, int dst_y,
+			   struct sna_composite_op *tmp)
+{
+	uint32_t alpha_fixup;
+	uint8_t op;
+
+#if DEBUG_NO_BLT || NO_BLT_COMPOSITE
+	return false;
+#endif
+
+	DBG(("%s\n", __FUNCTION__));
+
+	if (!kgem_bo_can_blt(&sna->kgem, tmp->dst.bo) ||
+	    !kgem_bo_can_blt(&sna->kgem, tmp->src.bo)) {
+		DBG(("%s: cannot blt from src or to dst\n", __FUNCTION__));
+		return false;
+	}
+
+	if (tmp->src.transform) {
+		DBG(("%s: transforms not handled by the BLT\n"));
+		return false;
+	}
+
+	if (tmp->src.filter == PictFilterConvolution) {
+		DBG(("%s: convolutions filters not handled\n",
+		     __FUNCTION__));
+		return false;
+	}
+
+	op = tmp->op;
+	if (op == PictOpOver && PICT_FORMAT_A(tmp->src.pict_format) == 0)
+		op = PictOpSrc;
+	if (op != PictOpSrc) {
+		DBG(("%s: unsuported op [%d] for blitting\n",
+		     __FUNCTION__, op));
+		return false;
+	}
+
+	alpha_fixup = 0;
+	if (!(tmp->dst.format == tmp->src.pict_format ||
+	      tmp->dst.format == alphaless(tmp->src.pict_format) ||
+	      (alphaless(tmp->dst.format) == alphaless(tmp->src.pict_format) &&
+	       sna_get_pixel_from_rgba(&alpha_fixup,
+				       0, 0, 0, 0xffff,
+				       tmp->dst.format)))) {
+		DBG(("%s: incompatible src/dst formats src=%08x, dst=%08x\n",
+		     __FUNCTION__,
+		     (unsigned)tmp->src.pict_format,
+		     tmp->dst.format));
+		return false;
+	}
+
+	x += tmp->src.offset[0];
+	y += tmp->src.offset[1];
+	if (x < 0 || y < 0 ||
+	    x + width  > tmp->src.width ||
+	    y + height > tmp->src.height) {
+		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid drawable %dx%d\n",
+		     __FUNCTION__,
+		     x, y, x+width, y+width, tmp->src.width, tmp->src.height));
+		return false;
+	}
+
+	if (!kgem_check_many_bo_fenced(&sna->kgem, tmp->dst.bo, tmp->src.bo, NULL)) {
+		_kgem_submit(&sna->kgem);
+		if (!kgem_check_many_bo_fenced(&sna->kgem,
+					       tmp->dst.bo, tmp->src.bo, NULL)) {
+			DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
+			return false;
+		}
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
+	}
+
+	tmp->u.blt.src_pixmap = NULL;
+	tmp->u.blt.sx = x - dst_x;
+	tmp->u.blt.sy = y - dst_y;
+	DBG(("%s: blt dst offset (%d, %d), source offset (%d, %d), with alpha fixup? %x\n",
+	     __FUNCTION__,
+	     tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
+
+	if (alpha_fixup) {
+		tmp->blt   = blt_composite_copy_with_alpha;
+		tmp->box   = blt_composite_copy_box_with_alpha;
+		tmp->boxes = blt_composite_copy_boxes_with_alpha;
+
+		if (!sna_blt_alpha_fixup_init(sna, &tmp->u.blt,
+					      tmp->src.bo, tmp->dst.bo,
+					      PICT_FORMAT_BPP(tmp->src.pict_format),
+					      alpha_fixup))
+			return false;
+	} else {
+		tmp->blt   = blt_composite_copy;
+		tmp->box   = blt_composite_copy_box;
+		tmp->boxes = blt_composite_copy_boxes;
+
+		if (!sna_blt_copy_init(sna, &tmp->u.blt,
+				       tmp->src.bo, tmp->dst.bo,
+				       PICT_FORMAT_BPP(tmp->src.pict_format),
+				       GXcopy))
+			return false;
+	}
+
+	tmp->done = convert_done;
+	return true;
+}
+
 static void sna_blt_fill_op_blt(struct sna *sna,
 				const struct sna_fill_op *op,
 				int16_t x, int16_t y,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index a2bcb45..b079178 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -570,6 +570,11 @@ bool sna_blt_composite(struct sna *sna,
 		       int16_t dst_x, int16_t dst_y,
 		       int16_t width, int16_t height,
 		       struct sna_composite_op *tmp);
+bool sna_blt_composite__convert(struct sna *sna,
+				int x, int y,
+				int width, int height,
+				int dst_x, int dst_y,
+				struct sna_composite_op *tmp);
 
 bool sna_blt_fill(struct sna *sna, uint8_t alu,
 		  struct kgem_bo *bo,
commit e9c0e54e69a5ce93bb4e79d56da4a83fddf49c4e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 11 08:58:14 2012 +0100

    sna: Handle userptr failures more gracefully
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 50e0321..1fdd89c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2190,7 +2190,7 @@ void _kgem_submit(struct kgem *kgem)
 					       i,
 					       kgem->exec[i].handle,
 					       (int)kgem->exec[i].offset,
-					       found ? bytes(found) : -1,
+					       found ? kgem_bo_size(found) : -1,
 					       found ? found->tiling : -1,
 					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
 					       found ? found->purged : -1);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dc6557a..517c698 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -754,9 +754,11 @@ sna_pixmap_create_shm(ScreenPtr screen,
 	struct sna_pixmap *priv;
 	PixmapPtr pixmap;
 
-	DBG(("%s(%d, %d, %d)\n", __FUNCTION__, width, height, depth));
+	DBG(("%s(%dx%d, depth=%d, bpp=%d, pitch=%d)\n",
+	     __FUNCTION__, width, height, depth, bpp, pitch));
 
-	if (wedged(sna)) {
+	if (wedged(sna) || bpp == 0) {
+fallback:
 		pixmap = sna_pixmap_create_unattached(screen, 0, 0, depth);
 		if (pixmap == NULL)
 			return NULL;
@@ -807,19 +809,20 @@ sna_pixmap_create_shm(ScreenPtr screen,
 		}
 	}
 
-	priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false);
+	priv->cpu_bo = kgem_create_map(&sna->kgem,
+				       addr, pitch*(height-1)+width*bpp/8,
+				       false);
 	if (priv->cpu_bo == NULL) {
-		free(priv);
-		FreePixmap(pixmap);
-		return GetScratchPixmapHeader(screen, width, height, depth,
-					      bpp, pitch, addr);
+		priv->header = true;
+		sna_pixmap_destroy(pixmap);
+		goto fallback;
 	}
 	priv->cpu_bo->flush = true;
 	priv->cpu_bo->pitch = pitch;
 	sna_accel_watch_flush(sna, 1);
 
+	priv->cpu = true;
 	priv->shm = true;
-	priv->header = true;
 	sna_damage_all(&priv->cpu_damage, width, height);
 
 	pixmap->devKind = pitch;
commit 5d6d9231cd2003fda1c6f2dd3174014317a45704
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 10 23:07:07 2012 +0100

    sna: Reset BLT state after copy-boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 4263bf7..6bf223a 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -2223,6 +2223,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		kgem->nbatch += 3;
 	}
 
+	sna->blt_state.fill_bo = 0;
 	return true;
 }
 
commit 50bfc172f1f89c023bb3a47418b845e31eba7126
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 10 22:49:26 2012 +0100

    sna: Improve a DBG statement
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 935f1bc..dc6557a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9403,7 +9403,7 @@ static bool
 sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 				 struct kgem_bo *bo, struct sna_damage **damage,
 				 struct kgem_bo *tile_bo, GCPtr gc,
-				 int n, xRectangle *r,
+				 int n, const xRectangle *r,
 				 const BoxRec *extents, unsigned clipped)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
@@ -9417,7 +9417,7 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 	if (NO_TILE_8x8)
 		return false;
 
-	DBG(("%s x %d [(%d, %d)+(%d, %d)...], clipped=%x\n",
+	DBG(("%s x %d [(%d, %d)x(%d, %d)...], clipped=%x\n",
 	     __FUNCTION__, n, r->x, r->y, r->width, r->height, clipped));
 
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9690,7 +9690,7 @@ static bool
 sna_poly_fill_rect_tiled_nxm_blt(DrawablePtr drawable,
 				 struct kgem_bo *bo,
 				 struct sna_damage **damage,
-				 GCPtr gc, int n, xRectangle *rect,
+				 GCPtr gc, int n, const xRectangle *rect,
 				 const BoxRec *extents, unsigned clipped)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
@@ -9758,7 +9758,7 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	int tile_width, tile_height;
 	int16_t dx, dy;
 
-	DBG(("%s x %d [(%d, %d)+(%d, %d)...]\n",
+	DBG(("%s x %d [(%d, %d)x(%d, %d)...]\n",
 	     __FUNCTION__, n, rect->x, rect->y, rect->width, rect->height));
 
 	tile_width = tile->drawable.width;
commit b33f6754a99f6d11e423d6a03739fa2c04eeed88
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 10 21:59:36 2012 +0100

    sna: Add assertions to 8x8 tiled BLTs and reset BLT state afterwards
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=53353
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9e7e61c..935f1bc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9438,6 +9438,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 	br13 |= fill_ROP[gc->alu] << 16;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	assert(extents->x1 + dx >= 0);
+	assert(extents->y1 + dy >= 0);
+	assert(extents->x2 + dx <= pixmap->drawable.width);
+	assert(extents->y2 + dy <= pixmap->drawable.height);
+
 	if (!clipped) {
 		dx += drawable->x;
 		dy += drawable->y;
@@ -9451,6 +9456,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 			if (ty < 8)
 				ty = 8 - ty;
 
+			assert(r->x + dx >= 0);
+			assert(r->y + dy >= 0);
+			assert(r->x + dx + r->width  <= pixmap->drawable.width);
+			assert(r->y + dy + r->height <= pixmap->drawable.height);
+
 			b = sna->kgem.batch + sna->kgem.nbatch;
 			b[0] = XY_PAT_BLT | tx << 12 | ty << 8 | 3 << 20 | (br00 & BLT_DST_TILED);
 			b[1] = br13;
@@ -9580,6 +9590,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 						sna->kgem.nbatch += 8;
 					}
 
+					assert(box.x1 + dx >= 0);
+					assert(box.y1 + dy >= 0);
+					assert(box.x2 + dx <= pixmap->drawable.width);
+					assert(box.y2 + dy <= pixmap->drawable.height);
+
 					ty = (box.y1 - drawable->y - origin->y) % 8;
 					if (ty < 0)
 						ty = 8 - ty;
@@ -9590,8 +9605,8 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 
 					b = sna->kgem.batch + sna->kgem.nbatch;
 					b[0] = br00 | tx << 12 | ty << 8;
-					b[1] = box.y1 << 16 | box.x1;
-					b[2] = box.y2 << 16 | box.x2;
+					b[1] = (box.y1 + dy) << 16 | (box.x1 + dx);
+					b[2] = (box.y2 + dy) << 16 | (box.x2 + dx);
 					sna->kgem.nbatch += 3;
 				}
 			}
@@ -9642,6 +9657,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 							sna->kgem.nbatch += 8;
 						}
 
+						assert(bb.x1 + dx >= 0);
+						assert(bb.y1 + dy >= 0);
+						assert(bb.x2 + dx <= pixmap->drawable.width);
+						assert(bb.y2 + dy <= pixmap->drawable.height);
+
 						ty = (bb.y1 - drawable->y - origin->y) % 8;
 						if (ty < 0)
 							ty = 8 - ty;
@@ -9652,8 +9672,8 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 
 						b = sna->kgem.batch + sna->kgem.nbatch;
 						b[0] = br00 | tx << 12 | ty << 8;
-						b[1] = bb.y1 << 16 | bb.x1;
-						b[2] = bb.y2 << 16 | bb.x2;
+						b[1] = (bb.y1 + dy) << 16 | (bb.x1 + dx);
+						b[2] = (bb.y2 + dy) << 16 | (bb.x2 + dx);
 						sna->kgem.nbatch += 3;
 					}
 				}
@@ -9662,6 +9682,7 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 	}
 done:
 	assert_pixmap_damage(pixmap);
+	sna->blt_state.fill_bo = 0;
 	return true;
 }
 
commit ff54d4af39edcddd55f597c404ab6dd548a24aa9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 10 21:41:38 2012 +0100

    sna: Add a couple of debug options to control use of BLT patterns
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=53353
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 496f57b..9e7e61c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -88,6 +88,9 @@
 #define ACCEL_IMAGE_GLYPH 1
 #define ACCEL_PUSH_PIXELS 1
 
+#define NO_TILE_8x8 0
+#define NO_STIPPLE_8x8 0
+
 #if 0
 static void __sna_fallback_flush(DrawablePtr d)
 {
@@ -9411,6 +9414,9 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 	int16_t dx, dy;
 	uint32_t *b;
 
+	if (NO_TILE_8x8)
+		return false;
+
 	DBG(("%s x %d [(%d, %d)+(%d, %d)...], clipped=%x\n",
 	     __FUNCTION__, n, r->x, r->y, r->width, r->height, clipped));
 
@@ -9991,6 +9997,9 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 	int16_t dx, dy;
 	uint32_t *b;
 
+	if (NO_STIPPLE_8x8)
+		return false;
+
 	DBG(("%s: alu=%d, upload (%d, %d), (%d, %d), origin (%d, %d)\n",
 	     __FUNCTION__, gc->alu,
 	     extents->x1, extents->y1,
commit 22306144030b5d37df6d46321555bced6e33c50c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 19:45:09 2012 +0100

    sna: Use the HAS_SEMAPHORES parameter when available
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d7458ec..50e0321 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -92,6 +92,8 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
 #define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
 
+#define LOCAL_I915_PARAM_HAS_SEMAPHORES	 20
+
 #define LOCAL_I915_GEM_USERPTR       0x32
 #define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
 struct local_i915_gem_userptr {
@@ -616,14 +618,19 @@ static int gem_param(struct kgem *kgem, int name)
 	return v;
 }
 
-static bool test_has_semaphores_enabled(void)
+static bool test_has_semaphores_enabled(struct kgem *kgem)
 {
 	FILE *file;
 	bool detected = false;
+	int ret;
 
 	if (DBG_NO_SEMAPHORES)
 		return false;
 
+	ret = gem_param(kgem, LOCAL_I915_PARAM_HAS_SEMAPHORES);
+	if (ret != -1)
+		return ret > 0;
+
 	file = fopen("/sys/module/i915/parameters/semaphores", "r");
 	if (file) {
 		int value;
@@ -788,7 +795,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	     kgem->has_userptr));
 
 	kgem->has_semaphores = false;
-	if (kgem->has_blt && test_has_semaphores_enabled())
+	if (kgem->has_blt && test_has_semaphores_enabled(kgem))
 		kgem->has_semaphores = true;
 	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
 	     kgem->has_semaphores));
commit 465ae1840a883c50d5a5aab7380b30527f00e17b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 12:23:25 2012 +0100

    sna: Use a compile-time constant to determine the zeroth bit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index fb6c323..bba6ea3 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -48,6 +48,8 @@ static inline void add(RegionPtr region,
 		region->extents.x2 = x2;
 }
 
+#define MASK_0 (FB_ALLONES & ~FbScrRight(FB_ALLONES, 1))
+
 /* Convert bitmap clip mask into clipping region.
  * First, goes through each line and makes boxes by noting the transitions
  * from 0 to 1 and 1 to 0.
@@ -57,7 +59,6 @@ static inline void add(RegionPtr region,
 RegionPtr
 fbBitmapToRegion(PixmapPtr pixmap)
 {
-	const register FbBits mask0 = FB_ALLONES & ~FbScrRight(FB_ALLONES, 1);
 	FbBits maskw;
 	register RegionPtr region;
 	const FbBits *bits, *line, *end;
@@ -92,7 +93,7 @@ fbBitmapToRegion(PixmapPtr pixmap)
 			(line[width >> (FB_SHIFT - 3)] & maskw)))
 			line += stride, y2++;
 
-		if (READ(bits) & mask0)
+		if (READ(bits) & MASK_0)
 			x1 = 0;
 		else
 			x1 = -1;
@@ -109,7 +110,7 @@ fbBitmapToRegion(PixmapPtr pixmap)
 					continue;
 			}
 			for (i = 0; i < FB_UNIT; i++) {
-				if (w & mask0) {
+				if (w & MASK_0) {
 					if (x1 < 0)
 						x1 = base + i;
 				} else {
@@ -124,7 +125,7 @@ fbBitmapToRegion(PixmapPtr pixmap)
 		if (width & FB_MASK) {
 			FbBits w = READ(bits++);
 			for (i = 0; i < (width & FB_MASK); i++) {
-				if (w & mask0) {
+				if (w & MASK_0) {
 					if (x1 < 0)
 						x1 = base + i;
 				} else {
commit 85192f00e345830541e3715e211b1f98154bbef4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 12:11:50 2012 +0100

    sna: Ignore trailing bits when comparing lines inside the bitmap
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=51422
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index 0758728..fb6c323 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -58,6 +58,7 @@ RegionPtr
 fbBitmapToRegion(PixmapPtr pixmap)
 {
 	const register FbBits mask0 = FB_ALLONES & ~FbScrRight(FB_ALLONES, 1);
+	FbBits maskw;
 	register RegionPtr region;
 	const FbBits *bits, *line, *end;
 	int width, y1, y2, base, x1;
@@ -74,6 +75,9 @@ fbBitmapToRegion(PixmapPtr pixmap)
 	stride = pixmap->devKind >> (FB_SHIFT - 3);
 
 	width = pixmap->drawable.width;
+	maskw = 0;
+	if (width & 7)
+		maskw = FB_ALLONES & ~FbScrRight(FB_ALLONES, width & FB_MASK);
 	region->extents.x1 = width;
 	region->extents.x2 = 0;
 	y2 = 0;
@@ -82,7 +86,10 @@ fbBitmapToRegion(PixmapPtr pixmap)
 		bits = line;
 		line += stride;
 		while (y2 < pixmap->drawable.height &&
-		       memcmp(bits, line, (width+7)>>3) == 0)
+		       memcmp(bits, line, width >> 3) == 0 &&
+		       (maskw == 0 ||
+			(bits[width >> (FB_SHIFT - 3)] & maskw) ==
+			(line[width >> (FB_SHIFT - 3)] & maskw)))
 			line += stride, y2++;
 
 		if (READ(bits) & mask0)
commit edc1427f3dcddb73acdb5b5e03756ecb30cb3797
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 10:53:40 2012 +0100

    sna/gen5: Add DBG for context switching
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 22dabd2..f50e785 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3516,6 +3516,8 @@ gen5_render_context_switch(struct kgem *kgem,
 	 */
 	if (kgem->mode == KGEM_BLT) {
 		struct sna *sna = to_sna_from_kgem(kgem);
+		DBG(("%s: forcing drawrect on next state emission\n",
+		     __FUNCTION__));
 		sna->render_state.gen5.drawrect_limit = -1;
 	}
 }
commit fec78ddc60b1f03097b334672546c2c7ef21ab4f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 10:08:52 2012 +0100

    sna: Add DBG option to disable discarding of glyph masks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index af0f211..e299ac5 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -75,6 +75,7 @@
 #define NO_GLYPHS_VIA_MASK 0
 #define NO_SMALL_MASK 0
 #define NO_GLYPHS_SLOW 0
+#define NO_DISCARD_MASK 0
 
 #define CACHE_PICTURE_SIZE 1024
 #define GLYPH_MIN_SIZE 8
@@ -1254,6 +1255,9 @@ static bool can_discard_mask(uint8_t op, PicturePtr src, PictFormatPtr mask,
 	PictFormatPtr g;
 	uint32_t color;
 
+	if (NO_DISCARD_MASK)
+		return false;
+
 	if (nlist == 1 && list->len == 1)
 		return true;
 
commit b0cd430082d219500729d20c4740440e58773892
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 10:07:06 2012 +0100

    sna/gen5: Fix use of uninitialised rgba value for DBG
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 90b0bdd..22dabd2 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3123,8 +3123,10 @@ gen5_render_fill_boxes(struct sna *sna,
 	struct sna_composite_op tmp;
 	uint32_t pixel;
 
-	DBG(("%s op=%x, color=%08x, boxes=%d x [((%d, %d), (%d, %d))...]\n",
-	     __FUNCTION__, op, pixel, n, box->x1, box->y1, box->x2, box->y2));
+	DBG(("%s op=%x, color=(%04x,%04x,%04x,%04x), boxes=%d x [((%d, %d), (%d, %d))...]\n",
+	     __FUNCTION__, op,
+	     color->red, color->green, color->blue, color->alpha,
+	     n, box->x1, box->y1, box->x2, box->y2));
 
 	if (op >= ARRAY_SIZE(gen5_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
commit 9ad923f95c59cdf42363f95c7dd52188c0987296
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 8 09:59:50 2012 +0100

    sna: Minor DBG for fallback glyphs and discarding the mask
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index ccf6497..af0f211 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1347,8 +1347,10 @@ glyphs_fallback(CARD8 op,
 	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
 
 	if (mask_format &&
-	    can_discard_mask(op, src, mask_format, nlist, list, glyphs))
+	    can_discard_mask(op, src, mask_format, nlist, list, glyphs)) {
+		DBG(("%s: discarding mask\n", __FUNCTION__));
 		mask_format = NULL;
+	}
 
 	cache = sna->render.glyph_cache;
 	pixman_glyph_cache_freeze(cache);
commit 1a0590d133ea6991e0939d1f170f9c10df6856a0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Aug 7 17:16:35 2012 +0100

    sna: Check the composite extents against the clip
    
    When computing the composite extents (as opposed to the composite
    region) also check if the resultant box overlaps the destination clip
    region (we know it already fits into the extents). This helps in cases
    with small roi against clipped drawables, such as drawing text onto
    expose events.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=51422
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 0ca66c7..48b6836 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -364,7 +364,14 @@ sna_compute_composite_extents(BoxPtr extents,
 		trim_source_extents(extents, mask,
 				    dst_x - mask_x, dst_y - mask_y);
 
-	return extents->x1 < extents->x2 && extents->y1 < extents->y2;
+	if (extents->x1 >= extents->x2 || extents->y1 >= extents->y2)
+		return false;
+
+	if (region_is_singular(dst->pCompositeClip))
+		return true;
+
+	return pixman_region_contains_rectangle(dst->pCompositeClip,
+						extents) != PIXMAN_REGION_OUT;
 }
 
 #if HAS_DEBUG_FULL
commit 5f5a10ef04a8c01b22da2284583851d84273dc2a
Author: Paulo Zanoni <paulo.r.zanoni at intel.com>
Date:   Mon Aug 6 18:48:09 2012 -0300

    Add Haswell PCI IDs
    
    Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
    Reviewed-by: Rodrigo Vivi <rodrigo.vivi at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.h b/src/intel_driver.h
index 882d889..ac02cc7 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -192,6 +192,43 @@
 #define PCI_CHIP_IVYBRIDGE_S_GT1	0x015a
 #define PCI_CHIP_IVYBRIDGE_S_GT2	0x016a
 
+#define PCI_CHIP_HASWELL_D_GT1		0x0402
+#define PCI_CHIP_HASWELL_D_GT2		0x0412
+#define PCI_CHIP_HASWELL_D_GT2_PLUS	0x0422
+#define PCI_CHIP_HASWELL_M_GT1		0x0406
+#define PCI_CHIP_HASWELL_M_GT2		0x0416
+#define PCI_CHIP_HASWELL_M_GT2_PLUS	0x0426
+#define PCI_CHIP_HASWELL_S_GT1		0x040A
+#define PCI_CHIP_HASWELL_S_GT2		0x041A
+#define PCI_CHIP_HASWELL_S_GT2_PLUS	0x042A
+#define PCI_CHIP_HASWELL_SDV_D_GT1	0x0C02
+#define PCI_CHIP_HASWELL_SDV_D_GT2	0x0C12
+#define PCI_CHIP_HASWELL_SDV_D_GT2_PLUS	0x0C22
+#define PCI_CHIP_HASWELL_SDV_M_GT1	0x0C06
+#define PCI_CHIP_HASWELL_SDV_M_GT2	0x0C16
+#define PCI_CHIP_HASWELL_SDV_M_GT2_PLUS	0x0C26
+#define PCI_CHIP_HASWELL_SDV_S_GT1	0x0C0A
+#define PCI_CHIP_HASWELL_SDV_S_GT2	0x0C1A
+#define PCI_CHIP_HASWELL_SDV_S_GT2_PLUS	0x0C2A
+#define PCI_CHIP_HASWELL_ULT_D_GT1	0x0A02
+#define PCI_CHIP_HASWELL_ULT_D_GT2	0x0A12
+#define PCI_CHIP_HASWELL_ULT_D_GT2_PLUS	0x0A22
+#define PCI_CHIP_HASWELL_ULT_M_GT1	0x0A06
+#define PCI_CHIP_HASWELL_ULT_M_GT2	0x0A16
+#define PCI_CHIP_HASWELL_ULT_M_GT2_PLUS	0x0A26
+#define PCI_CHIP_HASWELL_ULT_S_GT1	0x0A0A
+#define PCI_CHIP_HASWELL_ULT_S_GT2	0x0A1A
+#define PCI_CHIP_HASWELL_ULT_S_GT2_PLUS	0x0A2A
+#define PCI_CHIP_HASWELL_CRW_D_GT1	0x0D12
+#define PCI_CHIP_HASWELL_CRW_D_GT2	0x0D22
+#define PCI_CHIP_HASWELL_CRW_D_GT2_PLUS	0x0D32
+#define PCI_CHIP_HASWELL_CRW_M_GT1	0x0D16
+#define PCI_CHIP_HASWELL_CRW_M_GT2	0x0D26
+#define PCI_CHIP_HASWELL_CRW_M_GT2_PLUS	0x0D36
+#define PCI_CHIP_HASWELL_CRW_S_GT1	0x0D1A
+#define PCI_CHIP_HASWELL_CRW_S_GT2	0x0D2A
+#define PCI_CHIP_HASWELL_CRW_S_GT2_PLUS	0x0D3A
+
 #endif
 
 #define I85X_CAPID			0x44
diff --git a/src/intel_module.c b/src/intel_module.c
index ae19f75..c0403ca 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -149,6 +149,42 @@ static const SymTabRec _intel_chipsets[] = {
 	{PCI_CHIP_IVYBRIDGE_D_GT2,		"Ivybridge Desktop (GT2)" },
 	{PCI_CHIP_IVYBRIDGE_S_GT1,		"Ivybridge Server" },
 	{PCI_CHIP_IVYBRIDGE_S_GT2,		"Ivybridge Server (GT2)" },
+	{PCI_CHIP_HASWELL_D_GT1,		"Haswell Desktop (GT1)" },
+	{PCI_CHIP_HASWELL_D_GT2,		"Haswell Desktop (GT2)" },
+	{PCI_CHIP_HASWELL_D_GT2_PLUS,		"Haswell Desktop (GT2+)" },
+	{PCI_CHIP_HASWELL_M_GT1,		"Haswell Mobile (GT1)" },
+	{PCI_CHIP_HASWELL_M_GT2,		"Haswell Mobile (GT2)" },
+	{PCI_CHIP_HASWELL_M_GT2_PLUS,		"Haswell Mobile (GT2+)" },
+	{PCI_CHIP_HASWELL_S_GT1,		"Haswell Server (GT1)" },
+	{PCI_CHIP_HASWELL_S_GT2,		"Haswell Server (GT2)" },
+	{PCI_CHIP_HASWELL_S_GT2_PLUS,		"Haswell Server (GT2+)" },
+	{PCI_CHIP_HASWELL_SDV_D_GT1,		"Haswell SDV Desktop (GT1)" },
+	{PCI_CHIP_HASWELL_SDV_D_GT2,		"Haswell SDV Desktop (GT2)" },
+	{PCI_CHIP_HASWELL_SDV_D_GT2_PLUS,	"Haswell SDV Desktop (GT2+)" },
+	{PCI_CHIP_HASWELL_SDV_M_GT1,		"Haswell SDV Mobile (GT1)" },
+	{PCI_CHIP_HASWELL_SDV_M_GT2,		"Haswell SDV Mobile (GT2)" },
+	{PCI_CHIP_HASWELL_SDV_M_GT2_PLUS,	"Haswell SDV Mobile (GT2+)" },
+	{PCI_CHIP_HASWELL_SDV_S_GT1,		"Haswell SDV Server (GT1)" },
+	{PCI_CHIP_HASWELL_SDV_S_GT2,		"Haswell SDV Server (GT2)" },
+	{PCI_CHIP_HASWELL_SDV_S_GT2_PLUS,	"Haswell SDV Server (GT2+)" },
+	{PCI_CHIP_HASWELL_ULT_D_GT1,		"Haswell ULT Desktop (GT1)" },
+	{PCI_CHIP_HASWELL_ULT_D_GT2,		"Haswell ULT Desktop (GT2)" },
+	{PCI_CHIP_HASWELL_ULT_D_GT2_PLUS,	"Haswell ULT Desktop (GT2+)" },
+	{PCI_CHIP_HASWELL_ULT_M_GT1,		"Haswell ULT Mobile (GT1)" },
+	{PCI_CHIP_HASWELL_ULT_M_GT2,		"Haswell ULT Mobile (GT2)" },
+	{PCI_CHIP_HASWELL_ULT_M_GT2_PLUS,	"Haswell ULT Mobile (GT2+)" },
+	{PCI_CHIP_HASWELL_ULT_S_GT1,		"Haswell ULT Server (GT1)" },
+	{PCI_CHIP_HASWELL_ULT_S_GT2,		"Haswell ULT Server (GT2)" },
+	{PCI_CHIP_HASWELL_ULT_S_GT2_PLUS,	"Haswell ULT Server (GT2+)" },
+	{PCI_CHIP_HASWELL_CRW_D_GT1,		"Haswell CRW Desktop (GT1)" },
+	{PCI_CHIP_HASWELL_CRW_D_GT2,		"Haswell CRW Desktop (GT2)" },
+	{PCI_CHIP_HASWELL_CRW_D_GT2_PLUS,	"Haswell CRW Desktop (GT2+)" },
+	{PCI_CHIP_HASWELL_CRW_M_GT1,		"Haswell CRW Mobile (GT1)" },
+	{PCI_CHIP_HASWELL_CRW_M_GT2,		"Haswell CRW Mobile (GT2)" },
+	{PCI_CHIP_HASWELL_CRW_M_GT2_PLUS,	"Haswell CRW Mobile (GT2+)" },
+	{PCI_CHIP_HASWELL_CRW_S_GT1,		"Haswell CRW Server (GT1)" },
+	{PCI_CHIP_HASWELL_CRW_S_GT2,		"Haswell CRW Server (GT2)" },
+	{PCI_CHIP_HASWELL_CRW_S_GT2_PLUS,	"Haswell CRW Server (GT2+)" },
 	{-1,					NULL}
 };
 #define NUM_CHIPSETS (sizeof(_intel_chipsets) / sizeof(_intel_chipsets[0]))
@@ -221,6 +257,43 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),
 
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),
+
 	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
 	{ 0, 0, 0 },
 };
commit d8f7f933bc2d30e529730521d5628c10df0361de
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Aug 5 17:18:54 2012 +0100

    sna: Format markup to suppress compiler warning
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/brw/brw_disasm.c b/src/sna/brw/brw_disasm.c
index 106eed3..e6da174 100644
--- a/src/sna/brw/brw_disasm.c
+++ b/src/sna/brw/brw_disasm.c
@@ -415,6 +415,9 @@ static int string(FILE *file, const char *str)
 	return 0;
 }
 
+#if defined(__GNUC__) && (__GNUC__ > 2)
+__attribute__((format(printf, 2, 3)))
+#endif
 static int format(FILE *f, const char *fmt, ...)
 {
 	char buf[1024];
@@ -833,10 +836,10 @@ void brw_disasm(FILE *file, const struct brw_instruction *inst, int gen)
 	int space = 0;
 
 	format(file, "%08x %08x %08x %08x\n",
-	       ((uint32_t*)inst)[0],
-	       ((uint32_t*)inst)[1],
-	       ((uint32_t*)inst)[2],
-	       ((uint32_t*)inst)[3]);
+	       ((const uint32_t*)inst)[0],
+	       ((const uint32_t*)inst)[1],
+	       ((const uint32_t*)inst)[2],
+	       ((const uint32_t*)inst)[3]);
 
 	if (inst->header.predicate_control) {
 		string(file, "(");
commit 4d0a259988f67f5c569c5d8000c010a7b662efd3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 15:54:19 2012 +0100

    sna/gen6: Compile fix for DBG
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1d51300..6d4d79e 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3284,7 +3284,8 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
-	     overlaps(src_bo, src_dx, src_dy,
+	     overlaps(sna,
+		      src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n, &extents)));
 
commit d917a276bff4e933590e78404526b929b6b3b497
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 18:05:03 2012 +0100

    sna/gen4: Restore the w/a flush for fill/copy as well
    
    So far, it looks like that the only one we can indeed drop is the
    composite with mask.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d2dc366..7dab92b 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -60,13 +60,15 @@
 #define NO_VIDEO 0
 
 #if FLUSH_EVERY_VERTEX
+#define _FLUSH() do { \
+	gen4_vertex_flush(sna); \
+	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
+} while (0)
 #define FLUSH(OP) do { \
-	if ((OP)->mask.bo == NULL) { \
-		gen4_vertex_flush(sna); \
-		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
-	} \
+	if ((OP)->mask.bo == NULL) _FLUSH(); \
 } while (0)
 #else
+#define _FLUSH()
 #define FLUSH(OP)
 #endif
 
@@ -1682,7 +1684,7 @@ gen4_render_video(struct sna *sna,
 		OUT_VERTEX_F((box->x1 - dxo) * src_scale_x);
 		OUT_VERTEX_F((box->y1 - dyo) * src_scale_y);
 
-		FLUSH(&tmp);
+		_FLUSH();
 
 		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 			sna_damage_add_box(&priv->gpu_damage, &r);
@@ -2750,6 +2752,8 @@ gen4_render_copy_one(struct sna *sna,
 	OUT_VERTEX(dx, dy);
 	OUT_VERTEX_F(sx*op->src.scale[0]);
 	OUT_VERTEX_F(sy*op->src.scale[1]);
+
+	_FLUSH();
 }
 
 static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
@@ -3075,6 +3079,8 @@ gen4_render_fill_rectangle(struct sna *sna,
 	OUT_VERTEX(x, y);
 	OUT_VERTEX_F(0);
 	OUT_VERTEX_F(0);
+
+	_FLUSH();
 }
 
 static bool
commit d754bc3435889b5d19085588c9c888dc08f9522e
Author: Cyril Brulebois <kibi at debian.org>
Date:   Mon Nov 7 23:29:29 2011 +0100

    Fix build failure when passing --with-builderstring.
    
    If --with-builderstring="foo" is passed to configure (and sna enabled):
      CC     sna_driver.lo
    src/sna/sna_driver.c: In function 'sna_init_scrn':
    src/sna/sna_driver.c:1023:7: error: token ""foo"" is not valid in preprocessor expressions
    
    Fix the missing defined() around the BUILDER_DESCRIPTION variable.
    
    Signed-off-by: Cyril Brulebois <kibi at debian.org>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 2ccad59..5d2915f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -1071,7 +1071,7 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 #if defined(USE_GIT_DESCRIBE)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled from %s\n", git_version);
-#elif BUILDER_DESCRIPTION
+#elif defined(BUILDER_DESCRIPTION)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled: %s\n", BUILDER_DESCRIPTION);
 #endif
commit 4bad76b4510d8cf4ccf3f64bcd13ebb80fae4d26
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 14:28:41 2012 +0100

    sna/dri: Don't force ring selection if we have semaphores
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 89ecb65..435d22e 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -461,6 +461,12 @@ static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
 		return;
 	}
 
+	if (sna->kgem.has_semaphores) {
+		DBG(("%s: have sempahores, prefering RENDER\n", __FUNCTION__));
+		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+		return;
+	}
+
 	VG_CLEAR(busy);
 	busy.handle = src->handle;
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
commit fb7b584ec9d0ea96175ba3a621699378c6724940
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 14:27:46 2012 +0100

    sna/dri: Correct ring selection for a busy bo
    
    Confused the RENDER ring with the BLT, limiting swap performance.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 8d6c305..89ecb65 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -488,7 +488,7 @@ static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
 	 * the cost of the query.
 	 */
 	mode = KGEM_RENDER;
-	if (busy.busy & (1 << 16))
+	if (busy.busy & (1 << 17))
 		mode = KGEM_BLT;
 	_kgem_set_mode(&sna->kgem, mode);
 }
commit 20e58077fe071fd03d44a97194aa163376dc46a4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 11:21:10 2012 +0100

    sna/gen6: Reduce ring switching for overlapping copy boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 19b8a2d..1d51300 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3243,34 +3243,33 @@ static inline bool prefer_blt_copy(struct sna *sna,
 }
 
 static inline bool
-overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+overlaps(struct sna *sna,
+	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-	 const BoxRec *box, int n)
+	 const BoxRec *box, int n, BoxRec *extents)
 {
-	BoxRec extents;
-
 	if (src_bo != dst_bo)
 		return false;
 
-	extents = box[0];
+	*extents = box[0];
 	while (--n) {
 		box++;
 
-		if (box->x1 < extents.x1)
-			extents.x1 = box->x1;
-		if (box->x2 > extents.x2)
-			extents.x2 = box->x2;
+		if (box->x1 < extents->x1)
+			extents->x1 = box->x1;
+		if (box->x2 > extents->x2)
+			extents->x2 = box->x2;
 
-		if (box->y1 < extents.y1)
-			extents.y1 = box->y1;
-		if (box->y2 > extents.y2)
-			extents.y2 = box->y2;
+		if (box->y1 < extents->y1)
+			extents->y1 = box->y1;
+		if (box->y2 > extents->y2)
+			extents->y2 = box->y2;
 	}
 
-	return (extents.x2 + src_dx > extents.x1 + dst_dx &&
-		extents.x1 + src_dx < extents.x2 + dst_dx &&
-		extents.y2 + src_dy > extents.y1 + dst_dy &&
-		extents.y1 + src_dy < extents.y2 + dst_dy);
+	return (extents->x2 + src_dx > extents->x1 + dst_dx &&
+		extents->x1 + src_dx < extents->x2 + dst_dx &&
+		extents->y2 + src_dy > extents->y1 + dst_dy &&
+		extents->y1 + src_dy < extents->y2 + dst_dy);
 }
 
 static bool
@@ -3280,13 +3279,14 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
+	BoxRec extents;
 
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
 	     overlaps(src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
-		      box, n)));
+		      box, n, &extents)));
 
 	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
@@ -3297,18 +3297,37 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return true;
 
-	if (!(alu == GXcopy || alu == GXclear) ||
-	    overlaps(src_bo, src_dx, src_dy,
-		     dst_bo, dst_dx, dst_dy,
-		     box, n)) {
+	if (!(alu == GXcopy || alu == GXclear)) {
 fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
-						 src, src_bo, src_dx, src_dy,
-						 dst, dst_bo, dst_dx, dst_dy,
-						 box, n);
+						   src, src_bo, src_dx, src_dy,
+						   dst, dst_bo, dst_dx, dst_dy,
+						   box, n);
+	}
+
+	if (overlaps(sna,
+		     src_bo, src_dx, src_dy,
+		     dst_bo, dst_dx, dst_dy,
+		     box, n, &extents)) {
+		if (too_large(extents.x2-extents.x1, extents.y2-extents.y1))
+			goto fallback_blt;
+
+		if ((flags & COPY_LAST || can_switch_rings(sna)) &&
+		    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+		    sna_blt_copy_boxes(sna, alu,
+				       src_bo, src_dx, src_dy,
+				       dst_bo, dst_dx, dst_dy,
+				       dst->drawable.bitsPerPixel,
+				       box, n))
+			return true;
+
+		return sna_render_copy_boxes__overlap(sna, alu,
+						      src, src_bo, src_dx, src_dy,
+						      dst, dst_bo, dst_dx, dst_dy,
+						      box, n, &extents);
 	}
 
 	if (dst->drawable.depth == src->drawable.depth) {
@@ -3330,9 +3349,9 @@ fallback_blt:
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
-		BoxRec extents = box[0];
 		int i;
 
+		extents = box[0];
 		for (i = 1; i < n; i++) {
 			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
@@ -3360,9 +3379,9 @@ fallback_blt:
 
 	tmp.src.card_format = gen6_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
-		BoxRec extents = box[0];
 		int i;
 
+		extents = box[0];
 		for (i = 1; i < n; i++) {
 			if (extents.x1 < box[i].x1)
 				extents.x1 = box[i].x1;
commit 2345227663f443baa3c30bdf8b8c33827b7f4947
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 11:12:12 2012 +0100

    sna/gen7: Use a temporary to avoid switching rings for overlapping copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 9ef5e17..954e42f 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3332,35 +3332,30 @@ static inline bool
 overlaps(struct sna *sna,
 	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-	 const BoxRec *box, int n)
+	 const BoxRec *box, int n, BoxRec *extents)
 {
-	BoxRec extents;
-
 	if (src_bo != dst_bo)
 		return false;
 
-	if (can_switch_rings(sna))
-		return true;
-
-	extents = box[0];
+	*extents = box[0];
 	while (--n) {
 		box++;
 
-		if (box->x1 < extents.x1)
-			extents.x1 = box->x1;
-		if (box->x2 > extents.x2)
-			extents.x2 = box->x2;
+		if (box->x1 < extents->x1)
+			extents->x1 = box->x1;
+		if (box->x2 > extents->x2)
+			extents->x2 = box->x2;
 
-		if (box->y1 < extents.y1)
-			extents.y1 = box->y1;
-		if (box->y2 > extents.y2)
-			extents.y2 = box->y2;
+		if (box->y1 < extents->y1)
+			extents->y1 = box->y1;
+		if (box->y2 > extents->y2)
+			extents->y2 = box->y2;
 	}
 
-	return (extents.x2 + src_dx > extents.x1 + dst_dx &&
-		extents.x1 + src_dx < extents.x2 + dst_dx &&
-		extents.y2 + src_dy > extents.y1 + dst_dy &&
-		extents.y1 + src_dy < extents.y2 + dst_dy);
+	return (extents->x2 + src_dx > extents->x1 + dst_dx &&
+		extents->x1 + src_dx < extents->x2 + dst_dx &&
+		extents->y2 + src_dy > extents->y1 + dst_dy &&
+		extents->y1 + src_dy < extents->y2 + dst_dy);
 }
 
 static bool
@@ -3370,6 +3365,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
+	BoxRec extents;
 
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
@@ -3377,7 +3373,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	     overlaps(sna,
 		      src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
-		      box, n)));
+		      box, n, &extents)));
 
 	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
@@ -3388,44 +3384,37 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return true;
 
-	if (too_large(dst->drawable.width, dst->drawable.height) ||
-	    sna_blt_compare_depth(&src->drawable, &dst->drawable)) {
-		BoxRec extents = box[0];
-		int i;
+	if (!(alu == GXcopy || alu == GXclear)) {
+fallback_blt:
+		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
+			return false;
 
-		for (i = 1; i < n; i++) {
-			if (box[i].x1 < extents.x1)
-				extents.x1 = box[i].x1;
-			if (box[i].y1 < extents.y1)
-				extents.y1 = box[i].y1;
+		return sna_blt_copy_boxes_fallback(sna, alu,
+						   src, src_bo, src_dx, src_dy,
+						   dst, dst_bo, dst_dx, dst_dy,
+						   box, n);
+	}
 
-			if (box[i].x2 > extents.x2)
-				extents.x2 = box[i].x2;
-			if (box[i].y2 > extents.y2)
-				extents.y2 = box[i].y2;
-		}
-		if (too_large(extents.x2 - extents.x1, extents.y2 - extents.y1) &&
+	if (overlaps(sna,
+		     src_bo, src_dx, src_dy,
+		     dst_bo, dst_dx, dst_dy,
+		     box, n, &extents)) {
+		if (too_large(extents.x2-extents.x1, extents.y2-extents.y1))
+			goto fallback_blt;
+
+		if ((flags & COPY_LAST || can_switch_rings(sna)) &&
+		    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 		    sna_blt_copy_boxes(sna, alu,
 				       src_bo, src_dx, src_dy,
 				       dst_bo, dst_dx, dst_dy,
 				       dst->drawable.bitsPerPixel,
 				       box, n))
 			return true;
-	}
 
-	if (!(alu == GXcopy || alu == GXclear) ||
-	    overlaps(sna,
-		     src_bo, src_dx, src_dy,
-		     dst_bo, dst_dx, dst_dy,
-		     box, n)) {
-fallback_blt:
-		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return false;
-
-		return sna_blt_copy_boxes_fallback(sna, alu,
-						 src, src_bo, src_dx, src_dy,
-						 dst, dst_bo, dst_dx, dst_dy,
-						 box, n);
+		return sna_render_copy_boxes__overlap(sna, alu,
+						      src, src_bo, src_dx, src_dy,
+						      dst, dst_bo, dst_dx, dst_dy,
+						      box, n, &extents);
 	}
 
 	if (dst->drawable.depth == src->drawable.depth) {
@@ -3447,9 +3436,9 @@ fallback_blt:
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
-		BoxRec extents = box[0];
 		int i;
 
+		extents = box[0];
 		for (i = 1; i < n; i++) {
 			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
@@ -3477,9 +3466,9 @@ fallback_blt:
 
 	tmp.src.card_format = gen7_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
-		BoxRec extents = box[0];
 		int i;
 
+		extents = box[0];
 		for (i = 1; i < n; i++) {
 			if (extents.x1 < box[i].x1)
 				extents.x1 = box[i].x1;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 9d4b926..f507f49 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1950,3 +1950,40 @@ sna_render_composite_redirect_done(struct sna *sna,
 		kgem_bo_destroy(&sna->kgem, op->dst.bo);
 	}
 }
+
+bool
+sna_render_copy_boxes__overlap(struct sna *sna, uint8_t alu,
+			       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			       const BoxRec *box, int n, const BoxRec *extents)
+{
+	ScreenPtr screen = dst->drawable.pScreen;
+	struct kgem_bo *bo;
+	PixmapPtr tmp;
+	bool ret = false;
+
+	tmp = screen->CreatePixmap(screen,
+				   extents->x2 - extents->x1,
+				   extents->y2 - extents->y1,
+				   dst->drawable.depth,
+				   SNA_CREATE_SCRATCH);
+	if (tmp == NULL)
+		return false;
+
+	bo = sna_pixmap_get_bo(tmp);
+	if (bo == NULL)
+		goto out;
+
+	ret = (sna->render.copy_boxes(sna, alu,
+				      src, src_bo, src_dx, src_dy,
+				      tmp, bo, -extents->x1, -extents->y1,
+				      box, n , 0) &&
+	       sna->render.copy_boxes(sna, alu,
+				      tmp, bo, -extents->x1, -extents->y1,
+				      dst, dst_bo, dst_dx, dst_dy,
+				      box, n , 0));
+
+out:
+	screen->DestroyPixmap(tmp);
+	return ret;
+}
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 68bb901..a2bcb45 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -704,6 +704,12 @@ sna_render_composite_redirect_done(struct sna *sna,
 				   const struct sna_composite_op *op);
 
 bool
+sna_render_copy_boxes__overlap(struct sna *sna, uint8_t alu,
+			       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			       const BoxRec *box, int n, const BoxRec *extents);
+
+bool
 sna_composite_mask_is_opaque(PicturePtr mask);
 
 #endif /* SNA_RENDER_H */
commit 61ec2999afd5dad22580e024421f13afe7c82f3a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 10:33:05 2012 +0100

    sna/gen4: Restore w/a flush for video
    
    One flush removal too many, keep those fingers crossed that the others
    do not make an unwanted return.
    
    Reported-by: Roman Jarosz <kedgedev at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=53119
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d89ba5f..d2dc366 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1682,6 +1682,8 @@ gen4_render_video(struct sna *sna,
 		OUT_VERTEX_F((box->x1 - dxo) * src_scale_x);
 		OUT_VERTEX_F((box->y1 - dyo) * src_scale_y);
 
+		FLUSH(&tmp);
+
 		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 			sna_damage_add_box(&priv->gpu_damage, &r);
 			sna_damage_subtract_box(&priv->cpu_damage, &r);
commit 5833ef173a01afb710acf10e806b83c5ca6efc09
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Aug 4 09:31:41 2012 +0100

    2.20.3 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 7e267a6..5a9c495 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,32 @@
+Release 2.20.3 (2012-08-04)
+===========================
+Just a minor bugfix for gen4 chipsets (965gm, gm45 and friends) that
+crept into 2.20.2. As an added bonus, the pessimistic workaround for a
+GPU hang on gen4 has been relaxed and the shaders have been overhauled
+which should pave the way to eliminating the last of the uncommon CPU
+operations, along with immediately realising a small perforamnce
+improvement.
+
+Bugs fixed since 2.20.2:
+
+ * Update DPMS bookkeeping after modeset
+   https://bugs.freedesktop.org/show_bug.cgi?id=52142
+
+ * Avoid overlapping gpu/cpu damage after ignoring cpu damage in the
+   consideration of placement for the operation.
+
+ * Enable acceleration by default on 830gm/845g. The GMCH on this pair
+   of chipsets is notoriously incoherent, so the GPU is almost certainly
+   going to hang at some point, though unlikely to hang the system and
+   should automatically disable acceleration (and thence behave
+   identically as if the acceleration was disabled from the start).
+   Option "NoAccel" can be used to disable all 2D acceleration and
+   Option "DRI" can be used to disable all 3D acceleration.
+   https://bugs.freedesktop.org/show_bug.cgi?id=52624
+
+ * Fix vertex bookkeeping for gen4 that was causing corruption in the
+   command stream.
+
 Release 2.20.2 (2012-07-27)
 ===========================
 For the last 9 months, since 2.16.901, we have been shipping a driver that
diff --git a/configure.ac b/configure.ac
index 2a8d08b..7ffbb75 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.2],
+        [2.20.3],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 036b90f099af21e60fb4c3684616daf1927f705e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 3 21:41:59 2012 +0100

    sna/gen7: Correct number of texture coordinates used for video
    
    Fixes regression from
    
    commit 33c028f8be829caa4fdb9416ff177dc71f24b68e
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Wed Aug 1 01:17:50 2012 +0100
    
        sna/gen6+: Reduce floats-per-vertex for spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index b4a9223..9ef5e17 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2086,7 +2086,7 @@ gen7_render_video(struct sna *sna,
 			       is_planar_fourcc(frame->id) ?
 			       GEN7_WM_KERNEL_VIDEO_PLANAR :
 			       GEN7_WM_KERNEL_VIDEO_PACKED,
-			       1);
+			       2);
 	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
commit 05dcc5f1699ba90fc14c50882e8d4be89bc4a4f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 3 15:08:45 2012 +0100

    Pass the chipset info through driverPrivate rather than a global pointer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 0e27c48..1ef06fb 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -186,7 +186,7 @@ static void PreInitCleanup(ScrnInfoPtr scrn)
 static void intel_check_chipset_option(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	intel->info = intel_detect_chipset(scrn, intel->pEnt, intel->PciInfo);
+	intel_detect_chipset(scrn, intel->pEnt, intel->PciInfo);
 }
 
 static Bool I830GetEarlyOptions(ScrnInfoPtr scrn)
@@ -458,14 +458,15 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 	if (flags & PROBE_DETECT)
 		return TRUE;
 
-	intel = intel_get_screen_private(scrn);
-	if (intel == NULL) {
-		intel = xnfcalloc(sizeof(intel_screen_private), 1);
+	if (((uintptr_t)scrn->driverPrivate) & 1) {
+		intel = xnfcalloc(sizeof(*intel), 1);
 		if (intel == NULL)
 			return FALSE;
 
+		intel->info = (void *)((uintptr_t)scrn->driverPrivate & ~1);
 		scrn->driverPrivate = intel;
 	}
+	intel = intel_get_screen_private(scrn);
 	intel->scrn = scrn;
 	intel->pEnt = pEnt;
 
diff --git a/src/intel_driver.h b/src/intel_driver.h
index d760cb4..882d889 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -238,9 +238,9 @@ struct intel_device_info {
 	int gen;
 };
 
-const struct intel_device_info *
-intel_detect_chipset(ScrnInfoPtr scrn,
-		     EntityInfoPtr ent, struct pci_device *pci);
+void intel_detect_chipset(ScrnInfoPtr scrn,
+			  EntityInfoPtr ent,
+			  struct pci_device *pci);
 
 
 #endif /* INTEL_DRIVER_H */
diff --git a/src/intel_module.c b/src/intel_module.c
index f1d9fc0..ae19f75 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -44,8 +44,6 @@
 #include "legacy/legacy.h"
 #include "sna/sna_module.h"
 
-static struct intel_device_info *chipset_info;
-
 static const struct intel_device_info intel_generic_info = {
 	.gen = -1,
 };
@@ -227,9 +225,10 @@ static const struct pci_id_match intel_device_match[] = {
 	{ 0, 0, 0 },
 };
 
-const struct intel_device_info *
+void
 intel_detect_chipset(ScrnInfoPtr scrn,
-		     EntityInfoPtr ent, struct pci_device *pci)
+		     EntityInfoPtr ent,
+		     struct pci_device *pci)
 {
 	MessageType from = X_PROBED;
 	const char *name = NULL;
@@ -258,7 +257,6 @@ intel_detect_chipset(ScrnInfoPtr scrn,
 	}
 
 	scrn->chipset = name;
-	return chipset_info;
 }
 
 /*
@@ -368,8 +366,6 @@ static Bool intel_pci_probe(DriverPtr		driver,
 	PciChipsets intel_pci_chipsets[NUM_CHIPSETS];
 	unsigned i;
 
-	chipset_info = (void *)match_data;
-
 	if (!has_kernel_mode_setting(device)) {
 #if KMS_ONLY
 		return FALSE;
@@ -404,6 +400,7 @@ static Bool intel_pci_probe(DriverPtr		driver,
 	scrn->driverVersion = INTEL_VERSION;
 	scrn->driverName = INTEL_DRIVER_NAME;
 	scrn->name = INTEL_NAME;
+	scrn->driverPrivate = (void *)(match_data | 1);
 	scrn->Probe = NULL;
 
 #if !KMS_ONLY
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 949fd27..fc6369e 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -152,7 +152,7 @@ static int i810_pitches[] = {
 static Bool
 I810GetRec(ScrnInfoPtr scrn)
 {
-   if (scrn->driverPrivate)
+   if (((uintptr_t)scrn->driverPrivate & 1) == 0)
       return TRUE;
 
    scrn->driverPrivate = xnfcalloc(sizeof(I810Rec), 1);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index bd31996..2ccad59 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -383,14 +383,15 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 
 	sna_selftest();
 
-	sna = to_sna(scrn);
-	if (sna == NULL) {
+	if (((uintptr_t)scrn->driverPrivate) & 1) {
 		sna = xnfcalloc(sizeof(struct sna), 1);
 		if (sna == NULL)
 			return FALSE;
 
+		sna->info = (void *)((uintptr_t)scrn->driverPrivate & ~1);
 		scrn->driverPrivate = sna;
 	}
+	sna = to_sna(scrn);
 	sna->scrn = scrn;
 	sna->pEnt = pEnt;
 
@@ -438,7 +439,7 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (sna->Options == NULL)
 		return FALSE;
 
-	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
+	intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
 
 	kgem_init(&sna->kgem, fd, sna->PciInfo, sna->info->gen);
 	if (xf86ReturnOptValBool(sna->Options, OPTION_ACCEL_DISABLE, FALSE)) {
commit 2b3f4ca33a00440a7005fef69099f8dbaddbbad1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Aug 3 14:27:51 2012 +0100

    Unexport intel_chipsets
    
    Only used by the core module code, so make it static.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.h b/src/intel_driver.h
index d88f225..d760cb4 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -234,7 +234,6 @@
 #define SUPPORTS_YTILING(pI810) (INTEL_INFO(intel)->gen >= 40)
 #define HAS_BLT(pI810) (INTEL_INFO(intel)->gen >= 60)
 
-extern SymTabRec *intel_chipsets;
 struct intel_device_info {
 	int gen;
 };
diff --git a/src/intel_module.c b/src/intel_module.c
index 7640916..f1d9fc0 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -155,7 +155,7 @@ static const SymTabRec _intel_chipsets[] = {
 };
 #define NUM_CHIPSETS (sizeof(_intel_chipsets) / sizeof(_intel_chipsets[0]))
 
-SymTabRec *intel_chipsets = (SymTabRec *) _intel_chipsets;
+static SymTabRec *intel_chipsets = (SymTabRec *) _intel_chipsets;
 
 #define INTEL_DEVICE_MATCH(d,i) \
     { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
commit 5ff749727d3590368806508ac0d0fa8efd1d1d51
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 22:21:29 2012 +0100

    sna/gen7: Add constant variations and hookup a basic GT descriptor for Haswell
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 193de00..b4a9223 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -83,20 +83,29 @@ struct gt_info {
 	} urb;
 };
 
-static const struct gt_info gt1_info = {
+static const struct gt_info ivb_gt1_info = {
 	.max_vs_threads = 36,
 	.max_gs_threads = 36,
-	.max_wm_threads = (48-1) << GEN7_PS_MAX_THREADS_SHIFT,
+	.max_wm_threads = (48-1) << IVB_PS_MAX_THREADS_SHIFT,
 	.urb = { 128, 512, 192 },
 };
 
-static const struct gt_info gt2_info = {
+static const struct gt_info ivb_gt2_info = {
 	.max_vs_threads = 128,
 	.max_gs_threads = 128,
-	.max_wm_threads = (172-1) << GEN7_PS_MAX_THREADS_SHIFT,
+	.max_wm_threads = (172-1) << IVB_PS_MAX_THREADS_SHIFT,
 	.urb = { 256, 704, 320 },
 };
 
+static const struct gt_info hsw_gt_info = {
+	.max_vs_threads = 8,
+	.max_gs_threads = 8,
+	.max_wm_threads =
+		(8 - 1) << HSW_PS_MAX_THREADS_SHIFT |
+		1 << HSW_PS_SAMPLE_MASK_SHIFT,
+	.urb = { 128, 64, 64 },
+};
+
 static const uint32_t ps_kernel_packed[][4] = {
 #include "exa_wm_src_affine.g7b"
 #include "exa_wm_src_sample_argb.g7b"
@@ -1363,6 +1372,8 @@ gen7_bind_bo(struct sna *sna,
 	ss[5] = 0;
 	ss[6] = 0;
 	ss[7] = 0;
+	if (sna->kgem.gen == 75)
+		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);
 
 	kgem_bo_set_binding(bo, format, offset);
 
@@ -4234,9 +4245,14 @@ static bool gen7_render_setup(struct sna *sna)
 	struct gen7_sampler_state *ss;
 	int i, j, k, l, m;
 
-	state->info = &gt1_info;
-	if (DEVICE_ID(sna->PciInfo) & 0x20)
-		state->info = &gt2_info; /* XXX requires GT_MODE WiZ disabled */
+	if (sna->kgem.gen == 70) {
+		state->info = &ivb_gt1_info;
+		if (DEVICE_ID(sna->PciInfo) & 0x20)
+			state->info = &ivb_gt2_info; /* XXX requires GT_MODE WiZ disabled */
+	} else if (sna->kgem.gen == 75) {
+		state->info = &hsw_gt_info;
+	} else
+		return false;
 
 	sna_static_stream_init(&general);
 
diff --git a/src/sna/gen7_render.h b/src/sna/gen7_render.h
index 8de52a4..1661d4c 100644
--- a/src/sna/gen7_render.h
+++ b/src/sna/gen7_render.h
@@ -1237,6 +1237,17 @@ struct gen7_sampler_state {
 #define GEN7_SURFACE_DEPTH_SHIFT         21
 #define GEN7_SURFACE_PITCH_SHIFT         0
 
+#define HSW_SWIZZLE_ZERO		0
+#define HSW_SWIZZLE_ONE			1
+#define HSW_SWIZZLE_RED			4
+#define HSW_SWIZZLE_GREEN		5
+#define HSW_SWIZZLE_BLUE		6
+#define HSW_SWIZZLE_ALPHA		7
+#define __HSW_SURFACE_SWIZZLE(r,g,b,a) \
+	((a) << 16 | (b) << 19 | (g) << 22 | (r) << 25)
+#define HSW_SURFACE_SWIZZLE(r,g,b,a) \
+	__HSW_SURFACE_SWIZZLE(HSW_SWIZZLE_##r, HSW_SWIZZLE_##g, HSW_SWIZZLE_##b, HSW_SWIZZLE_##a)
+
 /* _3DSTATE_VERTEX_BUFFERS on GEN7*/
 /* DW1 */
 #define GEN7_VB0_ADDRESS_MODIFYENABLE   (1 << 14)
@@ -1281,7 +1292,9 @@ struct gen7_sampler_state {
 # define GEN7_PS_FLOATING_POINT_MODE_ALT                (1 << 16)
 /* DW3: scratch space */
 /* DW4 */
-# define GEN7_PS_MAX_THREADS_SHIFT                      24
+# define IVB_PS_MAX_THREADS_SHIFT                      24
+# define HSW_PS_MAX_THREADS_SHIFT                      23
+# define HSW_PS_SAMPLE_MASK_SHIFT                      12
 # define GEN7_PS_PUSH_CONSTANT_ENABLE                   (1 << 11)
 # define GEN7_PS_ATTRIBUTE_ENABLE                       (1 << 10)
 # define GEN7_PS_OMASK_TO_RENDER_TARGET                 (1 << 9)
commit cd028cad3dc9b059a3d83b818d581f86e16ec317
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 13:17:11 2012 +0100

    sna: Limit the batch size on all gen7 variants
    
    It seems the limit on the surface state size is common across the family.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 635dd24..d7458ec 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -811,7 +811,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
 		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
-	if (gen == 70)
+	if (gen >= 70 && gen < 80)
 		kgem->batch_size = 16*1024;
 	if (!kgem->has_relaxed_delta)
 		kgem->batch_size = 4*1024;
commit 4cd9ec9d404d934268952a1058afa07741b09efe
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri May 4 18:26:46 2012 +0200

    uxa: fix 3DSTATE_PS to fill in number of samples for Haswell
    
    The sample mask value must match what is set for 3DSTATE_SAMPLE_MASK,
    through gen6_upload_invariant_states().
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
    Reviewed-by: Kenneth Graunke <kenneth at whitecape.org>

diff --git a/src/i965_reg.h b/src/i965_reg.h
index 45b6d08..4bb5e4d 100644
--- a/src/i965_reg.h
+++ b/src/i965_reg.h
@@ -221,6 +221,7 @@
 /* DW4 */
 # define GEN7_PS_MAX_THREADS_SHIFT_IVB                  24
 # define GEN7_PS_MAX_THREADS_SHIFT_HSW                  23
+# define GEN7_PS_SAMPLE_MASK_SHIFT_HSW			12
 # define GEN7_PS_PUSH_CONSTANT_ENABLE                   (1 << 11)
 # define GEN7_PS_ATTRIBUTE_ENABLE                       (1 << 10)
 # define GEN7_PS_OMASK_TO_RENDER_TARGET                 (1 << 9)
diff --git a/src/i965_render.c b/src/i965_render.c
index f7b21c8..42b1959 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2695,9 +2695,11 @@ gen7_composite_wm_state(intel_screen_private *intel,
 {
 	int num_surfaces = has_mask ? 3 : 2;
 	unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
+	unsigned int num_samples = 0;
 
 	if (IS_HSW(intel)) {
 		max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
+		num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
 	}
 
 	if (intel->gen6_render_state.kernel == bo)
@@ -2715,7 +2717,7 @@ gen7_composite_wm_state(intel_screen_private *intel,
 	OUT_BATCH((1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
 		  (num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
 	OUT_BATCH(0); /* scratch space base offset */
-	OUT_BATCH(((48 - 1) << max_threads_shift) |
+	OUT_BATCH(((48 - 1) << max_threads_shift) | num_samples |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
 		  GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH((6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
diff --git a/src/i965_video.c b/src/i965_video.c
index 58b6222..3276788 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1641,9 +1641,11 @@ gen7_upload_wm_state(ScrnInfoPtr scrn, Bool is_packed)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
+	unsigned int num_samples = 0;
 
 	if (IS_HSW(intel)) {
 		max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
+		num_samples = 1 << GEN7_PS_SAMPLE_MASK_SHIFT_HSW;
 	}
 
 	/* disable WM constant buffer */
@@ -1678,7 +1680,7 @@ gen7_upload_wm_state(ScrnInfoPtr scrn, Bool is_packed)
 
 	OUT_BATCH(0); /* scratch space base offset */
 	OUT_BATCH(
-		((48 - 1) << max_threads_shift) |
+		((48 - 1) << max_threads_shift) | num_samples |
 		GEN7_PS_ATTRIBUTE_ENABLE |
 		GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH(
commit 412668464cf9505629eac20001701af3402dc6e8
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri May 4 17:55:10 2012 +0200

    uxa: set "Shader Channel Select" fields in surface state for Haswell
    
    For normal behaviour, each Shader Channel Select should be set to the
    value indicating that same channel. i.e. Shader Channel Select Red is
    set to SCS_RED, Shader Channel Select Green is set to SCS_GREEN, etc.
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
    Reviewed-by: Kenneth Graunke <kenneth at whitecape.org>

diff --git a/src/brw_defines.h b/src/brw_defines.h
index 0df2491..e580a8f 100644
--- a/src/brw_defines.h
+++ b/src/brw_defines.h
@@ -469,6 +469,13 @@
 #define BRW_BORDER_COLOR_MODE_DEFAULT	0
 #define BRW_BORDER_COLOR_MODE_LEGACY	1
 
+#define HSW_SCS_ZERO					0
+#define HSW_SCS_ONE						1
+#define HSW_SCS_RED						4
+#define HSW_SCS_GREEN					5
+#define HSW_SCS_BLUE					6
+#define HSW_SCS_ALPHA					7
+
 #define BRW_TEXCOORDMODE_WRAP            0
 #define BRW_TEXCOORDMODE_MIRROR          1
 #define BRW_TEXCOORDMODE_CLAMP           2
diff --git a/src/brw_structs.h b/src/brw_structs.h
index f4dc927..20c2f85 100644
--- a/src/brw_structs.h
+++ b/src/brw_structs.h
@@ -1659,7 +1659,11 @@ struct gen7_surface_state
 
 	struct {
 		unsigned int resource_min_lod:12;
-		unsigned int pad0:16;
+		unsigned int pad0:4;
+		unsigned int shader_chanel_select_a:3;
+		unsigned int shader_chanel_select_b:3;
+		unsigned int shader_chanel_select_g:3;
+		unsigned int shader_chanel_select_r:3;
 		unsigned int alpha_clear_color:1;
 		unsigned int blue_clear_color:1;
 		unsigned int green_clear_color:1;
diff --git a/src/i965_render.c b/src/i965_render.c
index 30fef57..f7b21c8 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -1392,6 +1392,13 @@ gen7_set_picture_surface_state(intel_screen_private *intel,
 	ss->ss2.width = pixmap->drawable.width - 1;
 	ss->ss3.pitch = intel_pixmap_pitch(pixmap) - 1;
 
+	if (IS_HSW(intel)) {
+		ss->ss7.shader_chanel_select_r = HSW_SCS_RED;
+		ss->ss7.shader_chanel_select_g = HSW_SCS_GREEN;
+		ss->ss7.shader_chanel_select_b = HSW_SCS_BLUE;
+		ss->ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
+	}
+
 	dri_bo_emit_reloc(intel->surface_bo,
 			  read_domains, write_domain,
 			  0,
diff --git a/src/i965_video.c b/src/i965_video.c
index bba282d..58b6222 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -510,6 +510,13 @@ static void gen7_create_dst_surface_state(ScrnInfoPtr scrn,
 
 	dest_surf_state.ss3.pitch = intel_pixmap_pitch(pixmap) - 1;
 
+	if (IS_HSW(intel)) {
+		dest_surf_state.ss7.shader_chanel_select_r = HSW_SCS_RED;
+		dest_surf_state.ss7.shader_chanel_select_g = HSW_SCS_GREEN;
+		dest_surf_state.ss7.shader_chanel_select_b = HSW_SCS_BLUE;
+		dest_surf_state.ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
+	}
+
 	dri_bo_subdata(surf_bo,
 		       offset, sizeof(dest_surf_state),
 		       &dest_surf_state);
@@ -525,6 +532,7 @@ static void gen7_create_src_surface_state(ScrnInfoPtr scrn,
 					drm_intel_bo *surface_bo,
 					uint32_t offset)
 {
+	intel_screen_private * const intel = intel_get_screen_private(scrn);
 	struct gen7_surface_state src_surf_state;
 
 	memset(&src_surf_state, 0, sizeof(src_surf_state));
@@ -547,6 +555,13 @@ static void gen7_create_src_surface_state(ScrnInfoPtr scrn,
 
 	src_surf_state.ss3.pitch = src_pitch - 1;
 
+	if (IS_HSW(intel)) {
+		src_surf_state.ss7.shader_chanel_select_r = HSW_SCS_RED;
+		src_surf_state.ss7.shader_chanel_select_g = HSW_SCS_GREEN;
+		src_surf_state.ss7.shader_chanel_select_b = HSW_SCS_BLUE;
+		src_surf_state.ss7.shader_chanel_select_a = HSW_SCS_ALPHA;
+	}
+
 	dri_bo_subdata(surface_bo,
 		       offset, sizeof(src_surf_state),
 		       &src_surf_state);
commit a47ba68996f117fabcb601d35bcc5f99cbcd6122
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri May 4 17:17:22 2012 +0200

    uxa: fix max PS threads shift value for Haswell
    
    The maximum number of threads is now a 9-bit value. Thus, one more bit
    towards LSB was re-used. i.e. bit position is now 23 instead of 24 on
    Ivy Bridge.
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
    Reviewed-by: Kenneth Graunke <kenneth at whitecape.org>

diff --git a/src/i965_reg.h b/src/i965_reg.h
index e7b0d15..45b6d08 100644
--- a/src/i965_reg.h
+++ b/src/i965_reg.h
@@ -219,7 +219,8 @@
 # define GEN7_PS_FLOATING_POINT_MODE_ALT                (1 << 16)
 /* DW3: scratch space */
 /* DW4 */
-# define GEN7_PS_MAX_THREADS_SHIFT                      24
+# define GEN7_PS_MAX_THREADS_SHIFT_IVB                  24
+# define GEN7_PS_MAX_THREADS_SHIFT_HSW                  23
 # define GEN7_PS_PUSH_CONSTANT_ENABLE                   (1 << 11)
 # define GEN7_PS_ATTRIBUTE_ENABLE                       (1 << 10)
 # define GEN7_PS_OMASK_TO_RENDER_TARGET                 (1 << 9)
diff --git a/src/i965_render.c b/src/i965_render.c
index 2182df8..30fef57 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2687,6 +2687,11 @@ gen7_composite_wm_state(intel_screen_private *intel,
 			drm_intel_bo *bo)
 {
 	int num_surfaces = has_mask ? 3 : 2;
+	unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
+
+	if (IS_HSW(intel)) {
+		max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
+	}
 
 	if (intel->gen6_render_state.kernel == bo)
 		return;
@@ -2703,7 +2708,7 @@ gen7_composite_wm_state(intel_screen_private *intel,
 	OUT_BATCH((1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
 		  (num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
 	OUT_BATCH(0); /* scratch space base offset */
-	OUT_BATCH(((48 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
+	OUT_BATCH(((48 - 1) << max_threads_shift) |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
 		  GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH((6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
diff --git a/src/i965_video.c b/src/i965_video.c
index bcd6063..bba282d 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1625,6 +1625,11 @@ static void
 gen7_upload_wm_state(ScrnInfoPtr scrn, Bool is_packed)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
+	unsigned int max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_IVB;
+
+	if (IS_HSW(intel)) {
+		max_threads_shift = GEN7_PS_MAX_THREADS_SHIFT_HSW;
+	}
 
 	/* disable WM constant buffer */
 	OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (7 - 2));
@@ -1658,7 +1663,7 @@ gen7_upload_wm_state(ScrnInfoPtr scrn, Bool is_packed)
 
 	OUT_BATCH(0); /* scratch space base offset */
 	OUT_BATCH(
-		((48 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
+		((48 - 1) << max_threads_shift) |
 		GEN7_PS_ATTRIBUTE_ENABLE |
 		GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH(
commit ce4421e175ceb9259208c7c223af8d66282c3db3
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri May 4 17:09:19 2012 +0200

    uxa: use at least 64 URB entries for Haswell
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
    Reviewed-by: Kenneth Graunke <kenneth at whitecape.org>

diff --git a/src/i965_3d.c b/src/i965_3d.c
index d4d38e5..a18db12 100644
--- a/src/i965_3d.c
+++ b/src/i965_3d.c
@@ -104,12 +104,17 @@ gen6_upload_urb(intel_screen_private *intel)
 void
 gen7_upload_urb(intel_screen_private *intel)
 {
+	unsigned int num_urb_entries = 32;
+
+	if (IS_HSW(intel))
+		num_urb_entries = 64;
+
 	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
 	OUT_BATCH(8); /* in 1KBs */
 
 	OUT_BATCH(GEN7_3DSTATE_URB_VS | (2 - 2));
 	OUT_BATCH(
-		(32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
+		(num_urb_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
 		(2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
 		(1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
 
commit 8c880aa34c522b0d67cbb932771f00c947d00dec
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri May 4 17:43:19 2012 +0200

    uxa: add IS_HSW() macro to distinguish Haswell from Ivybridge
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>

diff --git a/src/intel_driver.h b/src/intel_driver.h
index 31c11f6..d88f225 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -218,6 +218,7 @@
 #define IS_GEN5(intel) IS_GENx(intel, 5)
 #define IS_GEN6(intel) IS_GENx(intel, 6)
 #define IS_GEN7(intel) IS_GENx(intel, 7)
+#define IS_HSW(intel) (INTEL_INFO(intel)->gen == 75)
 
 /* Some chips have specific errata (or limits) that we need to workaround. */
 #define IS_I830(intel) (DEVICE_ID((intel)->PciInfo) == PCI_CHIP_I830_M)
commit 0c0d1d956a8ba37d9e6f4a5e4f52018c8ce498e5
Author: Gwenole Beauchesne <gwenole.beauchesne at intel.com>
Date:   Fri Aug 3 12:03:00 2012 +0100

    Introduce a chipset identifier for Haswell (Ivybridge successor)
    
    Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne at intel.com>

diff --git a/src/intel_module.c b/src/intel_module.c
index e1755ff..7640916 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -98,6 +98,10 @@ static const struct intel_device_info intel_ivybridge_info = {
 	.gen = 70,
 };
 
+static const struct intel_device_info intel_haswell_info = {
+	.gen = 75,
+};
+
 static const SymTabRec _intel_chipsets[] = {
 	{PCI_CHIP_I810,				"i810"},
 	{PCI_CHIP_I810_DC100,			"i810-dc100"},
commit 146959dd5ef28384a3db4fce4bf7840f2b3ec58c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 23:43:15 2012 +0100

    sna: Drop the clear flag as we discard the GPU damage
    
    Hopefully only to keep the sanity checks happy...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 06c9fcc..496f57b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -388,6 +388,7 @@ sna_copy_init_blt(struct sna_copy_op *copy,
 static void sna_pixmap_free_gpu(struct sna *sna, struct sna_pixmap *priv)
 {
 	sna_damage_destroy(&priv->gpu_damage);
+	priv->clear = false;
 
 	if (priv->gpu_bo && !priv->pinned) {
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
commit 7404e3085b2ee36fa24f77a02d156b4b1d2dff60
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 23:37:35 2012 +0100

    sna: Ensure we only mark a clear for a fill on the GPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8760f91..06c9fcc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9035,7 +9035,6 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 						sna_damage_all(damage,
 							       pixmap->drawable.width,
 							       pixmap->drawable.height);
-						sna_pixmap(pixmap)->undamaged = false;
 					} else
 						sna_damage_add_box(damage, &r);
 				}
@@ -9045,12 +9044,19 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 				    r.x2 - r.x1 == pixmap->drawable.width &&
 				    r.y2 - r.y1 == pixmap->drawable.height) {
 					struct sna_pixmap *priv = sna_pixmap(pixmap);
+					if (bo == priv->gpu_bo) {
+						sna_damage_all(&priv->gpu_damage,
+							       pixmap->drawable.width,
+							       pixmap->drawable.height);
+						sna_damage_destroy(&priv->cpu_damage);
+						list_del(&priv->list);
+						priv->undamaged = false;
+						priv->clear = true;
+						priv->clear_color = gc->alu == GXcopy ? pixel : 0;
 
-					priv->clear = true;
-					priv->clear_color = gc->alu == GXcopy ? pixel : 0;
-
-					DBG(("%s: pixmap=%ld, marking clear [%08x]\n",
-					     __FUNCTION__, pixmap->drawable.serialNumber, priv->clear_color));
+						DBG(("%s: pixmap=%ld, marking clear [%08x]\n",
+						     __FUNCTION__, pixmap->drawable.serialNumber, priv->clear_color));
+					}
 				}
 			} else
 				success = false;
commit ca46d1c7a18596ea9fe2b0577ccf1d110e3e42ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 20:20:29 2012 +0100

    sna/gen7: Prefer the BLT for self-copies
    
    Looking at the test results for a third time gives the edge to the BLT
    again.

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 96eb86a..193de00 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2470,6 +2470,13 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return true;
+
+		if (dst->pDrawable == src->pDrawable)
+			return true;
+
+		if (src->pDrawable &&
+		    get_drawable_pixmap(dst->pDrawable) == get_drawable_pixmap(src->pDrawable))
+			return true;
 	}
 
 	return false;
@@ -3311,7 +3318,8 @@ static inline bool prefer_blt_copy(struct sna *sna,
 }
 
 static inline bool
-overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+overlaps(struct sna *sna,
+	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 	 const BoxRec *box, int n)
 {
@@ -3320,6 +3328,9 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	if (src_bo != dst_bo)
 		return false;
 
+	if (can_switch_rings(sna))
+		return true;
+
 	extents = box[0];
 	while (--n) {
 		box++;
@@ -3352,7 +3363,8 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
-	     overlaps(src_bo, src_dx, src_dy,
+	     overlaps(sna,
+		      src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
@@ -3391,7 +3403,8 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	if (!(alu == GXcopy || alu == GXclear) ||
-	    overlaps(src_bo, src_dx, src_dy,
+	    overlaps(sna,
+		     src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
 		     box, n)) {
 fallback_blt:
commit e4a3cd3d16447b5d83d1c8c63c342f1240935267
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 17:37:33 2012 +0100

    sna: Add validation of the clear flag to pixmap debugging
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a4287f7..8760f91 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -314,6 +314,11 @@ static void assert_pixmap_damage(PixmapPtr p)
 	if (priv == NULL)
 		return;
 
+	if (priv->clear) {
+		assert(DAMAGE_IS_ALL(priv->gpu_damage));
+		assert(priv->cpu_damage == NULL);
+	}
+
 	if (DAMAGE_IS_ALL(priv->gpu_damage) && DAMAGE_IS_ALL(priv->cpu_damage)) {
 		/* special upload buffer */
 		assert(priv->gpu_bo && priv->gpu_bo->proxy);
commit eaeda34bef711cc566f51dee092a19a3c4ac1a16
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 16:23:05 2012 +0100

    sna: Fix computation of st values for SIMD8 dispatch
    
    Fixes regression with enabling 8-pixels.
    
    Reported-by: Mehran Kholdi <semekh.dev at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=53044
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/brw/brw_wm.c b/src/sna/brw/brw_wm.c
index bd4003d..f54e55e 100644
--- a/src/sna/brw/brw_wm.c
+++ b/src/sna/brw/brw_wm.c
@@ -42,12 +42,11 @@ static void brw_wm_affine_st(struct brw_compile *p, int dw,
 	if (dw == 16) {
 		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
 		uv = p->gen >= 60 ? 6 : 3;
-		uv += 2*channel;
 	} else {
 		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
 		uv = p->gen >= 60 ? 4 : 3;
-		uv += channel;
 	}
+	uv += 2*channel;
 
 	msg++;
 	if (p->gen >= 60) {
@@ -462,9 +461,6 @@ brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch)
 {
 	int src, mask;
 
-	if (dispatch == 8)
-		return false; /* XXX sampler alpha retuns all 0 */
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
 
@@ -495,9 +491,6 @@ brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch)
 {
 	int src, mask;
 
-	if (dispatch == 8)
-		return false; /* XXX sampler alpha retuns all 0 */
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
 
@@ -518,12 +511,11 @@ static void brw_wm_projective_st(struct brw_compile *p, int dw,
 	if (dw == 16) {
 		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
 		uv = p->gen >= 60 ? 6 : 3;
-		uv += 2*channel;
 	} else {
 		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
 		uv = p->gen >= 60 ? 4 : 3;
-		uv += channel;
 	}
+	uv += 2*channel;
 
 	msg++;
 	if (p->gen >= 60) {
@@ -614,9 +606,6 @@ brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch)
 {
 	int src, mask;
 
-	if (dispatch == 8)
-		return false; /* XXX sampler alpha retuns all 0 */
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
 
@@ -647,9 +636,6 @@ brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch)
 {
 	int src, mask;
 
-	if (dispatch == 8)
-		return false; /* XXX sampler alpha retuns all 0 */
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
 
commit 55231eca818c82620c0146384b19b5d5659f6cd6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 16:22:23 2012 +0100

    sna/gen6: Install a fallback 16-pixel shader
    
    In case the DBG options leave no shader compiled, make sure we always
    supply one.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index db7599d..19b8a2d 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -4193,7 +4193,11 @@ static bool gen6_render_setup(struct sna *sna)
 								     wm_kernels[m].data, 32);
 			}
 		}
-		assert(state->wm_kernel[m][0]|state->wm_kernel[m][1]|state->wm_kernel[m][2]);
+		if ((state->wm_kernel[m][0]|state->wm_kernel[m][1]|state->wm_kernel[m][2]) == 0) {
+			state->wm_kernel[m][1] =
+				sna_static_stream_compile_wm(sna, &general,
+							     wm_kernels[m].data, 16);
+		}
 	}
 
 	ss = sna_static_stream_map(&general,
commit 4e79c1fef064ce68914eb644edd7f588be3d7300
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 15:58:43 2012 +0100

    Revert "sna/gen7: Prefer the BLT for self-copies"
    
    This reverts commit 89e75dbcb6749bde7587ecc08abed276c255e7f9.
    
    Having removed the forced stall for a RENDER self-copy, there is no
    longer a need to encourage ring switching.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 193de00..96eb86a 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2470,13 +2470,6 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return true;
-
-		if (dst->pDrawable == src->pDrawable)
-			return true;
-
-		if (src->pDrawable &&
-		    get_drawable_pixmap(dst->pDrawable) == get_drawable_pixmap(src->pDrawable))
-			return true;
 	}
 
 	return false;
@@ -3318,8 +3311,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 }
 
 static inline bool
-overlaps(struct sna *sna,
-	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 	 const BoxRec *box, int n)
 {
@@ -3328,9 +3320,6 @@ overlaps(struct sna *sna,
 	if (src_bo != dst_bo)
 		return false;
 
-	if (can_switch_rings(sna))
-		return true;
-
 	extents = box[0];
 	while (--n) {
 		box++;
@@ -3363,8 +3352,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
-	     overlaps(sna,
-		      src_bo, src_dx, src_dy,
+	     overlaps(src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
@@ -3403,8 +3391,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	if (!(alu == GXcopy || alu == GXclear) ||
-	    overlaps(sna,
-		     src_bo, src_dx, src_dy,
+	    overlaps(src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
 		     box, n)) {
 fallback_blt:
commit 85cef78a40c6a7a0254f8fba685f224eac6038e1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 13:39:36 2012 +0100

    sna/gen7: Simplify the force-stall detection
    
    After reducing the number of conditions where we think we need to force
    the stall on the results, we can then simplify the code to detect
    that remaining case.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5f11741..193de00 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1030,13 +1030,12 @@ gen7_emit_state(struct sna *sna,
 	gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
 	gen7_emit_vertex_elements(sna, op);
 
-	need_stall = false;
-	if (wm_binding_table & 1)
-		need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
-	need_stall |= gen7_emit_binding_table(sna, wm_binding_table & ~1);
+	need_stall = gen7_emit_binding_table(sna, wm_binding_table);
 	need_stall &= gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
+		if (op->dst.bo == op->src.bo || op->dst.bo == op->mask.bo)
+			need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
 		gen7_emit_pipe_invalidate(sna, need_stall);
 		kgem_clear_dirty(&sna->kgem);
 		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
@@ -1779,12 +1778,8 @@ static void gen7_emit_composite_state(struct sna *sna,
 {
 	uint32_t *binding_table;
 	uint16_t offset;
-	bool dirty;
 
 	gen7_get_batch(sna);
-	dirty = false;
-	if (op->dst.bo == op->src.bo || op->dst.bo == op->mask.bo)
-		dirty = kgem_bo_is_dirty(op->dst.bo);
 
 	binding_table = gen7_composite_get_binding_table(sna, &offset);
 
@@ -1816,7 +1811,7 @@ static void gen7_emit_composite_state(struct sna *sna,
 		offset = sna->render_state.gen7.surface_table;
 	}
 
-	gen7_emit_state(sna, op, offset | dirty);
+	gen7_emit_state(sna, op, offset);
 }
 
 static void
commit 9391a2c71f020541a2a62ae68eadd486216a38df
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 12:50:32 2012 +0100

    sna/gen7: Only force a stall for a dirty target if also used as a blend source
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c1993df..5f11741 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1782,7 +1782,9 @@ static void gen7_emit_composite_state(struct sna *sna,
 	bool dirty;
 
 	gen7_get_batch(sna);
-	dirty = kgem_bo_is_dirty(op->dst.bo);
+	dirty = false;
+	if (op->dst.bo == op->src.bo || op->dst.bo == op->mask.bo)
+		dirty = kgem_bo_is_dirty(op->dst.bo);
 
 	binding_table = gen7_composite_get_binding_table(sna, &offset);
 
@@ -3676,7 +3678,6 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 {
 	uint32_t *binding_table;
 	uint16_t offset;
-	bool dirty;
 
 	/* XXX Render Target Fast Clear
 	 * Set RTFC Enable in PS and render a rectangle.
@@ -3685,7 +3686,6 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 	 */
 
 	gen7_get_batch(sna);
-	dirty = kgem_bo_is_dirty(op->dst.bo);
 
 	binding_table = gen7_composite_get_binding_table(sna, &offset);
 
@@ -3707,7 +3707,7 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		offset = sna->render_state.gen7.surface_table;
 	}
 
-	gen7_emit_state(sna, op, offset | dirty);
+	gen7_emit_state(sna, op, offset);
 }
 
 static inline bool prefer_blt_fill(struct sna *sna,
commit 0a4bb8663b9fa9b39d13bfb49aea30f2aaecce78
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 11:19:27 2012 +0100

    sna/gen4: Flush not required between fill vertices, only nomaskcomposite
    
    A small breakthrough... Still need to flush the primitive between state
    changes though.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 35dacd0..d89ba5f 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -61,17 +61,13 @@
 
 #if FLUSH_EVERY_VERTEX
 #define FLUSH(OP) do { \
-	gen4_vertex_flush(sna); \
-	gen4_magic_ca_pass(sna, OP); \
-	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
-} while (0)
-#define FLUSH_NOCA() do { \
-	gen4_vertex_flush(sna); \
-	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
+	if ((OP)->mask.bo == NULL) { \
+		gen4_vertex_flush(sna); \
+		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
+	} \
 } while (0)
 #else
 #define FLUSH(OP)
-#define FLUSH_NOCA()
 #endif
 
 #define GEN4_GRF_BLOCKS(nreg)    ((nreg + 15) / 16 - 1)
@@ -1373,6 +1369,9 @@ gen4_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
 		uint16_t wm_binding_table)
 {
+	if (FLUSH_EVERY_VERTEX)
+		OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH);
+
 	gen4_emit_drawing_rectangle(sna, op);
 	gen4_emit_binding_table(sna, wm_binding_table);
 	gen4_emit_pipelined_pointers(sna, op, op->op, op->u.gen4.wm_kernel);
@@ -1441,15 +1440,6 @@ gen4_render_composite_blt(struct sna *sna,
 	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
 	     r->width, r->height));
 
-	if (FLUSH_EVERY_VERTEX && op->need_magic_ca_pass) {
-		/* We have to reset the state after every FLUSH */
-		if (kgem_check_batch(&sna->kgem, 20)) {
-			gen4_emit_pipelined_pointers(sna, op, op->op,
-						     op->u.gen4.wm_kernel);
-		} else
-			gen4_bind_surfaces(sna, op);
-	}
-
 	gen4_get_rectangles(sna, op, 1, gen4_bind_surfaces);
 	op->prim_emit(sna, op, r);
 
@@ -1692,8 +1682,6 @@ gen4_render_video(struct sna *sna,
 		OUT_VERTEX_F((box->x1 - dxo) * src_scale_x);
 		OUT_VERTEX_F((box->y1 - dyo) * src_scale_y);
 
-		FLUSH(&tmp);
-
 		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 			sna_damage_add_box(&priv->gpu_damage, &r);
 			sna_damage_subtract_box(&priv->cpu_damage, &r);
@@ -2558,7 +2546,6 @@ gen4_render_composite_spans_box(struct sna *sna,
 
 	gen4_get_rectangles(sna, &op->base, 1, gen4_bind_surfaces);
 	op->prim_emit(sna, op, box, opacity);
-	FLUSH_NOCA();
 }
 
 static void
@@ -2761,8 +2748,6 @@ gen4_render_copy_one(struct sna *sna,
 	OUT_VERTEX(dx, dy);
 	OUT_VERTEX_F(sx*op->src.scale[0]);
 	OUT_VERTEX_F(sy*op->src.scale[1]);
-
-	FLUSH(op);
 }
 
 static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
@@ -3088,8 +3073,6 @@ gen4_render_fill_rectangle(struct sna *sna,
 	OUT_VERTEX(x, y);
 	OUT_VERTEX_F(0);
 	OUT_VERTEX_F(0);
-
-	FLUSH(op);
 }
 
 static bool
commit 33c028f8be829caa4fdb9416ff177dc71f24b68e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 01:17:50 2012 +0100

    sna/gen6+: Reduce floats-per-vertex for spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d8beed9..35dacd0 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -279,6 +279,7 @@ static int gen4_vertex_finish(struct sna *sna)
 	unsigned int i;
 
 	assert(sna->render.vertex_used);
+	assert(sna->render.nvertex_reloc);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -286,21 +287,18 @@ static int gen4_vertex_finish(struct sna *sna)
 	if (bo) {
 		gen4_vertex_flush(sna);
 
-		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-			if (sna->render.vertex_reloc[i]) {
-				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-				     i, sna->render.vertex_reloc[i]));
-
-				sna->kgem.batch[sna->render.vertex_reloc[i]] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i],
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       0);
-				sna->render.vertex_reloc[i] = 0;
-			}
+		for (i = 0; i < sna->render.nvertex_reloc; i++) {
+			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+			     i, sna->render.vertex_reloc[i]));
+
+			sna->kgem.batch[sna->render.vertex_reloc[i]] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i], bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       0);
 		}
 
+		sna->render.nvertex_reloc = 0;
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		sna->render_state.gen4.vb_id = 0;
@@ -335,13 +333,12 @@ static void gen4_vertex_close(struct sna *sna)
 	unsigned int i, delta = 0;
 
 	assert(sna->render_state.gen4.vertex_offset == 0);
+	if (!sna->render_state.gen4.vb_id)
+		return;
 
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used)
-		return;
-
 	bo = sna->render.vbo;
 	if (bo) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
@@ -386,20 +383,18 @@ static void gen4_vertex_close(struct sna *sna)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-		if (sna->render.vertex_reloc[i]) {
-			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-			     i, sna->render.vertex_reloc[i]));
+	assert(sna->render.nvertex_reloc);
+	for (i = 0; i < sna->render.nvertex_reloc; i++) {
+		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+		     i, sna->render.vertex_reloc[i]));
 
-			sna->kgem.batch[sna->render.vertex_reloc[i]] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i],
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta);
-			sna->render.vertex_reloc[i] = 0;
-		}
+		sna->kgem.batch[sna->render.vertex_reloc[i]] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i], bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta);
 	}
+	sna->render.nvertex_reloc = 0;
 
 	if (sna->render.vbo == NULL) {
 		sna->render.vertex_used = 0;
@@ -990,7 +985,7 @@ static void gen4_emit_vertex_buffer(struct sna *sna,
 	OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH((id << VB0_BUFFER_INDEX_SHIFT) | VB0_VERTEXDATA |
 		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
-	sna->render.vertex_reloc[id] = sna->kgem.nbatch;
+	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 1e0ee10..90b0bdd 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -246,6 +246,7 @@ static int gen5_vertex_finish(struct sna *sna)
 	unsigned int i;
 
 	assert(sna->render.vertex_used);
+	assert(sna->render.nvertex_reloc);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -254,27 +255,23 @@ static int gen5_vertex_finish(struct sna *sna)
 		if (sna->render_state.gen5.vertex_offset)
 			gen5_vertex_flush(sna);
 
-		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-			if (sna->render.vertex_reloc[i]) {
-				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-				     i, sna->render.vertex_reloc[i]));
-
-				sna->kgem.batch[sna->render.vertex_reloc[i]] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i],
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       0);
-				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i]+1,
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       sna->render.vertex_used * 4 - 1);
-				sna->render.vertex_reloc[i] = 0;
-			}
+		for (i = 0; i < sna->render.nvertex_reloc; i++) {
+			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+			     i, sna->render.vertex_reloc[i]));
+
+			sna->kgem.batch[sna->render.vertex_reloc[i]] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i], bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       0);
+			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i]+1, bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       sna->render.vertex_used * 4 - 1);
 		}
 
+		sna->render.nvertex_reloc = 0;
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		sna->render_state.gen5.vb_id = 0;
@@ -309,13 +306,12 @@ static void gen5_vertex_close(struct sna *sna)
 	unsigned int i, delta = 0;
 
 	assert(sna->render_state.gen5.vertex_offset == 0);
+	if (!sna->render_state.gen5.vb_id)
+		return;
 
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used)
-		return;
-
 	bo = sna->render.vbo;
 	if (bo) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
@@ -360,26 +356,23 @@ static void gen5_vertex_close(struct sna *sna)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-		if (sna->render.vertex_reloc[i]) {
-			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-			     i, sna->render.vertex_reloc[i]));
+	assert(sna->render.nvertex_reloc);
+	for (i = 0; i < sna->render.nvertex_reloc; i++) {
+		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+		     i, sna->render.vertex_reloc[i]));
 
-			sna->kgem.batch[sna->render.vertex_reloc[i]] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i],
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta);
-			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i]+1,
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta + sna->render.vertex_used * 4 - 1);
-			sna->render.vertex_reloc[i] = 0;
-		}
+		sna->kgem.batch[sna->render.vertex_reloc[i]] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i], bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta);
+		sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i]+1, bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta + sna->render.vertex_used * 4 - 1);
 	}
+	sna->render.nvertex_reloc = 0;
 
 	if (sna->render.vbo == NULL) {
 		sna->render.vertex_used = 0;
@@ -977,7 +970,7 @@ static void gen5_emit_vertex_buffer(struct sna *sna,
 	OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH((id << VB0_BUFFER_INDEX_SHIFT) | VB0_VERTEXDATA |
 		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
-	sna->render.vertex_reloc[id] = sna->kgem.nbatch;
+	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 3dc0729..db7599d 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -104,35 +104,34 @@ static const uint32_t ps_kernel_planar[][4] = {
 #include "exa_wm_write.g6b"
 };
 
-#define NOKERNEL(kernel_enum, func, ns, ni) \
-    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, func, 0, ns, ni}
-#define KERNEL(kernel_enum, kernel, ns, ni) \
-    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), ns, ni}
+#define NOKERNEL(kernel_enum, func, ns) \
+    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, func, 0, ns}
+#define KERNEL(kernel_enum, kernel, ns) \
+    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), ns}
 
 static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
 	unsigned int num_surfaces;
-	unsigned int num_inputs;
 } wm_kernels[] = {
-	NOKERNEL(NOMASK, brw_wm_kernel__affine, 2, 1),
-	NOKERNEL(NOMASK_P, brw_wm_kernel__projective, 2, 1),
+	NOKERNEL(NOMASK, brw_wm_kernel__affine, 2),
+	NOKERNEL(NOMASK_P, brw_wm_kernel__projective, 2),
 
-	NOKERNEL(MASK, brw_wm_kernel__affine_mask, 3, 2),
-	NOKERNEL(MASK_P, brw_wm_kernel__projective_mask, 3, 2),
+	NOKERNEL(MASK, brw_wm_kernel__affine_mask, 3),
+	NOKERNEL(MASK_P, brw_wm_kernel__projective_mask, 3),
 
-	NOKERNEL(MASKCA, brw_wm_kernel__affine_mask_ca, 3, 2),
-	NOKERNEL(MASKCA_P, brw_wm_kernel__projective_mask_ca, 3, 2),
+	NOKERNEL(MASKCA, brw_wm_kernel__affine_mask_ca, 3),
+	NOKERNEL(MASKCA_P, brw_wm_kernel__projective_mask_ca, 3),
 
-	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3, 2),
-	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3, 2),
+	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3),
+	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3),
 
-	NOKERNEL(OPACITY, brw_wm_kernel__affine_opacity, 2, 2),
-	NOKERNEL(OPACITY_P, brw_wm_kernel__projective_opacity, 2, 2),
+	NOKERNEL(OPACITY, brw_wm_kernel__affine_opacity, 2),
+	NOKERNEL(OPACITY_P, brw_wm_kernel__projective_opacity, 2),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7, 1),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2, 1),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2),
 };
 #undef KERNEL
 
@@ -176,7 +175,7 @@ static const struct blendinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me) \
 	(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me) + 2) * 2 * sizeof(struct gen6_sampler_state))
 
-#define VERTEX_2s2s 4
+#define VERTEX_2s2s 0
 
 #define COPY_SAMPLER 0
 #define COPY_VERTEX VERTEX_2s2s
@@ -621,7 +620,7 @@ gen6_emit_sf(struct sna *sna, bool has_mask)
 }
 
 static void
-gen6_emit_wm(struct sna *sna, unsigned int kernel)
+gen6_emit_wm(struct sna *sna, unsigned int kernel, bool has_mask)
 {
 	const uint32_t *kernels;
 
@@ -649,7 +648,7 @@ gen6_emit_wm(struct sna *sna, unsigned int kernel)
 		  (kernels[1] ? GEN6_3DSTATE_WM_16_DISPATCH_ENABLE : 0) |
 		  (kernels[2] ? GEN6_3DSTATE_WM_32_DISPATCH_ENABLE : 0) |
 		  GEN6_3DSTATE_WM_DISPATCH_ENABLE);
-	OUT_BATCH(wm_kernels[kernel].num_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
+	OUT_BATCH((1 + has_mask) << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
 		  GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
 	OUT_BATCH(kernels[2]);
 	OUT_BATCH(kernels[1]);
@@ -735,17 +734,17 @@ gen6_emit_vertex_elements(struct sna *sna,
 	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen6_render_state *render = &sna->render_state.gen6;
-	int nelem, selem;
-	uint32_t w_component;
-	uint32_t src_format;
+	uint32_t src_format, dw, offset;
 	int id = GEN6_VERTEX(op->u.gen6.flags);
+	bool has_mask;
+
+	DBG(("%s: setup id=%d\n", __FUNCTION__, id));
 
 	if (render->ve_id == id)
 		return;
 	render->ve_id = id;
 
-	switch (id) {
-	case VERTEX_2s2s:
+	if (id == VERTEX_2s2s) {
 		DBG(("%s: setup COPY\n", __FUNCTION__));
 
 		OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS |
@@ -762,7 +761,7 @@ gen6_emit_vertex_elements(struct sna *sna,
 		/* x,y */
 		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 			  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
-			  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
+			  0 << VE0_OFFSET_SHIFT);
 		OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
 			  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
 			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
@@ -771,7 +770,7 @@ gen6_emit_vertex_elements(struct sna *sna,
 		/* u0, v0, w0 */
 		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 			  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
-			  4 << VE0_OFFSET_SHIFT);	/* offset vb in bytes */
+			  4 << VE0_OFFSET_SHIFT);
 		OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
 			  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
 			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
@@ -779,17 +778,6 @@ gen6_emit_vertex_elements(struct sna *sna,
 		return;
 	}
 
-	nelem = op->mask.bo ? 2 : 1;
-	if (op->is_affine) {
-		src_format = GEN6_SURFACEFORMAT_R32G32_FLOAT;
-		w_component = GEN6_VFCOMPONENT_STORE_1_FLT;
-		selem = 2;
-	} else {
-		src_format = GEN6_SURFACEFORMAT_R32G32B32_FLOAT;
-		w_component = GEN6_VFCOMPONENT_STORE_SRC;
-		selem = 3;
-	}
-
 	/* The VUE layout
 	 *    dword 0-3: pad (0.0, 0.0, 0.0. 0.0)
 	 *    dword 4-7: position (x, y, 1.0, 1.0),
@@ -798,8 +786,9 @@ gen6_emit_vertex_elements(struct sna *sna,
 	 *
 	 * dword 4-15 are fetched from vertex buffer
 	 */
+	has_mask = (id >> 2) != 0;
 	OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS |
-		((2 * (2 + nelem)) + 1 - 2));
+		((2 * (3 + has_mask)) + 1 - 2));
 
 	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 		  GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
@@ -812,30 +801,74 @@ gen6_emit_vertex_elements(struct sna *sna,
 	/* x,y */
 	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 		  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
-		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
+		  0 << VE0_OFFSET_SHIFT);
 	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
 		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
-		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT |
+		  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
 		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+	offset = 4;
 
 	/* u0, v0, w0 */
+	DBG(("%s: first channel %d floats, offset=%d\n", __FUNCTION__, id & 3, offset));
+	dw = GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
+	switch (id & 3) {
+	case 1:
+		src_format = GEN6_SURFACEFORMAT_R32_FLOAT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	default:
+		assert(0);
+	case 2:
+		src_format = GEN6_SURFACEFORMAT_R32G32_FLOAT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	case 3:
+		src_format = GEN6_SURFACEFORMAT_R32G32B32_FLOAT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	}
 	OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 		  src_format << VE0_FORMAT_SHIFT |
-		  4 << VE0_OFFSET_SHIFT);	/* offset vb in bytes */
-	OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
-		  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
-		  w_component << VE1_VFCOMPONENT_2_SHIFT |
-		  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+		  offset << VE0_OFFSET_SHIFT);
+	OUT_BATCH(dw);
+	offset += (id & 3) * sizeof(float);
 
 	/* u1, v1, w1 */
-	if (op->mask.bo) {
+	if (has_mask) {
+		DBG(("%s: second channel %d floats, offset=%d\n", __FUNCTION__, (id >> 2) & 3, offset));
+		dw = GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT;
+		switch ((id >> 2) & 3) {
+		case 1:
+			src_format = GEN6_SURFACEFORMAT_R32_FLOAT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		default:
+			assert(0);
+		case 2:
+			src_format = GEN6_SURFACEFORMAT_R32G32_FLOAT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		case 3:
+			src_format = GEN6_SURFACEFORMAT_R32G32B32_FLOAT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		}
 		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
 			  src_format << VE0_FORMAT_SHIFT |
-			  ((1 + selem) * 4) << VE0_OFFSET_SHIFT); /* vb offset in bytes */
-		OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
-			  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
-			  w_component << VE1_VFCOMPONENT_2_SHIFT |
-			  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+			  offset << VE0_OFFSET_SHIFT);
+		OUT_BATCH(dw);
 	}
 }
 
@@ -860,8 +893,8 @@ gen6_emit_state(struct sna *sna,
 	if (gen6_emit_cc(sna, GEN6_BLEND(op->u.gen6.flags)))
 		need_stall = false;
 	gen6_emit_sampler(sna, GEN6_SAMPLER(op->u.gen6.flags));
-	gen6_emit_sf(sna, op->mask.bo != NULL);
-	gen6_emit_wm(sna, GEN6_KERNEL(op->u.gen6.flags));
+	gen6_emit_sf(sna, GEN6_VERTEX(op->u.gen6.flags) >> 2);
+	gen6_emit_wm(sna, GEN6_KERNEL(op->u.gen6.flags), GEN6_VERTEX(op->u.gen6.flags) >> 2);
 	gen6_emit_vertex_elements(sna, op);
 
 	need_stall |= gen6_emit_binding_table(sna, wm_binding_table & ~1);
@@ -900,7 +933,8 @@ static void gen6_magic_ca_pass(struct sna *sna,
 	gen6_emit_wm(sna,
 		     gen6_choose_composite_kernel(PictOpAdd,
 						  true, true,
-						  op->is_affine));
+						  op->is_affine),
+		     true);
 
 	OUT_BATCH(GEN6_3DPRIMITIVE |
 		  GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -936,6 +970,7 @@ static int gen6_vertex_finish(struct sna *sna)
 	DBG(("%s: used=%d / %d\n", __FUNCTION__,
 	     sna->render.vertex_used, sna->render.vertex_size));
 	assert(sna->render.vertex_used);
+	assert(sna->render.nvertex_reloc);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -944,27 +979,23 @@ static int gen6_vertex_finish(struct sna *sna)
 		if (sna->render_state.gen6.vertex_offset)
 			gen6_vertex_flush(sna);
 
-		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-			if (sna->render.vertex_reloc[i]) {
-				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-				     i, sna->render.vertex_reloc[i]));
-
-				sna->kgem.batch[sna->render.vertex_reloc[i]] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i],
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       0);
-				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i]+1,
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       sna->render.vertex_used * 4 - 1);
-				sna->render.vertex_reloc[i] = 0;
-			}
+		for (i = 0; i < sna->render.nvertex_reloc; i++) {
+			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+			     i, sna->render.vertex_reloc[i]));
+
+			sna->kgem.batch[sna->render.vertex_reloc[i]] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i], bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       0);
+			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i]+1, bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       sna->render.vertex_used * 4 - 1);
 		}
 
+		sna->render.nvertex_reloc = 0;
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		sna->render_state.gen6.vb_id = 0;
@@ -984,6 +1015,8 @@ static int gen6_vertex_finish(struct sna *sna)
 		return 0;
 	}
 
+	DBG(("%s: create vbo handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
+
 	kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertex_used) {
 		DBG(("%s: copying initial buffer x %d to handle=%d\n",
@@ -1005,16 +1038,16 @@ static void gen6_vertex_close(struct sna *sna)
 
 	assert(sna->render_state.gen6.vertex_offset == 0);
 
-	DBG(("%s: used=%d, vbo active? %d\n",
-	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
-
-	if (!sna->render.vertex_used)
+	if (!sna->render_state.gen6.vb_id)
 		return;
 
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo ? sna->render.vbo->handle : 0));
+
 	bo = sna->render.vbo;
 	if (bo) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
-			DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+			DBG(("%s: discarding vbo (full), handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
 			sna->render.vbo = NULL;
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
@@ -1045,26 +1078,23 @@ static void gen6_vertex_close(struct sna *sna)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-		if (sna->render.vertex_reloc[i]) {
-			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-			     i, sna->render.vertex_reloc[i]));
-
-			sna->kgem.batch[sna->render.vertex_reloc[i]] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i],
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta);
-			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i]+1,
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta + sna->render.vertex_used * 4 - 1);
-			sna->render.vertex_reloc[i] = 0;
-		}
+	assert(sna->render.nvertex_reloc);
+	for (i = 0; i < sna->render.nvertex_reloc; i++) {
+		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+		     i, sna->render.vertex_reloc[i]));
+
+		sna->kgem.batch[sna->render.vertex_reloc[i]] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i], bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta);
+		sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i]+1, bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta + sna->render.vertex_used * 4 - 1);
 	}
+	sna->render.nvertex_reloc = 0;
 
 	if (sna->render.vbo == NULL) {
 		sna->render.vertex_used = 0;
@@ -1494,7 +1524,7 @@ static void gen6_emit_vertex_buffer(struct sna *sna,
 	OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
 		  4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT);
-	sna->render.vertex_reloc[id] = sna->kgem.nbatch;
+	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -1624,9 +1654,11 @@ inline static uint32_t *gen6_composite_get_binding_table(struct sna *sna,
 static uint32_t
 gen6_choose_composite_vertex_buffer(const struct sna_composite_op *op)
 {
-	int has_mask = op->mask.bo != NULL;
-	int is_affine = op->is_affine;
-	return has_mask << 1 | is_affine;
+	int id = 2 + !op->is_affine;
+	if (op->mask.bo)
+		id |= id << 2;
+	assert(id > 0 && id < 16);
+	return id;
 }
 
 static void
@@ -1954,7 +1986,7 @@ gen6_render_video(struct sna *sna,
 			       is_planar_fourcc(frame->id) ?
 			       GEN6_WM_KERNEL_VIDEO_PLANAR :
 			       GEN6_WM_KERNEL_VIDEO_PACKED,
-			       1);
+			       2);
 	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
@@ -2824,21 +2856,12 @@ gen6_emit_composite_spans_primitive(struct sna *sna,
 {
 	gen6_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 
 	gen6_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 
 	gen6_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(0);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 }
 
 fastcall static void
@@ -2849,15 +2872,15 @@ gen6_emit_composite_spans_solid(struct sna *sna,
 {
 	OUT_VERTEX(box->x2, box->y2);
 	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity);
 
 	OUT_VERTEX(box->x1, box->y2);
 	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity);
 
 	OUT_VERTEX(box->x1, box->y1);
 	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
+	OUT_VERTEX_F(opacity);
 }
 
 fastcall static void
@@ -2878,24 +2901,24 @@ gen6_emit_composite_spans_identity(struct sna *sna,
 	int16_t ty = op->base.src.offset[1];
 
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*5;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
 	v[1] = (box->x2 + tx) * sx;
-	v[7] = v[2] = (box->y2 + ty) * sy;
-	v[13] = v[8] = v[3] = opacity;
-	v[9] = v[4] = 1;
+	v[6] = v[2] = (box->y2 + ty) * sy;
 
 	dst.p.x = box->x1;
-	v[5] = dst.f;
-	v[11] = v[6] = (box->x1 + tx) * sx;
+	v[4] = dst.f;
+	v[9] = v[5] = (box->x1 + tx) * sx;
 
 	dst.p.y = box->y1;
-	v[10] = dst.f;
-	v[12] = (box->y1 + ty) * sy;
-	v[14] = 0;
+	v[8] = dst.f;
+	v[10] = (box->y1 + ty) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
 }
 
 fastcall static void
@@ -2920,24 +2943,24 @@ gen6_emit_composite_spans_simple(struct sna *sna,
 	int16_t ty = op->base.src.offset[1];
 
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*5;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
 	v[1] = ((box->x2 + tx) * xx + x0) * sx;
-	v[7] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-	v[13] = v[8] = v[3] = opacity;
-	v[9] = v[4] = 1;
+	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
 
 	dst.p.x = box->x1;
-	v[5] = dst.f;
-	v[11] = v[6] = ((box->x1 + tx) * xx + x0) * sx;
+	v[4] = dst.f;
+	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
 
 	dst.p.y = box->y1;
-	v[10] = dst.f;
-	v[12] = ((box->y1 + ty) * yy + y0) * sy;
-	v[14] = 0;
+	v[8] = dst.f;
+	v[10] = ((box->y1 + ty) * yy + y0) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
 }
 
 fastcall static void
@@ -2950,19 +2973,16 @@ gen6_emit_composite_spans_affine(struct sna *sna,
 	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x2, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
 
 	OUT_VERTEX(box->x1, box->y2);
 	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x1, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
 
 	OUT_VERTEX(box->x1, box->y1);
 	gen6_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x1, box->y1);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(0);
 }
 
 fastcall static void
@@ -3022,7 +3042,6 @@ gen6_render_composite_spans_done(struct sna *sna,
 	if (sna->render_state.gen6.vertex_offset)
 		gen6_vertex_flush(sna);
 
-	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -3103,9 +3122,7 @@ gen6_render_composite_spans(struct sna *sna,
 		gen6_composite_channel_convert(&tmp->base.src);
 		break;
 	}
-	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
-	if (tmp->base.mask.bo == NULL)
-		goto cleanup_src;
+	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.need_magic_ca_pass = false;
@@ -3124,7 +3141,7 @@ gen6_render_composite_spans(struct sna *sna,
 		} else
 			tmp->prim_emit = gen6_emit_composite_spans_affine;
 	}
-	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
+	tmp->base.floats_per_vertex = 4 + !tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
 	tmp->base.u.gen6.flags =
@@ -3134,7 +3151,7 @@ gen6_render_composite_spans(struct sna *sna,
 					      SAMPLER_EXTEND_PAD),
 			       gen6_get_blend(tmp->base.op, false, tmp->base.dst.format),
 			       GEN6_WM_KERNEL_OPACITY | !tmp->base.is_affine,
-			       1 << 1 | tmp->base.is_affine);
+			       1 << 2 | (2+!tmp->base.is_affine));
 
 	tmp->box   = gen6_render_composite_spans_box;
 	tmp->boxes = gen6_render_composite_spans_boxes;
@@ -4086,7 +4103,7 @@ gen6_render_retire(struct kgem *kgem)
 
 	sna = container_of(kgem, struct sna, kgem);
 	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
-		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		DBG(("%s: resetting idle vbo handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 	}
@@ -4099,7 +4116,7 @@ gen6_render_expire(struct kgem *kgem)
 
 	sna = container_of(kgem, struct sna, kgem);
 	if (sna->render.vbo && !sna->render.vertex_used) {
-		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		DBG(("%s: discarding vbo handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
 		kgem_bo_destroy(kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		sna->render.vertices = sna->render.vertex_data;
@@ -4114,7 +4131,7 @@ static void gen6_render_reset(struct sna *sna)
 	sna->render_state.gen6.needs_invariant = true;
 	sna->render_state.gen6.first_state_packet = true;
 	sna->render_state.gen6.vb_id = 0;
-	sna->render_state.gen6.ve_id = -1;
+	sna->render_state.gen6.ve_id = 3 << 2;
 	sna->render_state.gen6.last_primitive = -1;
 
 	sna->render_state.gen6.num_sf_outputs = 0;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index a199307..c1993df 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -181,7 +181,7 @@ static const struct blendinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me) \
 	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) + 2) * 2 * sizeof(struct gen7_sampler_state))
 
-#define VERTEX_2s2s 4
+#define VERTEX_2s2s 0
 
 #define COPY_SAMPLER 0
 #define COPY_VERTEX VERTEX_2s2s
@@ -847,23 +847,23 @@ gen7_emit_vertex_elements(struct sna *sna,
 	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen7_render_state *render = &sna->render_state.gen7;
-	int nelem, selem;
-	uint32_t w_component;
-	uint32_t src_format;
+	uint32_t src_format, dw, offset;
 	int id = GEN7_VERTEX(op->u.gen7.flags);
+	bool has_mask;
+
+	DBG(("%s: setup id=%d\n", __FUNCTION__, id));
 
 	if (render->ve_id == id)
 		return;
 	render->ve_id = id;
 
-	switch (id) {
-	case VERTEX_2s2s:
+	if (id == VERTEX_2s2s) {
 		DBG(("%s: setup COPY\n", __FUNCTION__));
 
 		OUT_BATCH(GEN7_3DSTATE_VERTEX_ELEMENTS |
 			  ((2 * (1 + 2)) + 1 - 2));
 
-		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+		OUT_BATCH(VERTEX_2s2s << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 			  GEN7_SURFACEFORMAT_R32G32B32A32_FLOAT << GEN7_VE0_FORMAT_SHIFT |
 			  0 << GEN7_VE0_OFFSET_SHIFT);
 		OUT_BATCH(GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_0_SHIFT |
@@ -872,7 +872,7 @@ gen7_emit_vertex_elements(struct sna *sna,
 			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_3_SHIFT);
 
 		/* x,y */
-		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+		OUT_BATCH(VERTEX_2s2s << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 			  GEN7_SURFACEFORMAT_R16G16_SSCALED << GEN7_VE0_FORMAT_SHIFT |
 			  0 << GEN7_VE0_OFFSET_SHIFT); /* offsets vb in bytes */
 		OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
@@ -880,7 +880,7 @@ gen7_emit_vertex_elements(struct sna *sna,
 			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
 			  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
 
-		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+		OUT_BATCH(VERTEX_2s2s << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 			  GEN7_SURFACEFORMAT_R16G16_SSCALED << GEN7_VE0_FORMAT_SHIFT |
 			  4 << GEN7_VE0_OFFSET_SHIFT);	/* offset vb in bytes */
 		OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
@@ -890,17 +890,6 @@ gen7_emit_vertex_elements(struct sna *sna,
 		return;
 	}
 
-	nelem = op->mask.bo ? 2 : 1;
-	if (op->is_affine) {
-		src_format = GEN7_SURFACEFORMAT_R32G32_FLOAT;
-		w_component = GEN7_VFCOMPONENT_STORE_0;
-		selem = 2;
-	} else {
-		src_format = GEN7_SURFACEFORMAT_R32G32B32_FLOAT;
-		w_component = GEN7_VFCOMPONENT_STORE_SRC;
-		selem = 3;
-	}
-
 	/* The VUE layout
 	 *    dword 0-3: pad (0.0, 0.0, 0.0. 0.0)
 	 *    dword 4-7: position (x, y, 1.0, 1.0),
@@ -909,11 +898,11 @@ gen7_emit_vertex_elements(struct sna *sna,
 	 *
 	 * dword 4-15 are fetched from vertex buffer
 	 */
+	has_mask = (id >> 2) != 0;
 	OUT_BATCH(GEN7_3DSTATE_VERTEX_ELEMENTS |
-		((2 * (2 + nelem)) + 1 - 2));
+		((2 * (3 + has_mask)) + 1 - 2));
 
-	OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT |
-		  GEN7_VE0_VALID |
+	OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 		  GEN7_SURFACEFORMAT_R32G32B32A32_FLOAT << GEN7_VE0_FORMAT_SHIFT |
 		  0 << GEN7_VE0_OFFSET_SHIFT);
 	OUT_BATCH(GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_0_SHIFT |
@@ -924,31 +913,74 @@ gen7_emit_vertex_elements(struct sna *sna,
 	/* x,y */
 	OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 		  GEN7_SURFACEFORMAT_R16G16_SSCALED << GEN7_VE0_FORMAT_SHIFT |
-		  0 << GEN7_VE0_OFFSET_SHIFT); /* offsets vb in bytes */
+		  0 << GEN7_VE0_OFFSET_SHIFT);
 	OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
 		  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
 		  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
 		  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+	offset = 4;
 
 	/* u0, v0, w0 */
+	DBG(("%s: first channel %d floats, offset=%d\n", __FUNCTION__, id & 3, offset));
+	dw = GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT;
+	switch (id & 3) {
+	case 1:
+		src_format = GEN7_SURFACEFORMAT_R32_FLOAT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	default:
+		assert(0);
+	case 2:
+		src_format = GEN7_SURFACEFORMAT_R32G32_FLOAT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	case 3:
+		src_format = GEN7_SURFACEFORMAT_R32G32B32_FLOAT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+		dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+		break;
+	}
 	OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 		  src_format << GEN7_VE0_FORMAT_SHIFT |
-		  4 << GEN7_VE0_OFFSET_SHIFT);	/* offset vb in bytes */
-	OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
-		  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
-		  w_component << GEN7_VE1_VFCOMPONENT_2_SHIFT |
-		  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+		  offset << GEN7_VE0_OFFSET_SHIFT);
+	OUT_BATCH(dw);
+	offset += (id & 3) * sizeof(float);
 
 	/* u1, v1, w1 */
-	if (op->mask.bo) {
-		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT |
-			  GEN7_VE0_VALID |
+	if (has_mask) {
+		DBG(("%s: second channel %d floats, offset=%d\n", __FUNCTION__, (id >> 2) & 3, offset));
+		dw = GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT;
+		switch ((id >> 2) & 3) {
+		case 1:
+			src_format = GEN7_SURFACEFORMAT_R32_FLOAT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		default:
+			assert(0);
+		case 2:
+			src_format = GEN7_SURFACEFORMAT_R32G32_FLOAT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		case 3:
+			src_format = GEN7_SURFACEFORMAT_R32G32B32_FLOAT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT;
+			dw |= GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_2_SHIFT;
+			break;
+		}
+		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
 			  src_format << GEN7_VE0_FORMAT_SHIFT |
-			  ((1 + selem) * 4) << GEN7_VE0_OFFSET_SHIFT); /* vb offset in bytes */
-		OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
-			  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
-			  w_component << GEN7_VE1_VFCOMPONENT_2_SHIFT |
-			  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+			  offset << GEN7_VE0_OFFSET_SHIFT);
+		OUT_BATCH(dw);
 	}
 }
 
@@ -994,7 +1026,7 @@ gen7_emit_state(struct sna *sna,
 
 	gen7_emit_cc(sna, GEN7_BLEND(op->u.gen7.flags));
 	gen7_emit_sampler(sna, GEN7_SAMPLER(op->u.gen7.flags));
-	gen7_emit_sf(sna, op->mask.bo != NULL);
+	gen7_emit_sf(sna, GEN7_VERTEX(op->u.gen7.flags) >> 2);
 	gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
 	gen7_emit_vertex_elements(sna, op);
 
@@ -1065,6 +1097,7 @@ static int gen7_vertex_finish(struct sna *sna)
 	unsigned int i;
 
 	assert(sna->render.vertex_used);
+	assert(sna->render.nvertex_reloc);
 
 	/* Note: we only need dword alignment (currently) */
 
@@ -1073,27 +1106,23 @@ static int gen7_vertex_finish(struct sna *sna)
 		if (sna->render_state.gen7.vertex_offset)
 			gen7_vertex_flush(sna);
 
-		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-			if (sna->render.vertex_reloc[i]) {
-				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-				     i, sna->render.vertex_reloc[i]));
-
-				sna->kgem.batch[sna->render.vertex_reloc[i]] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i],
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       0);
-				sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-					kgem_add_reloc(&sna->kgem,
-						       sna->render.vertex_reloc[i]+1,
-						       bo,
-						       I915_GEM_DOMAIN_VERTEX << 16,
-						       sna->render.vertex_used * 4 - 1);
-				sna->render.vertex_reloc[i] = 0;
-			}
+		for (i = 0; i < sna->render.nvertex_reloc; i++) {
+			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+			     i, sna->render.vertex_reloc[i]));
+
+			sna->kgem.batch[sna->render.vertex_reloc[i]] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i], bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       0);
+			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+				kgem_add_reloc(&sna->kgem,
+					       sna->render.vertex_reloc[i]+1, bo,
+					       I915_GEM_DOMAIN_VERTEX << 16,
+					       sna->render.vertex_used * 4 - 1);
 		}
 
+		sna->render.nvertex_reloc = 0;
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		sna->render_state.gen7.vb_id = 0;
@@ -1130,16 +1159,16 @@ static void gen7_vertex_close(struct sna *sna)
 
 	assert(sna->render_state.gen7.vertex_offset == 0);
 
-	DBG(("%s: used=%d, vbo active? %d\n",
-	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
-
-	if (!sna->render.vertex_used)
+	if (!sna->render_state.gen7.vb_id)
 		return;
 
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo ? sna->render.vbo->handle : 0));
+
 	bo = sna->render.vbo;
 	if (bo) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
-			DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+			DBG(("%s: discarding vbo (full), handle=%d\n", __FUNCTION__, sna->render.vbo->handle));
 			sna->render.vbo = NULL;
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
@@ -1170,30 +1199,29 @@ static void gen7_vertex_close(struct sna *sna)
 		}
 	}
 
-	for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
-		if (sna->render.vertex_reloc[i]) {
-			DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
-			     i, sna->render.vertex_reloc[i]));
-
-			sna->kgem.batch[sna->render.vertex_reloc[i]] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i],
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta);
-			sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
-				kgem_add_reloc(&sna->kgem,
-					       sna->render.vertex_reloc[i]+1,
-					       bo,
-					       I915_GEM_DOMAIN_VERTEX << 16,
-					       delta + sna->render.vertex_used * 4 - 1);
-			sna->render.vertex_reloc[i] = 0;
-		}
+	assert(sna->render.nvertex_reloc);
+	for (i = 0; i < sna->render.nvertex_reloc; i++) {
+		DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
+		     i, sna->render.vertex_reloc[i]));
+
+		sna->kgem.batch[sna->render.vertex_reloc[i]] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i], bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta);
+		sna->kgem.batch[sna->render.vertex_reloc[i]+1] =
+			kgem_add_reloc(&sna->kgem,
+				       sna->render.vertex_reloc[i]+1, bo,
+				       I915_GEM_DOMAIN_VERTEX << 16,
+				       delta + sna->render.vertex_used * 4 - 1);
 	}
+	sna->render.nvertex_reloc = 0;
 
 	if (sna->render.vbo == NULL) {
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
 	}
 
 	if (free_bo)
@@ -1360,6 +1388,8 @@ gen7_emit_composite_primitive_solid(struct sna *sna,
 
 	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+	assert(!too_large(r->dst.x + r->width, r->dst.y + r->height));
 
 	dst.p.x = r->dst.x + r->width;
 	dst.p.y = r->dst.y + r->height;
@@ -1599,7 +1629,7 @@ static void gen7_emit_vertex_buffer(struct sna *sna,
 		  GEN7_VB0_VERTEXDATA |
 		  GEN7_VB0_ADDRESS_MODIFY_ENABLE |
 		  4*op->floats_per_vertex << GEN7_VB0_BUFFER_PITCH_SHIFT);
-	sna->render.vertex_reloc[id] = sna->kgem.nbatch;
+	sna->render.vertex_reloc[sna->render.nvertex_reloc++] = sna->kgem.nbatch;
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -1686,6 +1716,7 @@ start:
 	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
 
+	assert(want > 0);
 	sna->render.vertex_index += 3*want;
 	return want;
 
@@ -1719,9 +1750,11 @@ inline static uint32_t *gen7_composite_get_binding_table(struct sna *sna,
 static uint32_t
 gen7_choose_composite_vertex_buffer(const struct sna_composite_op *op)
 {
-	int has_mask = op->mask.bo != NULL;
-	int is_affine = op->is_affine;
-	return has_mask << 1 | is_affine;
+	int id = 2 + !op->is_affine;
+	if (op->mask.bo)
+		id |= id << 2;
+	assert(id > 0 && id < 16);
+	return id;
 }
 
 static void
@@ -2908,21 +2941,12 @@ gen7_emit_composite_spans_primitive(struct sna *sna,
 {
 	gen7_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 
 	gen7_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 
 	gen7_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(0);
-	if (!op->base.is_affine)
-		OUT_VERTEX_F(1);
 }
 
 fastcall static void
@@ -2933,15 +2957,15 @@ gen7_emit_composite_spans_solid(struct sna *sna,
 {
 	OUT_VERTEX(box->x2, box->y2);
 	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity);
 
 	OUT_VERTEX(box->x1, box->y2);
 	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity);
 
 	OUT_VERTEX(box->x1, box->y1);
 	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
-	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
+	OUT_VERTEX_F(opacity);
 }
 
 fastcall static void
@@ -2962,24 +2986,24 @@ gen7_emit_composite_spans_identity(struct sna *sna,
 	int16_t ty = op->base.src.offset[1];
 
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*5;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
 	v[1] = (box->x2 + tx) * sx;
-	v[7] = v[2] = (box->y2 + ty) * sy;
-	v[13] = v[8] = v[3] = opacity;
-	v[9] = v[4] = 1;
+	v[6] = v[2] = (box->y2 + ty) * sy;
 
 	dst.p.x = box->x1;
-	v[5] = dst.f;
-	v[11] = v[6] = (box->x1 + tx) * sx;
+	v[4] = dst.f;
+	v[9] = v[5] = (box->x1 + tx) * sx;
 
 	dst.p.y = box->y1;
-	v[10] = dst.f;
-	v[12] = (box->y1 + ty) * sy;
-	v[14] = 0;
+	v[8] = dst.f;
+	v[10] = (box->y1 + ty) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
 }
 
 fastcall static void
@@ -3004,24 +3028,24 @@ gen7_emit_composite_spans_simple(struct sna *sna,
 	int16_t ty = op->base.src.offset[1];
 
 	v = sna->render.vertices + sna->render.vertex_used;
-	sna->render.vertex_used += 3*5;
+	sna->render.vertex_used += 3*4;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	dst.p.x = box->x2;
 	dst.p.y = box->y2;
 	v[0] = dst.f;
 	v[1] = ((box->x2 + tx) * xx + x0) * sx;
-	v[7] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
-	v[13] = v[8] = v[3] = opacity;
-	v[9] = v[4] = 1;
+	v[6] = v[2] = ((box->y2 + ty) * yy + y0) * sy;
 
 	dst.p.x = box->x1;
-	v[5] = dst.f;
-	v[11] = v[6] = ((box->x1 + tx) * xx + x0) * sx;
+	v[4] = dst.f;
+	v[9] = v[5] = ((box->x1 + tx) * xx + x0) * sx;
 
 	dst.p.y = box->y1;
-	v[10] = dst.f;
-	v[12] = ((box->y1 + ty) * yy + y0) * sy;
-	v[14] = 0;
+	v[8] = dst.f;
+	v[10] = ((box->y1 + ty) * yy + y0) * sy;
+
+	v[11] = v[7] = v[3] = opacity;
 }
 
 fastcall static void
@@ -3034,19 +3058,16 @@ gen7_emit_composite_spans_affine(struct sna *sna,
 	gen7_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x2, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
 
 	OUT_VERTEX(box->x1, box->y2);
 	gen7_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x1, box->y2);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(1);
 
 	OUT_VERTEX(box->x1, box->y1);
 	gen7_emit_composite_texcoord_affine(sna, &op->base.src,
 					    box->x1, box->y1);
 	OUT_VERTEX_F(opacity);
-	OUT_VERTEX_F(0);
 }
 
 fastcall static void
@@ -3106,7 +3127,6 @@ gen7_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -3184,9 +3204,7 @@ gen7_render_composite_spans(struct sna *sna,
 		gen7_composite_channel_convert(&tmp->base.src);
 		break;
 	}
-	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
-	if (tmp->base.mask.bo == NULL)
-		goto cleanup_src;
+	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.need_magic_ca_pass = false;
@@ -3205,7 +3223,7 @@ gen7_render_composite_spans(struct sna *sna,
 		} else
 			tmp->prim_emit = gen7_emit_composite_spans_affine;
 	}
-	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
+	tmp->base.floats_per_vertex = 4 + !tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
 	tmp->base.u.gen7.flags =
@@ -3215,7 +3233,7 @@ gen7_render_composite_spans(struct sna *sna,
 					      SAMPLER_EXTEND_PAD),
 			       gen7_get_blend(tmp->base.op, false, tmp->base.dst.format),
 			       GEN7_WM_KERNEL_OPACITY | !tmp->base.is_affine,
-			       1 << 1 | tmp->base.is_affine);
+			       1 << 2 | (2+!tmp->base.is_affine));
 
 	tmp->box   = gen7_render_composite_spans_box;
 	tmp->boxes = gen7_render_composite_spans_boxes;
@@ -4197,7 +4215,7 @@ static void gen7_render_reset(struct sna *sna)
 	sna->render_state.gen7.emit_flush = false;
 	sna->render_state.gen7.needs_invariant = true;
 	sna->render_state.gen7.vb_id = 0;
-	sna->render_state.gen7.ve_id = -1;
+	sna->render_state.gen7.ve_id = 3 << 2;
 	sna->render_state.gen7.last_primitive = -1;
 
 	sna->render_state.gen7.num_sf_outputs = 0;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index ddcafdb..68bb901 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -286,7 +286,8 @@ struct sna_render {
 	uint16_t vertex_index;
 	uint16_t vertex_used;
 	uint16_t vertex_size;
-	uint16_t vertex_reloc[8];
+	uint16_t vertex_reloc[16];
+	int nvertex_reloc;
 
 	struct kgem_bo *vbo;
 	float *vertices;
commit 9b2873d3d97b6780d878bd9b821fba0302470f9f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Aug 1 00:01:15 2012 +0100

    sna/gen4+: Implement an opacity shader
    
    Avoid the cumbersome lookup through the alpha gradient texture and
    simply multiply the incoming opacity value. The next step will be to
    reduce the number of floats required per vertex.
    
    Now that we have removed the primary user of the alpha solid cache, it
    may be time to retire that as well.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/brw/brw.h b/src/sna/brw/brw.h
index f0f3ac8..e5fa72f 100644
--- a/src/sna/brw/brw.h
+++ b/src/sna/brw/brw.h
@@ -12,3 +12,6 @@ bool brw_wm_kernel__projective(struct brw_compile *p, int dispatch_width);
 bool brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch_width);
 bool brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch_width);
 bool brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch_width);
+
+bool brw_wm_kernel__affine_opacity(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__projective_opacity(struct brw_compile *p, int dispatch_width);
diff --git a/src/sna/brw/brw_test_gen7.c b/src/sna/brw/brw_test_gen7.c
index c3f0e23..085b25c 100644
--- a/src/sna/brw/brw_test_gen7.c
+++ b/src/sna/brw/brw_test_gen7.c
@@ -167,6 +167,17 @@ static void gen7_ps_nomask_projective(void)
 	compare(ps_kernel_nomask_projective);
 }
 
+static void gen7_ps_opacity(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__affine_opacity(&p, 16);
+
+	compare(ps_kernel_nomask_affine);
+}
+
 void brw_test_gen7(void)
 {
 	gen7_ps_nomask_affine();
@@ -175,4 +186,6 @@ void brw_test_gen7(void)
 	gen7_ps_masksa_affine();
 
 	gen7_ps_nomask_projective();
+
+	gen7_ps_opacity();
 }
diff --git a/src/sna/brw/brw_wm.c b/src/sna/brw/brw_wm.c
index f96881a..bd4003d 100644
--- a/src/sna/brw/brw_wm.c
+++ b/src/sna/brw/brw_wm.c
@@ -323,6 +323,68 @@ done:
 	brw_fb_write(p, dw);
 }
 
+static void brw_wm_write__opacity(struct brw_compile *p, int dw,
+				  int src, int mask)
+{
+	int n;
+
+	if (dw == 8 && p->gen >= 60) {
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+		brw_MUL(p,
+			brw_message_reg(2),
+			brw_vec8_grf(src+0, 0),
+			brw_vec1_grf(mask, 3));
+		brw_MUL(p,
+			brw_message_reg(3),
+			brw_vec8_grf(src+1, 0),
+			brw_vec1_grf(mask, 3));
+		brw_MUL(p,
+			brw_message_reg(4),
+			brw_vec8_grf(src+2, 0),
+			brw_vec1_grf(mask, 3));
+		brw_MUL(p,
+			brw_message_reg(5),
+			brw_vec8_grf(src+3, 0),
+			brw_vec1_grf(mask, 3));
+
+		goto done;
+	}
+
+	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+
+	for (n = 0; n < 4; n++) {
+		if (p->gen >= 60) {
+			brw_MUL(p,
+				brw_message_reg(2 + 2*n),
+				brw_vec8_grf(src + 2*n, 0),
+				brw_vec1_grf(mask, 3));
+		} else if (p->gen >= 45 && dw == 16) {
+			brw_MUL(p,
+				brw_message_reg(2 + n + BRW_MRF_COMPR4),
+				brw_vec8_grf(src + 2*n, 0),
+				brw_vec1_grf(mask, 3));
+		} else {
+			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+			brw_MUL(p,
+				brw_message_reg(2 + n),
+				brw_vec8_grf(src + 2*n, 0),
+				brw_vec1_grf(mask, 3));
+
+			if (dw == 16) {
+				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
+				brw_MUL(p,
+					brw_message_reg(2 + n + 4),
+					brw_vec8_grf(src + 2*n+1, 0),
+					brw_vec1_grf(mask, 3));
+			}
+		}
+	}
+
+done:
+	brw_fb_write(p, dw);
+}
+
 static void brw_wm_write__mask_ca(struct brw_compile *p, int dw,
 				  int src, int mask)
 {
@@ -597,3 +659,37 @@ brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch)
 
 	return true;
 }
+
+bool
+brw_wm_kernel__affine_opacity(struct brw_compile *p, int dispatch)
+{
+	int src, mask;
+
+	if (p->gen < 60) {
+		brw_wm_xy(p, dispatch);
+		mask = 4;
+	} else
+		mask = dispatch == 16 ? 8 : 6;
+
+	src = brw_wm_affine(p, dispatch, 0, 1, 12);
+	brw_wm_write__opacity(p, dispatch, src, mask);
+
+	return true;
+}
+
+bool
+brw_wm_kernel__projective_opacity(struct brw_compile *p, int dispatch)
+{
+	int src, mask;
+
+	if (p->gen < 60) {
+		brw_wm_xy(p, dispatch);
+		mask = 4;
+	} else
+		mask = dispatch == 16 ? 8 : 6;
+
+	src = brw_wm_projective(p, dispatch, 0, 1, 12);
+	brw_wm_write__opacity(p, dispatch, src, mask);
+
+	return true;
+}
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d1dbf5a..d8beed9 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -149,6 +149,9 @@ static const struct wm_kernel_info {
 	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
 	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),
 
+	NOKERNEL(WM_KERNEL_OPACITY, brw_wm_kernel__affine_opacity, true),
+	NOKERNEL(WM_KERNEL_OPACITY_P, brw_wm_kernel__projective_opacity, true),
+
 	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
 	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
@@ -2425,28 +2428,6 @@ cleanup_dst:
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static bool
-gen4_composite_alpha_gradient_init(struct sna *sna,
-				   struct sna_composite_channel *channel)
-{
-	DBG(("%s\n", __FUNCTION__));
-
-	channel->filter = PictFilterNearest;
-	channel->repeat = RepeatPad;
-	channel->is_affine = true;
-	channel->is_solid  = false;
-	channel->transform = NULL;
-	channel->width  = 256;
-	channel->height = 1;
-	channel->card_format = GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
-
-	channel->bo = sna_render_get_alpha_gradient(sna);
-
-	channel->scale[0]  = channel->scale[1]  = 1;
-	channel->offset[0] = channel->offset[1] = 0;
-	return channel->bo != NULL;
-}
-
 inline static void
 gen4_emit_composite_texcoord(struct sna *sna,
 			     const struct sna_composite_channel *channel,
@@ -2610,6 +2591,7 @@ gen4_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -2687,13 +2669,14 @@ gen4_render_composite_spans(struct sna *sna,
 		break;
 	}
 
-	tmp->base.mask.bo = NULL;
+	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
+	if (tmp->base.mask.bo == NULL)
+		goto cleanup_src;
+
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.has_component_alpha = false;
 	tmp->base.need_magic_ca_pass = false;
 
-	gen4_composite_alpha_gradient_init(sna, &tmp->base.mask);
-
 	tmp->prim_emit = gen4_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid)
 		tmp->prim_emit = gen4_emit_composite_spans_solid;
@@ -2702,10 +2685,7 @@ gen4_render_composite_spans(struct sna *sna,
 	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
-	tmp->base.u.gen4.wm_kernel =
-		gen4_choose_composite_kernel(tmp->base.op,
-					     true, false,
-					     tmp->base.is_affine);
+	tmp->base.u.gen5.wm_kernel = WM_KERNEL_OPACITY | !tmp->base.is_affine;
 	tmp->base.u.gen4.ve_id = 1 << 1 | tmp->base.is_affine;
 
 	tmp->box   = gen4_render_composite_spans_box;
diff --git a/src/sna/gen4_render.h b/src/sna/gen4_render.h
index 8e0cd74..49d232e 100644
--- a/src/sna/gen4_render.h
+++ b/src/sna/gen4_render.h
@@ -2635,6 +2635,9 @@ typedef enum {
 	WM_KERNEL_MASKSA,
 	WM_KERNEL_MASKSA_P,
 
+	WM_KERNEL_OPACITY,
+	WM_KERNEL_OPACITY_P,
+
 	WM_KERNEL_VIDEO_PLANAR,
 	WM_KERNEL_VIDEO_PACKED,
 	KERNEL_COUNT
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 3d826c4..1e0ee10 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -120,6 +120,9 @@ static const struct wm_kernel_info {
 	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
 	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),
 
+	NOKERNEL(WM_KERNEL_OPACITY, brw_wm_kernel__affine_opacity, true),
+	NOKERNEL(WM_KERNEL_OPACITY_P, brw_wm_kernel__projective_opacity, true),
+
 	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
 	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
@@ -2439,30 +2442,7 @@ cleanup_dst:
 	return false;
 }
 
-/* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static bool
-gen5_composite_alpha_gradient_init(struct sna *sna,
-				   struct sna_composite_channel *channel)
-{
-	DBG(("%s\n", __FUNCTION__));
-
-	channel->filter = PictFilterNearest;
-	channel->repeat = RepeatPad;
-	channel->is_affine = true;
-	channel->is_solid  = false;
-	channel->transform = NULL;
-	channel->width  = 256;
-	channel->height = 1;
-	channel->card_format = GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
-
-	channel->bo = sna_render_get_alpha_gradient(sna);
-
-	channel->scale[0]  = channel->scale[1]  = 1;
-	channel->offset[0] = channel->offset[1] = 0;
-	return channel->bo != NULL;
-}
-
 inline static void
 gen5_emit_composite_texcoord(struct sna *sna,
 			     const struct sna_composite_channel *channel,
@@ -2639,6 +2619,7 @@ gen5_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -2721,13 +2702,14 @@ gen5_render_composite_spans(struct sna *sna,
 		break;
 	}
 
-	tmp->base.mask.bo = NULL;
+	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
+	if (tmp->base.mask.bo == NULL)
+		goto cleanup_src;
+
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.has_component_alpha = false;
 	tmp->base.need_magic_ca_pass = false;
 
-	gen5_composite_alpha_gradient_init(sna, &tmp->base.mask);
-
 	tmp->prim_emit = gen5_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid)
 		tmp->prim_emit = gen5_emit_composite_spans_solid;
@@ -2736,10 +2718,7 @@ gen5_render_composite_spans(struct sna *sna,
 	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
-	tmp->base.u.gen5.wm_kernel =
-		gen5_choose_composite_kernel(tmp->base.op,
-					     true, false,
-					     tmp->base.is_affine);
+	tmp->base.u.gen5.wm_kernel = WM_KERNEL_OPACITY | !tmp->base.is_affine;
 	tmp->base.u.gen5.ve_id = 1 << 1 | tmp->base.is_affine;
 
 	tmp->box   = gen5_render_composite_spans_box;
diff --git a/src/sna/gen5_render.h b/src/sna/gen5_render.h
index 17708b5..b6e5b0c 100644
--- a/src/sna/gen5_render.h
+++ b/src/sna/gen5_render.h
@@ -2770,6 +2770,9 @@ typedef enum {
 	WM_KERNEL_MASKSA,
 	WM_KERNEL_MASKSA_P,
 
+	WM_KERNEL_OPACITY,
+	WM_KERNEL_OPACITY_P,
+
 	WM_KERNEL_VIDEO_PLANAR,
 	WM_KERNEL_VIDEO_PACKED,
 	KERNEL_COUNT
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 78baa3a..3dc0729 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -128,6 +128,9 @@ static const struct wm_kernel_info {
 	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3, 2),
 	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3, 2),
 
+	NOKERNEL(OPACITY, brw_wm_kernel__affine_opacity, 2, 2),
+	NOKERNEL(OPACITY_P, brw_wm_kernel__projective_opacity, 2, 2),
+
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7, 1),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2, 1),
 };
@@ -2788,28 +2791,7 @@ cleanup_dst:
 	return false;
 }
 
-/* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static bool
-gen6_composite_alpha_gradient_init(struct sna *sna,
-				   struct sna_composite_channel *channel)
-{
-	DBG(("%s\n", __FUNCTION__));
-
-	channel->is_affine = true;
-	channel->is_solid  = false;
-	channel->transform = NULL;
-	channel->width  = 256;
-	channel->height = 1;
-	channel->card_format = GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
-
-	channel->bo = sna_render_get_alpha_gradient(sna);
-
-	channel->scale[0]  = channel->scale[1]  = 1;
-	channel->offset[0] = channel->offset[1] = 0;
-	return channel->bo != NULL;
-}
-
 inline static void
 gen6_emit_composite_texcoord_affine(struct sna *sna,
 				    const struct sna_composite_channel *channel,
@@ -3040,6 +3022,7 @@ gen6_render_composite_spans_done(struct sna *sna,
 	if (sna->render_state.gen6.vertex_offset)
 		gen6_vertex_flush(sna);
 
+	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -3120,13 +3103,13 @@ gen6_render_composite_spans(struct sna *sna,
 		gen6_composite_channel_convert(&tmp->base.src);
 		break;
 	}
+	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
+	if (tmp->base.mask.bo == NULL)
+		goto cleanup_src;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.need_magic_ca_pass = false;
 
-	if (!gen6_composite_alpha_gradient_init(sna, &tmp->base.mask))
-		goto cleanup_src;
-
 	tmp->prim_emit = gen6_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid) {
 		tmp->prim_emit = gen6_emit_composite_spans_solid;
@@ -3150,9 +3133,7 @@ gen6_render_composite_spans(struct sna *sna,
 					      SAMPLER_FILTER_NEAREST,
 					      SAMPLER_EXTEND_PAD),
 			       gen6_get_blend(tmp->base.op, false, tmp->base.dst.format),
-			       gen6_choose_composite_kernel(tmp->base.op,
-							    true, false,
-							    tmp->base.is_affine),
+			       GEN6_WM_KERNEL_OPACITY | !tmp->base.is_affine,
 			       1 << 1 | tmp->base.is_affine);
 
 	tmp->box   = gen6_render_composite_spans_box;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 8c64016..a199307 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -133,6 +133,9 @@ static const struct wm_kernel_info {
 	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3),
 	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3),
 
+	NOKERNEL(OPACITY, brw_wm_kernel__affine_opacity, 2),
+	NOKERNEL(OPACITY_P, brw_wm_kernel__projective_opacity, 2),
+
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2),
 };
@@ -2872,28 +2875,7 @@ cleanup_dst:
 	return false;
 }
 
-/* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static bool
-gen7_composite_alpha_gradient_init(struct sna *sna,
-				   struct sna_composite_channel *channel)
-{
-	DBG(("%s\n", __FUNCTION__));
-
-	channel->is_affine = true;
-	channel->is_solid  = false;
-	channel->transform = NULL;
-	channel->width  = 256;
-	channel->height = 1;
-	channel->card_format = GEN7_SURFACEFORMAT_B8G8R8A8_UNORM;
-
-	channel->bo = sna_render_get_alpha_gradient(sna);
-
-	channel->scale[0]  = channel->scale[1]  = 1;
-	channel->offset[0] = channel->offset[1] = 0;
-	return channel->bo != NULL;
-}
-
 inline static void
 gen7_emit_composite_texcoord_affine(struct sna *sna,
 				    const struct sna_composite_channel *channel,
@@ -3124,6 +3106,7 @@ gen7_render_composite_spans_done(struct sna *sna,
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	kgem_bo_destroy(&sna->kgem, op->base.mask.bo);
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -3201,13 +3184,13 @@ gen7_render_composite_spans(struct sna *sna,
 		gen7_composite_channel_convert(&tmp->base.src);
 		break;
 	}
+	tmp->base.mask.bo = sna_render_get_solid(sna, 0);
+	if (tmp->base.mask.bo == NULL)
+		goto cleanup_src;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.need_magic_ca_pass = false;
 
-	if (!gen7_composite_alpha_gradient_init(sna, &tmp->base.mask))
-		goto cleanup_src;
-
 	tmp->prim_emit = gen7_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid) {
 		tmp->prim_emit = gen7_emit_composite_spans_solid;
@@ -3231,9 +3214,7 @@ gen7_render_composite_spans(struct sna *sna,
 					      SAMPLER_FILTER_NEAREST,
 					      SAMPLER_EXTEND_PAD),
 			       gen7_get_blend(tmp->base.op, false, tmp->base.dst.format),
-			       gen7_choose_composite_kernel(tmp->base.op,
-							    true, false,
-							    tmp->base.is_affine),
+			       GEN7_WM_KERNEL_OPACITY | !tmp->base.is_affine,
 			       1 << 1 | tmp->base.is_affine);
 
 	tmp->box   = gen7_render_composite_spans_box;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 142f222..ddcafdb 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -383,6 +383,9 @@ enum {
 	GEN6_WM_KERNEL_MASKSA,
 	GEN6_WM_KERNEL_MASKSA_P,
 
+	GEN6_WM_KERNEL_OPACITY,
+	GEN6_WM_KERNEL_OPACITY_P,
+
 	GEN6_WM_KERNEL_VIDEO_PLANAR,
 	GEN6_WM_KERNEL_VIDEO_PACKED,
 	GEN6_KERNEL_COUNT
@@ -432,6 +435,9 @@ enum {
 	GEN7_WM_KERNEL_MASKSA,
 	GEN7_WM_KERNEL_MASKSA_P,
 
+	GEN7_WM_KERNEL_OPACITY,
+	GEN7_WM_KERNEL_OPACITY_P,
+
 	GEN7_WM_KERNEL_VIDEO_PLANAR,
 	GEN7_WM_KERNEL_VIDEO_PACKED,
 	GEN7_WM_KERNEL_COUNT
commit fd3a1236051265fab700aad689a171de47d7338f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 31 10:20:51 2012 +0100

    sna/gen6: Enable 8 pixel dispatch
    
    This gives a small performance increase when operating with rectangles,
    which is reasonably frequent.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index e3a103c..78baa3a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -55,6 +55,14 @@
 #define NO_RING_SWITCH 0
 #define PREFER_RENDER 0
 
+#define USE_8_PIXEL_DISPATCH 1
+#define USE_16_PIXEL_DISPATCH 1
+#define USE_32_PIXEL_DISPATCH 0
+
+#if !USE_8_PIXEL_DISPATCH && !USE_16_PIXEL_DISPATCH && !USE_32_PIXEL_DISPATCH
+#error "Must select at least 8, 16 or 32 pixel dispatch"
+#endif
+
 #define GEN6_MAX_SIZE 8192
 
 struct gt_info {
@@ -612,29 +620,36 @@ gen6_emit_sf(struct sna *sna, bool has_mask)
 static void
 gen6_emit_wm(struct sna *sna, unsigned int kernel)
 {
+	const uint32_t *kernels;
+
 	if (sna->render_state.gen6.kernel == kernel)
 		return;
 
 	sna->render_state.gen6.kernel = kernel;
+	kernels = sna->render_state.gen6.wm_kernel[kernel];
 
-	DBG(("%s: switching to %s, num_surfaces=%d\n",
+	DBG(("%s: switching to %s, num_surfaces=%d (8-pixel? %d, 16-pixel? %d,32-pixel? %d)\n",
 	     __FUNCTION__,
-	     wm_kernels[kernel].name,
-	     wm_kernels[kernel].num_surfaces));
+	     wm_kernels[kernel].name, wm_kernels[kernel].num_surfaces,
+	    kernels[0], kernels[1], kernels[2]));
 
 	OUT_BATCH(GEN6_3DSTATE_WM | (9 - 2));
-	OUT_BATCH(sna->render_state.gen6.wm_kernel[kernel]);
+	OUT_BATCH(kernels[0] ?: kernels[1] ?: kernels[2]);
 	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
 		  wm_kernels[kernel].num_surfaces << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
-	OUT_BATCH(0);
-	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT); /* DW4 */
+	OUT_BATCH(0); /* scratch space */
+	OUT_BATCH((kernels[0] ? 4 : kernels[1] ? 6 : 8) << GEN6_3DSTATE_WM_DISPATCH_0_START_GRF_SHIFT |
+		  8 << GEN6_3DSTATE_WM_DISPATCH_1_START_GRF_SHIFT |
+		  6 << GEN6_3DSTATE_WM_DISPATCH_2_START_GRF_SHIFT);
 	OUT_BATCH((sna->render_state.gen6.info->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT |
-		  GEN6_3DSTATE_WM_DISPATCH_ENABLE |
-		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
+		  (kernels[0] ? GEN6_3DSTATE_WM_8_DISPATCH_ENABLE : 0) |
+		  (kernels[1] ? GEN6_3DSTATE_WM_16_DISPATCH_ENABLE : 0) |
+		  (kernels[2] ? GEN6_3DSTATE_WM_32_DISPATCH_ENABLE : 0) |
+		  GEN6_3DSTATE_WM_DISPATCH_ENABLE);
 	OUT_BATCH(wm_kernels[kernel].num_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
 		  GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
-	OUT_BATCH(0);
-	OUT_BATCH(0);
+	OUT_BATCH(kernels[2]);
+	OUT_BATCH(kernels[1]);
 }
 
 static bool
@@ -4156,18 +4171,31 @@ static bool gen6_render_setup(struct sna *sna)
 
 	for (m = 0; m < GEN6_KERNEL_COUNT; m++) {
 		if (wm_kernels[m].size) {
-			state->wm_kernel[m] =
+			state->wm_kernel[m][1] =
 				sna_static_stream_add(&general,
 						      wm_kernels[m].data,
 						      wm_kernels[m].size,
 						      64);
 		} else {
-			state->wm_kernel[m] =
-				sna_static_stream_compile_wm(sna, &general,
-							     wm_kernels[m].data,
-							     16);
+			if (USE_8_PIXEL_DISPATCH) {
+				state->wm_kernel[m][0] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 8);
+			}
+
+			if (USE_16_PIXEL_DISPATCH) {
+				state->wm_kernel[m][1] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 16);
+			}
+
+			if (USE_32_PIXEL_DISPATCH) {
+				state->wm_kernel[m][2] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 32);
+			}
 		}
-		assert(state->wm_kernel[m]);
+		assert(state->wm_kernel[m][0]|state->wm_kernel[m][1]|state->wm_kernel[m][2]);
 	}
 
 	ss = sna_static_stream_map(&general,
diff --git a/src/sna/gen6_render.h b/src/sna/gen6_render.h
index 9cc8e14..2201a62 100644
--- a/src/sna/gen6_render.h
+++ b/src/sna/gen6_render.h
@@ -97,10 +97,13 @@
 # define GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT			27
 # define GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT	18
 /* DW4 */
-# define GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT		16
+# define GEN6_3DSTATE_WM_DISPATCH_0_START_GRF_SHIFT		16
+# define GEN6_3DSTATE_WM_DISPATCH_1_START_GRF_SHIFT		8
+# define GEN6_3DSTATE_WM_DISPATCH_2_START_GRF_SHIFT		0
 /* DW5 */
 # define GEN6_3DSTATE_WM_MAX_THREADS_SHIFT			25
 # define GEN6_3DSTATE_WM_DISPATCH_ENABLE			(1 << 19)
+# define GEN6_3DSTATE_WM_32_DISPATCH_ENABLE			(1 << 2)
 # define GEN6_3DSTATE_WM_16_DISPATCH_ENABLE			(1 << 1)
 # define GEN6_3DSTATE_WM_8_DISPATCH_ENABLE			(1 << 0)
 /* DW6 */
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 6381ccf..8c64016 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4277,7 +4277,7 @@ static bool gen7_render_setup(struct sna *sna)
 								     wm_kernels[m].data, 32);
 			}
 		}
-		assert(state->wm_kernel[m][1]);
+		assert(state->wm_kernel[m][0]|state->wm_kernel[m][1]|state->wm_kernel[m][2]);
 	}
 
 	ss = sna_static_stream_map(&general,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 943c248..142f222 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -396,7 +396,7 @@ struct gen6_render_state {
 	uint32_t sf_state;
 	uint32_t sf_mask_state;
 	uint32_t wm_state;
-	uint32_t wm_kernel[GEN6_KERNEL_COUNT];
+	uint32_t wm_kernel[GEN6_KERNEL_COUNT][3];
 
 	uint32_t cc_vp;
 	uint32_t cc_blend;
commit 8922b804bc9ed27957c81f7cda4812ab4ecbd4de
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 31 10:20:51 2012 +0100

    sna/gen7: Enable 8 pixel dispatch
    
    This gives a small performance increase when operating with rectangles,
    which is reasonably frequent.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 29ee4af..6381ccf 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -54,7 +54,7 @@
 
 #define NO_RING_SWITCH 0
 
-#define USE_8_PIXEL_DISPATCH 0
+#define USE_8_PIXEL_DISPATCH 1
 #define USE_16_PIXEL_DISPATCH 1
 #define USE_32_PIXEL_DISPATCH 0
 
commit 492093d04b1486dd34aafe2f109a77ddeb836f18
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 31 18:39:17 2012 +0100

    sna: Generate shaders for SNB+ 8-pixel dispatch
    
    Not ideal yet, sampling an alpha-only surface using SIMD8 only seems to
    ever return 0...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/brw/brw.h b/src/sna/brw/brw.h
index a39b253..f0f3ac8 100644
--- a/src/sna/brw/brw.h
+++ b/src/sna/brw/brw.h
@@ -1,14 +1,14 @@
 #include "brw_eu.h"
 
-void brw_sf_kernel__nomask(struct brw_compile *p);
-void brw_sf_kernel__mask(struct brw_compile *p);
+bool brw_sf_kernel__nomask(struct brw_compile *p);
+bool brw_sf_kernel__mask(struct brw_compile *p);
 
-void brw_wm_kernel__affine(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__affine(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch_width);
 
-void brw_wm_kernel__projective(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch_width);
-void brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__projective(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch_width);
+bool brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch_width);
diff --git a/src/sna/brw/brw_sf.c b/src/sna/brw/brw_sf.c
index 0c69433..6f82171 100644
--- a/src/sna/brw/brw_sf.c
+++ b/src/sna/brw/brw_sf.c
@@ -1,6 +1,6 @@
 #include "brw.h"
 
-void brw_sf_kernel__nomask(struct brw_compile *p)
+bool brw_sf_kernel__nomask(struct brw_compile *p)
 {
 	struct brw_reg inv, v0, v1, v2, delta;
 
@@ -23,10 +23,11 @@ void brw_sf_kernel__nomask(struct brw_compile *p)
 	brw_urb_WRITE(p, brw_null_reg(), 0, brw_vec8_grf(0 ,0),
 		      false, true, 4, 0, true, true, 0,
 		      BRW_URB_SWIZZLE_TRANSPOSE);
+
+	return true;
 }
 
-void
-brw_sf_kernel__mask(struct brw_compile *p)
+bool brw_sf_kernel__mask(struct brw_compile *p)
 {
 	struct brw_reg inv, v0, v1, v2;
 
@@ -48,4 +49,6 @@ brw_sf_kernel__mask(struct brw_compile *p)
 	brw_urb_WRITE(p, brw_null_reg(), 0, brw_vec8_grf(0 ,0),
 		      false, true, 4, 0, true, true, 0,
 		      BRW_URB_SWIZZLE_TRANSPOSE);
+
+	return true;
 }
diff --git a/src/sna/brw/brw_wm.c b/src/sna/brw/brw_wm.c
index 9a8af5f..f96881a 100644
--- a/src/sna/brw/brw_wm.c
+++ b/src/sna/brw/brw_wm.c
@@ -34,7 +34,8 @@ static void brw_wm_xy(struct brw_compile *p, int dw)
 	brw_ADD(p, brw_vec8_grf(Y16, 0), vec8(y_uw), brw_negate(__suboffset(r1, 1)));
 }
 
-static void brw_wm_affine_st(struct brw_compile *p, int dw, int channel, int msg)
+static void brw_wm_affine_st(struct brw_compile *p, int dw,
+			     int channel, int msg)
 {
 	int uv;
 
@@ -88,8 +89,8 @@ static inline struct brw_reg sample_result(int dw, int result)
 		       WRITEMASK_XYZW);
 }
 
-static void brw_wm_sample(struct brw_compile *p, int dw,
-			  int channel, int msg, int result)
+static int brw_wm_sample(struct brw_compile *p, int dw,
+			 int channel, int msg, int result)
 {
 	struct brw_reg src0;
 	bool header;
@@ -107,15 +108,24 @@ static void brw_wm_sample(struct brw_compile *p, int dw,
 	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
 		   channel+1, channel, WRITEMASK_XYZW, 0,
 		   2*len, len+header, header, simd(dw));
+	return result;
 }
 
-static void brw_wm_sample__alpha(struct brw_compile *p, int dw,
-				 int channel, int msg, int result)
+static int brw_wm_sample__alpha(struct brw_compile *p, int dw,
+				int channel, int msg, int result)
 {
 	struct brw_reg src0;
-	int len;
+	int mlen, rlen;
+
+	if (dw == 8) {
+		/* SIMD8 sample return is not masked */
+		mlen = 3;
+		rlen = 4;
+	} else {
+		mlen = 5;
+		rlen = 2;
+	}
 
-	len = dw == 16 ? 4 : 2;
 	if (p->gen >= 60)
 		src0 = brw_message_reg(msg);
 	else
@@ -123,27 +133,31 @@ static void brw_wm_sample__alpha(struct brw_compile *p, int dw,
 
 	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
 		   channel+1, channel, WRITEMASK_W, 0,
-		   len/2, len+1, true, simd(dw));
+		   rlen, mlen, true, simd(dw));
+
+	if (dw == 8)
+		result += 3;
+
+	return result;
 }
 
-static void brw_wm_affine(struct brw_compile *p, int dw,
-			  int channel, int msg, int result)
+static int brw_wm_affine(struct brw_compile *p, int dw,
+			 int channel, int msg, int result)
 {
 	brw_wm_affine_st(p, dw, channel, msg);
-	brw_wm_sample(p, dw, channel, msg, result);
+	return brw_wm_sample(p, dw, channel, msg, result);
 }
 
-static void brw_wm_affine__alpha(struct brw_compile *p, int dw,
-				 int channel, int msg, int result)
+static int brw_wm_affine__alpha(struct brw_compile *p, int dw,
+				int channel, int msg, int result)
 {
 	brw_wm_affine_st(p, dw, channel, msg);
-	brw_wm_sample__alpha(p, dw, channel, msg, result);
+	return brw_wm_sample__alpha(p, dw, channel, msg, result);
 }
 
 static inline struct brw_reg null_result(int dw)
 {
-	return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
-		       BRW_ARF_NULL, 0,
+	return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_NULL, 0,
 		       BRW_REGISTER_TYPE_UW,
 		       dw == 16 ? BRW_VERTICAL_STRIDE_16 : BRW_VERTICAL_STRIDE_8,
 		       dw == 16 ? BRW_WIDTH_16 : BRW_WIDTH_8,
@@ -185,8 +199,8 @@ static void brw_fb_write(struct brw_compile *p, int dw)
 	insn->header.compression_control = BRW_COMPRESSION_NONE;
 
 	if (p->gen >= 60) {
-		src0 = brw_message_reg(2);
 		msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
+		src0 = brw_message_reg(2);
 		header = false;
 	} else {
 		insn->header.destreg__conditionalmod = 0;
@@ -206,14 +220,19 @@ static void brw_wm_write(struct brw_compile *p, int dw, int src)
 {
 	int n;
 
-	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
 	if (dw == 8 && p->gen >= 60) {
-		brw_MOV(p, brw_message_reg(2), brw_vec8_grf(src, 0));
+		/* XXX pixel execution mask? */
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+		brw_MOV(p, brw_message_reg(2), brw_vec8_grf(src+0, 0));
+		brw_MOV(p, brw_message_reg(3), brw_vec8_grf(src+1, 0));
 		brw_MOV(p, brw_message_reg(4), brw_vec8_grf(src+2, 0));
+		brw_MOV(p, brw_message_reg(5), brw_vec8_grf(src+3, 0));
 		goto done;
 	}
 
+	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+
 	for (n = 0; n < 4; n++) {
 		if (p->gen >= 60) {
 			brw_MOV(p,
@@ -242,38 +261,36 @@ done:
 	brw_fb_write(p, dw);
 }
 
-static inline struct brw_reg mask_a8(int nr)
-{
-	return brw_reg(BRW_GENERAL_REGISTER_FILE,
-		       nr, 0,
-		       BRW_REGISTER_TYPE_F,
-		       BRW_VERTICAL_STRIDE_0,
-		       BRW_WIDTH_8,
-		       BRW_HORIZONTAL_STRIDE_1,
-		       BRW_SWIZZLE_XYZW,
-		       WRITEMASK_XYZW);
-}
-
-static void brw_wm_write__mask(struct brw_compile *p,
-			       int dw,
+static void brw_wm_write__mask(struct brw_compile *p, int dw,
 			       int src, int mask)
 {
 	int n;
 
-	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
 	if (dw == 8 && p->gen >= 60) {
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
 		brw_MUL(p,
 			brw_message_reg(2),
-			brw_vec8_grf(src, 0),
-			mask_a8(mask));
+			brw_vec8_grf(src+0, 0),
+			brw_vec8_grf(mask, 0));
+		brw_MUL(p,
+			brw_message_reg(3),
+			brw_vec8_grf(src+1, 0),
+			brw_vec8_grf(mask, 0));
 		brw_MUL(p,
 			brw_message_reg(4),
 			brw_vec8_grf(src+2, 0),
-			mask_a8(mask));
+			brw_vec8_grf(mask, 0));
+		brw_MUL(p,
+			brw_message_reg(5),
+			brw_vec8_grf(src+3, 0),
+			brw_vec8_grf(mask, 0));
+
 		goto done;
 	}
 
+	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+
 	for (n = 0; n < 4; n++) {
 		if (p->gen >= 60) {
 			brw_MUL(p,
@@ -306,25 +323,36 @@ done:
 	brw_fb_write(p, dw);
 }
 
-static void brw_wm_write__mask_ca(struct brw_compile *p,
-				  int dw, int src, int mask)
+static void brw_wm_write__mask_ca(struct brw_compile *p, int dw,
+				  int src, int mask)
 {
 	int n;
 
-	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
-
 	if (dw == 8 && p->gen >= 60) {
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
 		brw_MUL(p,
 			brw_message_reg(2),
-			brw_vec8_grf(src, 0),
-			brw_vec8_grf(mask, 0));
+			brw_vec8_grf(src  + 0, 0),
+			brw_vec8_grf(mask + 0, 0));
+		brw_MUL(p,
+			brw_message_reg(3),
+			brw_vec8_grf(src  + 1, 0),
+			brw_vec8_grf(mask + 1, 0));
 		brw_MUL(p,
 			brw_message_reg(4),
-			brw_vec8_grf(src + 2, 0),
+			brw_vec8_grf(src  + 2, 0),
 			brw_vec8_grf(mask + 2, 0));
+		brw_MUL(p,
+			brw_message_reg(5),
+			brw_vec8_grf(src  + 3, 0),
+			brw_vec8_grf(mask + 3, 0));
+
 		goto done;
 	}
 
+	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+
 	for (n = 0; n < 4; n++) {
 		if (p->gen >= 60) {
 			brw_MUL(p,
@@ -357,56 +385,71 @@ done:
 	brw_fb_write(p, dw);
 }
 
-void
+bool
 brw_wm_kernel__affine(struct brw_compile *p, int dispatch)
 {
-	int src = 12;
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_affine(p, dispatch, 0, 1, src);
-	brw_wm_write(p, dispatch, src);
+	brw_wm_write(p, dispatch, brw_wm_affine(p, dispatch, 0, 1, 12));
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 20;
+	int src, mask;
+
+	if (dispatch == 8)
+		return false; /* XXX sampler alpha retuns all 0 */
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_affine(p, dispatch, 0, 1, src);
-	brw_wm_affine__alpha(p, dispatch, 1, 7, mask);
+
+	src = brw_wm_affine(p, dispatch, 0, 1, 12);
+	mask = brw_wm_affine__alpha(p, dispatch, 1, 6, 20);
 	brw_wm_write__mask(p, dispatch, src, mask);
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 20;
+	int src, mask;
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_affine(p, dispatch, 0, 1, src);
-	brw_wm_affine(p, dispatch, 1, 7, mask);
+
+	src = brw_wm_affine(p, dispatch, 0, 1, 12);
+	mask = brw_wm_affine(p, dispatch, 1, 6, 20);
 	brw_wm_write__mask_ca(p, dispatch, src, mask);
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 14;
+	int src, mask;
+
+	if (dispatch == 8)
+		return false; /* XXX sampler alpha retuns all 0 */
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_affine__alpha(p, dispatch, 0, 1, src);
-	brw_wm_affine(p, dispatch, 1, 7, mask);
+
+	src = brw_wm_affine__alpha(p, dispatch, 0, 1, 12);
+	mask = brw_wm_affine(p, dispatch, 1, 6, 16);
 	brw_wm_write__mask(p, dispatch, mask, src);
+
+	return true;
 }
 
 /* Projective variants */
 
-static void brw_wm_projective_st(struct brw_compile *p, int dw, int channel, int msg)
+static void brw_wm_projective_st(struct brw_compile *p, int dw,
+				 int channel, int msg)
 {
 	int uv;
 
@@ -480,63 +523,77 @@ static void brw_wm_projective_st(struct brw_compile *p, int dw, int channel, int
 	}
 }
 
-static void brw_wm_projective(struct brw_compile *p, int dw,
-			      int channel, int msg, int result)
+static int brw_wm_projective(struct brw_compile *p, int dw,
+			     int channel, int msg, int result)
 {
 	brw_wm_projective_st(p, dw, channel, msg);
-	brw_wm_sample(p, dw, channel, msg, result);
+	return brw_wm_sample(p, dw, channel, msg, result);
 }
 
-static void brw_wm_projective__alpha(struct brw_compile *p, int dw,
+static int brw_wm_projective__alpha(struct brw_compile *p, int dw,
 				     int channel, int msg, int result)
 {
 	brw_wm_projective_st(p, dw, channel, msg);
-	brw_wm_sample__alpha(p, dw, channel, msg, result);
+	return brw_wm_sample__alpha(p, dw, channel, msg, result);
 }
 
-void
+bool
 brw_wm_kernel__projective(struct brw_compile *p, int dispatch)
 {
-	int src = 12;
-
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_projective(p, dispatch, 0, 1, src);
-	brw_wm_write(p, dispatch, src);
+	brw_wm_write(p, dispatch, brw_wm_projective(p, dispatch, 0, 1, 12));
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 20;
+	int src, mask;
+
+	if (dispatch == 8)
+		return false; /* XXX sampler alpha retuns all 0 */
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_projective(p, dispatch, 0, 1, src);
-	brw_wm_projective__alpha(p, dispatch, 1, 7, mask);
+
+	src = brw_wm_projective(p, dispatch, 0, 1, 12);
+	mask = brw_wm_projective__alpha(p, dispatch, 1, 6, 20);
 	brw_wm_write__mask(p, dispatch, src, mask);
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 20;
+	int src, mask;
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_projective(p, dispatch, 0, 1, src);
-	brw_wm_projective(p, dispatch, 1,7, mask);
+
+	src = brw_wm_projective(p, dispatch, 0, 1, 12);
+	mask = brw_wm_projective(p, dispatch, 1, 6, 20);
 	brw_wm_write__mask_ca(p, dispatch, src, mask);
+
+	return true;
 }
 
-void
+bool
 brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch)
 {
-	int src = 12, mask = 14;
+	int src, mask;
+
+	if (dispatch == 8)
+		return false; /* XXX sampler alpha retuns all 0 */
 
 	if (p->gen < 60)
 		brw_wm_xy(p, dispatch);
-	brw_wm_projective__alpha(p, dispatch, 0, 1, src);
-	brw_wm_projective(p, dispatch, 1, 7, mask);
+
+	src = brw_wm_projective__alpha(p, dispatch, 0, 1, 12);
+	mask = brw_wm_projective(p, dispatch, 1, 6, 16);
 	brw_wm_write__mask(p, dispatch, mask, src);
+
+	return true;
 }
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 011b1b7..943c248 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -482,11 +482,11 @@ uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream,
 				    void *ptr);
 unsigned sna_static_stream_compile_sf(struct sna *sna,
 				      struct sna_static_stream *stream,
-				      void (*compile)(struct brw_compile *));
+				      bool (*compile)(struct brw_compile *));
 
 unsigned sna_static_stream_compile_wm(struct sna *sna,
 				      struct sna_static_stream *stream,
-				      void (*compile)(struct brw_compile *, int),
+				      bool (*compile)(struct brw_compile *, int),
 				      int width);
 struct kgem_bo *sna_static_stream_fini(struct sna *sna,
 				       struct sna_static_stream *stream);
diff --git a/src/sna/sna_stream.c b/src/sna/sna_stream.c
index 66a8c46..1a0a86b 100644
--- a/src/sna/sna_stream.c
+++ b/src/sna/sna_stream.c
@@ -97,7 +97,7 @@ struct kgem_bo *sna_static_stream_fini(struct sna *sna,
 unsigned
 sna_static_stream_compile_sf(struct sna *sna,
 			     struct sna_static_stream *stream,
-			     void (*compile)(struct brw_compile *))
+			     bool (*compile)(struct brw_compile *))
 {
 	struct brw_compile p;
 
@@ -105,7 +105,11 @@ sna_static_stream_compile_sf(struct sna *sna,
 			 sna_static_stream_map(stream,
 					       64*sizeof(uint32_t), 64));
 
-	compile(&p);
+	if (!compile(&p)) {
+		stream->used -= 64*sizeof(uint32_t);
+		return 0;
+	}
+
 	assert(p.nr_insn*sizeof(struct brw_instruction) <= 64*sizeof(uint32_t));
 
 	stream->used -= 64*sizeof(uint32_t) - p.nr_insn*sizeof(struct brw_instruction);
@@ -115,7 +119,7 @@ sna_static_stream_compile_sf(struct sna *sna,
 unsigned
 sna_static_stream_compile_wm(struct sna *sna,
 			     struct sna_static_stream *stream,
-			     void (*compile)(struct brw_compile *, int),
+			     bool (*compile)(struct brw_compile *, int),
 			     int dispatch_width)
 {
 	struct brw_compile p;
@@ -124,7 +128,11 @@ sna_static_stream_compile_wm(struct sna *sna,
 			 sna_static_stream_map(stream,
 					       256*sizeof(uint32_t), 64));
 
-	compile(&p, dispatch_width);
+	if (!compile(&p, dispatch_width)) {
+		stream->used -= 256*sizeof(uint32_t);
+		return 0;
+	}
+
 	assert(p.nr_insn*sizeof(struct brw_instruction) <= 256*sizeof(uint32_t));
 
 	stream->used -= 256*sizeof(uint32_t) - p.nr_insn*sizeof(struct brw_instruction);
commit 6a5ed88f9fab654c9c11c566b841d42150d26c5d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 30 17:08:20 2012 +0100

    sna/gen4: Tidy debugging code
    
    Cluster the ifdefs together in the initialisation code.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 64fd7df..d1dbf5a 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -55,7 +55,9 @@
 #define NO_COPY 0
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
+#define NO_FILL_ONE 0
 #define NO_FILL_BOXES 0
+#define NO_VIDEO 0
 
 #if FLUSH_EVERY_VERTEX
 #define FLUSH(OP) do { \
@@ -1206,6 +1208,7 @@ gen4_get_batch(struct sna *sna)
 static void
 gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
+	assert(op->floats_per_rect == 3*op->floats_per_vertex);
 	if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
 			gen4_vertex_finish(sna);
@@ -2271,17 +2274,6 @@ gen4_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen4_blend_op))
 		return false;
 
-#if NO_COMPOSITE
-	if (mask)
-		return false;
-
-	return sna_blt_composite(sna, op,
-				 src, dst,
-				 src_x, src_y,
-				 dst_x, dst_y,
-				 width, height, tmp);
-#endif
-
 	if (mask == NULL &&
 	    try_blt(sna, dst, src, width, height) &&
 	    sna_blt_composite(sna, op,
@@ -2819,17 +2811,6 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	DBG(("%s x %d\n", __FUNCTION__, n));
 
-#if NO_COPY_BOXES
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy_boxes(sna, alu,
-				  src_bo, src_dx, src_dy,
-				  dst_bo, dst_dx, dst_dy,
-				  dst->drawable.bitsPerPixel,
-				  box, n);
-#endif
-
 	if (prefer_blt_copy(sna, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
@@ -3007,16 +2988,6 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 	     dst->drawable.serialNumber,
 	     alu));
 
-#if NO_COPY
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy(sna, alu,
-			    src_bo, dst_bo,
-			    dst->drawable.bitsPerPixel,
-			    op);
-#endif
-
 	if (prefer_blt(sna) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
@@ -3194,18 +3165,15 @@ gen4_render_fill_boxes(struct sna *sna,
 						     dst, dst_bo, box, n);
 	}
 
-#if NO_FILL_BOXES
-	return false;
-#endif
-
-	if (op == PictOpClear)
+	if (op == PictOpClear) {
 		pixel = 0;
-	else if (!sna_get_pixel_from_rgba(&pixel,
-					  color->red,
-					  color->green,
-					  color->blue,
-					  color->alpha,
-					  PICT_a8r8g8b8))
+		op = PictOpSrc;
+	} else if (!sna_get_pixel_from_rgba(&pixel,
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    PICT_a8r8g8b8))
 		return false;
 
 	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));
@@ -3295,13 +3263,6 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 		 uint32_t color,
 		 struct sna_fill_op *op)
 {
-#if NO_FILL
-	return sna_blt_fill(sna, alu,
-			    dst_bo, dst->drawable.bitsPerPixel,
-			    color,
-			    op);
-#endif
-
 	if (prefer_blt(sna) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
@@ -3392,11 +3353,6 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	DBG(("%s: color=%08x\n", __FUNCTION__, color));
 
-#if NO_FILL_ONE
-	return gen4_render_fill_one_try_blt(sna, dst, bo, color,
-					    x1, y1, x2, y2, alu);
-#endif
-
 	if (gen4_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
 		return true;
@@ -3755,19 +3711,34 @@ bool gen4_render_init(struct sna *sna)
 	sna->kgem.retire = gen4_render_retire;
 	sna->kgem.expire = gen4_render_expire;
 
+#if !NO_COMPOSITE
 	sna->render.composite = gen4_render_composite;
+#endif
 #if !NO_COMPOSITE_SPANS
 	sna->render.check_composite_spans = gen4_check_composite_spans;
 	sna->render.composite_spans = gen4_render_composite_spans;
 #endif
+
+#if !NO_VIDEO
 	sna->render.video = gen4_render_video;
+#endif
 
+#if !NO_COPY_BOXES
 	sna->render.copy_boxes = gen4_render_copy_boxes;
+#endif
+#if !NO_COPY
 	sna->render.copy = gen4_render_copy;
+#endif
 
+#if !NO_FILL_BOXES
 	sna->render.fill_boxes = gen4_render_fill_boxes;
+#endif
+#if !NO_FILL
 	sna->render.fill = gen4_render_fill;
+#endif
+#if !NO_FILL_ONE
 	sna->render.fill_one = gen4_render_fill_one;
+#endif
 
 	sna->render.flush = gen4_render_flush;
 	sna->render.reset = gen4_render_reset;
commit 46ec9b0ed55d0fcade40f92206e59c02e402d870
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 31 17:41:34 2012 +0100

    sna: Update DPMS mode on CRTC after forcing the outputs on
    
    If we forcibly update the outputs to be on, then the core will not issue
    its own DPMS event and we miss out on updating the CRTC bookkeeping in
    sna_crtc_dpms(). So we need to update the flag on the CRTC as we
    manipulate the outputs during modesetting.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52142
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a908596..7754efa 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -499,6 +499,8 @@ sna_crtc_force_outputs_on(xf86CrtcPtr crtc)
 
 		output->funcs->dpms(output, DPMSModeOn);
 	}
+
+	to_sna_crtc(crtc)->dpms_mode = DPMSModeOn;
 }
 
 static bool
@@ -663,14 +665,21 @@ static void update_flush_interval(struct sna *sna)
 	int i, max_vrefresh = 0;
 
 	for (i = 0; i < xf86_config->num_crtc; i++) {
-		if (!xf86_config->crtc[i]->enabled)
+		xf86CrtcPtr crtc = xf86_config->crtc[i];
+
+		if (!crtc->enabled) {
+			DBG(("%s: CRTC:%d (pipe %d) disabled\n",
+			     __FUNCTION__,i, to_sna_crtc(crtc)->pipe));
 			continue;
+		}
 
-		if (to_sna_crtc(xf86_config->crtc[i])->dpms_mode != DPMSModeOn)
+		if (to_sna_crtc(crtc)->dpms_mode != DPMSModeOn) {
+			DBG(("%s: CRTC:%d (pipe %d) turned off\n",
+			     __FUNCTION__,i, to_sna_crtc(crtc)->pipe));
 			continue;
+		}
 
-		max_vrefresh = max(max_vrefresh,
-				   xf86ModeVRefresh(&xf86_config->crtc[i]->mode));
+		max_vrefresh = max(max_vrefresh, xf86ModeVRefresh(&crtc->mode));
 	}
 
 	if (max_vrefresh == 0)
commit 8f166d26b8a93592939068c5a8d160981c724cfd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 31 11:58:24 2012 +0100

    sna: Be more careful with damage reduction during CompositeRectangles
    
    We actually need to force DAMAGE_ALL in case we are promoting the GPU
    pixmap.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d2fd298..a4287f7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -314,9 +314,12 @@ static void assert_pixmap_damage(PixmapPtr p)
 	if (priv == NULL)
 		return;
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage) && DAMAGE_IS_ALL(priv->cpu_damage))
+	if (DAMAGE_IS_ALL(priv->gpu_damage) && DAMAGE_IS_ALL(priv->cpu_damage)) {
 		/* special upload buffer */
+		assert(priv->gpu_bo && priv->gpu_bo->proxy);
+		assert(priv->cpu_bo == NULL);
 		return;
+	}
 
 	assert(!DAMAGE_IS_ALL(priv->gpu_damage) || priv->cpu_damage == NULL);
 	assert(!DAMAGE_IS_ALL(priv->cpu_damage) || priv->gpu_damage == NULL);
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 36287c6..0ca66c7 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -855,25 +855,34 @@ sna_composite_rectangles(CARD8		 op,
 	/* Clearing a pixmap after creation is a common operation, so take
 	 * advantage and reduce further damage operations.
 	 */
-	if (op <= PictOpSrc &&
-	    region_subsumes_drawable(&region, &pixmap->drawable)) {
-		bool ok;
-
-		assert(DAMAGE_IS_ALL(priv->gpu_damage));
-
-		priv->undamaged = false;
-		priv->clear_color = 0;
-		ok = true;
-		if (op == PictOpSrc)
-			ok = sna_get_pixel_from_rgba(&priv->clear_color,
-						     color->red,
-						     color->green,
-						     color->blue,
-						     color->alpha,
-						     dst->format);
-		priv->clear = ok;
-		DBG(("%s: marking clear [%08x]? %d\n",
-		     __FUNCTION__, priv->clear_color, ok));
+	if (region_subsumes_drawable(&region, &pixmap->drawable)) {
+		if (damage) {
+			sna_damage_all(damage,
+				       pixmap->drawable.width,
+				       pixmap->drawable.height);
+			sna_damage_destroy(damage == &priv->gpu_damage ?
+					   &priv->cpu_damage : &priv->gpu_damage);
+			priv->undamaged = false;
+		}
+
+		if (op <= PictOpSrc && bo == priv->gpu_bo) {
+			bool ok;
+
+			assert(DAMAGE_IS_ALL(priv->gpu_damage));
+
+			priv->clear_color = 0;
+			ok = true;
+			if (op == PictOpSrc)
+				ok = sna_get_pixel_from_rgba(&priv->clear_color,
+							     color->red,
+							     color->green,
+							     color->blue,
+							     color->alpha,
+							     dst->format);
+			priv->clear = ok;
+			DBG(("%s: marking clear [%08x]? %d\n",
+			     __FUNCTION__, priv->clear_color, ok));
+		}
 	}
 	goto done;
 
commit e6cb5d93eaa01e7f4763f797bba341f3cc481d98
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 30 11:14:58 2012 +0100

    sna: Avoid overlapping gpu/cpu damage with IGNORE_CPU
    
    We cannot simply ignore the presence of CPU damage with IGNORE_CPU but
    must remember to discard it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7a434cb..d2fd298 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2333,7 +2333,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 		flags |= PREFER_GPU;
 	if (priv->shm)
 		flags = 0;
-	if (priv->cpu && (flags & (IGNORE_CPU | FORCE_GPU)) == 0)
+	if (priv->cpu && (flags & FORCE_GPU) == 0)
 		flags = 0;
 
 	if (!flags && (!priv->gpu_damage || !kgem_bo_is_busy(priv->gpu_bo)))
@@ -2395,7 +2395,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	     region.extents.x2, region.extents.y2));
 
 	if (priv->gpu_damage) {
-		if (flags & IGNORE_CPU || !priv->cpu_damage) {
+		if (!priv->cpu_damage) {
 			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
 							       &region.extents)) {
 				DBG(("%s: region wholly contained within GPU damage\n",
@@ -2444,7 +2444,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 
 move_to_gpu:
 	if (!sna_pixmap_move_area_to_gpu(pixmap, &region.extents,
-					 MOVE_READ | MOVE_WRITE)) {
+					 flags & IGNORE_CPU ? MOVE_WRITE : MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: failed to move-to-gpu, fallback\n", __FUNCTION__));
 		assert(priv->gpu_bo == NULL);
 		goto use_cpu_bo;
@@ -11400,17 +11400,18 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 				sna_damage_destroy(&priv->cpu_damage);
 				list_del(&priv->list);
 			}
+			hint |= IGNORE_CPU;
 		}
-		if (region_subsumes_drawable(&region, &pixmap->drawable) ||
-		    box_inplace(pixmap, &region.extents)) {
+		if (priv->cpu_damage == NULL &&
+		    (region_subsumes_drawable(&region, &pixmap->drawable) ||
+		     box_inplace(pixmap, &region.extents))) {
 			DBG(("%s: promoting to full GPU\n", __FUNCTION__));
-			if (priv->gpu_bo && priv->cpu_damage == NULL) {
+			if (priv->gpu_bo) {
 				sna_damage_all(&priv->gpu_damage,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height);
 				priv->undamaged = false;
 			}
-			hint |= IGNORE_CPU;
 		}
 		if (priv->cpu_damage == NULL) {
 			DBG(("%s: dropping last-cpu hint\n", __FUNCTION__));
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index b36e2ee..36287c6 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -832,7 +832,8 @@ sna_composite_rectangles(CARD8		 op,
 			priv->cpu = false;
 		}
 
-		hint |= IGNORE_CPU;
+		if (region.data == NULL)
+			hint |= IGNORE_CPU;
 	}
 
 	bo = sna_drawable_use_bo(&pixmap->drawable, hint,
commit c9805ba98775bb1e969ff59c7044fe1a49673ca8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 18:19:37 2012 +0100

    sna: Export sna_drawable_use_bo() to select target for FillRectangles
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index b8099c9..7295bef 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -460,6 +460,65 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 
 struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 
+#define PREFER_GPU	0x1
+#define FORCE_GPU	0x2
+#define IGNORE_CPU	0x4
+struct kgem_bo *
+sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
+		    struct sna_damage ***damage);
+
+static inline bool
+box_inplace(PixmapPtr pixmap, const BoxRec *box)
+{
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	return ((int)(box->x2 - box->x1) * (int)(box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 12) >= sna->kgem.half_cpu_cache_pages;
+}
+
+static inline bool
+region_subsumes_drawable(RegionPtr region, DrawablePtr drawable)
+{
+	const BoxRec *extents;
+
+	if (region->data)
+		return false;
+
+	extents = RegionExtents(region);
+	return  extents->x1 <= 0 && extents->y1 <= 0 &&
+		extents->x2 >= drawable->width &&
+		extents->y2 >= drawable->height;
+}
+
+static inline bool
+region_subsumes_damage(const RegionRec *region, struct sna_damage *damage)
+{
+	const BoxRec *re, *de;
+
+	DBG(("%s?\n", __FUNCTION__));
+	assert(damage);
+
+	re = &region->extents;
+	de = &DAMAGE_PTR(damage)->extents;
+	DBG(("%s: region (%d, %d), (%d, %d), damage (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     re->x1, re->y1, re->x2, re->y2,
+	     de->x1, de->y1, de->x2, de->y2));
+
+	if (re->x2 < de->x2 || re->x1 > de->x1 ||
+	    re->y2 < de->y2 || re->y1 > de->y1) {
+		DBG(("%s: not contained\n", __FUNCTION__));
+		return false;
+	}
+
+	if (region->data == NULL) {
+		DBG(("%s: singular region contains damage\n", __FUNCTION__));
+		return true;
+	}
+
+	return pixman_region_contains_rectangle((RegionPtr)region,
+						(BoxPtr)de) == PIXMAN_REGION_IN;
+}
+
+
 static inline bool
 sna_drawable_is_clear(DrawablePtr d)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f5cfd29..7a434cb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1396,50 +1396,6 @@ done:
 }
 
 static bool
-region_subsumes_drawable(RegionPtr region, DrawablePtr drawable)
-{
-	const BoxRec *extents;
-
-	if (region->data)
-		return false;
-
-	extents = RegionExtents(region);
-	return  extents->x1 <= 0 && extents->y1 <= 0 &&
-		extents->x2 >= drawable->width &&
-		extents->y2 >= drawable->height;
-}
-
-static bool
-region_subsumes_damage(const RegionRec *region, struct sna_damage *damage)
-{
-	const BoxRec *re, *de;
-
-	DBG(("%s?\n", __FUNCTION__));
-	assert(damage);
-
-	re = &region->extents;
-	de = &DAMAGE_PTR(damage)->extents;
-	DBG(("%s: region (%d, %d), (%d, %d), damage (%d, %d), (%d, %d)\n",
-	     __FUNCTION__,
-	     re->x1, re->y1, re->x2, re->y2,
-	     de->x1, de->y1, de->x2, de->y2));
-
-	if (re->x2 < de->x2 || re->x1 > de->x1 ||
-	    re->y2 < de->y2 || re->y1 > de->y1) {
-		DBG(("%s: not contained\n", __FUNCTION__));
-		return false;
-	}
-
-	if (region->data == NULL) {
-		DBG(("%s: singular region contains damage\n", __FUNCTION__));
-		return true;
-	}
-
-	return pixman_region_contains_rectangle((RegionPtr)region,
-						(BoxPtr)de) == PIXMAN_REGION_IN;
-}
-
-static bool
 region_overlaps_damage(const RegionRec *region,
 		       struct sna_damage *damage,
 		       int dx, int dy)
@@ -2134,13 +2090,6 @@ drawable_gc_flags(DrawablePtr draw, GCPtr gc, bool partial)
 	return (partial ? MOVE_READ : 0) | MOVE_WRITE | MOVE_INPLACE_HINT;
 }
 
-static inline bool
-box_inplace(PixmapPtr pixmap, const BoxRec *box)
-{
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	return ((int)(box->x2 - box->x1) * (int)(box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 12) >= sna->kgem.half_cpu_cache_pages;
-}
-
 static inline struct sna_pixmap *
 sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 {
@@ -2349,11 +2298,7 @@ done:
 	return sna_pixmap_mark_active(sna, priv) != NULL;
 }
 
-#define PREFER_GPU	0x1
-#define FORCE_GPU	0x2
-#define IGNORE_CPU	0x4
-
-static inline struct kgem_bo *
+struct kgem_bo *
 sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 		    struct sna_damage ***damage)
 {
@@ -2391,7 +2336,7 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 	if (priv->cpu && (flags & (IGNORE_CPU | FORCE_GPU)) == 0)
 		flags = 0;
 
-	if (!flags && (!priv->gpu_bo || !kgem_bo_is_busy(priv->gpu_bo)))
+	if (!flags && (!priv->gpu_damage || !kgem_bo_is_busy(priv->gpu_bo)))
 		goto use_cpu_bo;
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
@@ -11467,8 +11412,10 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 			}
 			hint |= IGNORE_CPU;
 		}
-		if (priv->cpu_damage == NULL)
+		if (priv->cpu_damage == NULL) {
+			DBG(("%s: dropping last-cpu hint\n", __FUNCTION__));
 			priv->cpu = false;
+		}
 	}
 
 	/* If the source is already on the GPU, keep the operation on the GPU */
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 60179c4..b36e2ee 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -667,11 +667,13 @@ sna_composite_rectangles(CARD8		 op,
 	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
 	PixmapPtr pixmap;
 	struct sna_pixmap *priv;
+	struct kgem_bo *bo;
+	struct sna_damage **damage;
 	pixman_region16_t region;
 	pixman_box16_t *boxes;
 	int16_t dst_x, dst_y;
-	int num_boxes;
-	int error;
+	int num_boxes, error;
+	unsigned hint;
 
 	DBG(("%s(op=%d, %08x x %d [(%d, %d)x(%d, %d) ...])\n",
 	     __FUNCTION__, op,
@@ -782,6 +784,7 @@ sna_composite_rectangles(CARD8		 op,
 	     __FUNCTION__, dst_x, dst_y,
 	     RegionExtents(&region)->x1, RegionExtents(&region)->y1,
 	     RegionExtents(&region)->x2, RegionExtents(&region)->y2));
+	assert_pixmap_contains_box(pixmap, RegionExtents(&region));
 
 	if (NO_COMPOSITE_RECTANGLES)
 		goto fallback;
@@ -794,71 +797,83 @@ sna_composite_rectangles(CARD8		 op,
 		goto fallback;
 	}
 
-	boxes = pixman_region_rectangles(&region, &num_boxes);
-
 	priv = sna_pixmap(pixmap);
-	if (priv == NULL) {
-		DBG(("%s: fallback, not attached\n", __FUNCTION__));
+	if (priv == NULL || too_small(priv)) {
+		DBG(("%s: fallback, too small or not attached\n", __FUNCTION__));
 		goto fallback;
 	}
 
-	if (use_cpu(pixmap, priv, op,
-		    region.extents.x2 - region.extents.x1,
-		    region.extents.y2 - region.extents.y1)) {
-		DBG(("%s: fallback, dst is too small (or completely damaged)\n", __FUNCTION__));
-		goto fallback;
-	}
+	boxes = pixman_region_rectangles(&region, &num_boxes);
 
 	/* If we going to be overwriting any CPU damage with a subsequent
 	 * operation, then we may as well delete it without moving it
 	 * first to the GPU.
 	 */
-	if (op <= PictOpSrc)
-		sna_damage_subtract(&priv->cpu_damage, &region);
+	hint = PREFER_GPU;
+	if (op <= PictOpSrc) {
+		if (priv->cpu_damage &&
+		    region_subsumes_damage(&region, priv->cpu_damage)) {
+			DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
+			sna_damage_destroy(&priv->cpu_damage);
+			list_del(&priv->list);
+		}
+		if (region_subsumes_drawable(&region, &pixmap->drawable) ||
+		    box_inplace(pixmap, &region.extents)) {
+			DBG(("%s: promoting to full GPU\n", __FUNCTION__));
+			if (priv->gpu_bo && priv->cpu_damage == NULL) {
+				sna_damage_all(&priv->gpu_damage,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height);
+				priv->undamaged = false;
+			}
+		}
+		if (priv->cpu_damage == NULL) {
+			DBG(("%s: dropping last-cpu hint\n", __FUNCTION__));
+			priv->cpu = false;
+		}
 
-	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
-	if (priv == NULL) {
+		hint |= IGNORE_CPU;
+	}
+
+	bo = sna_drawable_use_bo(&pixmap->drawable, hint,
+				 &region.extents, &damage);
+	if (bo == NULL) {
 		DBG(("%s: fallback due to no GPU bo\n", __FUNCTION__));
 		goto fallback;
 	}
 
 	if (!sna->render.fill_boxes(sna, op, dst->format, color,
-				    pixmap, priv->gpu_bo,
-				    boxes, num_boxes)) {
+				    pixmap, bo, boxes, num_boxes)) {
 		DBG(("%s: fallback - acceleration failed\n", __FUNCTION__));
 		goto fallback;
 	}
 
+	if (damage)
+		sna_damage_add(damage, &region);
+
 	/* Clearing a pixmap after creation is a common operation, so take
 	 * advantage and reduce further damage operations.
 	 */
-	if (region.data == NULL &&
-	    region.extents.x2 - region.extents.x1 == pixmap->drawable.width &&
-	    region.extents.y2 - region.extents.y1 == pixmap->drawable.height) {
-		sna_damage_all(&priv->gpu_damage,
-			       pixmap->drawable.width, pixmap->drawable.height);
+	if (op <= PictOpSrc &&
+	    region_subsumes_drawable(&region, &pixmap->drawable)) {
+		bool ok;
+
+		assert(DAMAGE_IS_ALL(priv->gpu_damage));
+
 		priv->undamaged = false;
-		if (op <= PictOpSrc) {
-			bool ok = true;
-
-			priv->clear_color = 0;
-			if (op == PictOpSrc)
-				ok = sna_get_pixel_from_rgba(&priv->clear_color,
-							     color->red,
-							     color->green,
-							     color->blue,
-							     color->alpha,
-							     dst->format);
-			priv->clear = ok;
-			DBG(("%s: marking clear [%08x]? %d\n",
-			     __FUNCTION__, priv->clear_color, ok));
-		}
-	}
-	if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
-		assert_pixmap_contains_box(pixmap, RegionExtents(&region));
-		sna_damage_add(&priv->gpu_damage, &region);
+		priv->clear_color = 0;
+		ok = true;
+		if (op == PictOpSrc)
+			ok = sna_get_pixel_from_rgba(&priv->clear_color,
+						     color->red,
+						     color->green,
+						     color->blue,
+						     color->alpha,
+						     dst->format);
+		priv->clear = ok;
+		DBG(("%s: marking clear [%08x]? %d\n",
+		     __FUNCTION__, priv->clear_color, ok));
 	}
-
 	goto done;
 
 fallback:
commit 89e75dbcb6749bde7587ecc08abed276c255e7f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 15:36:44 2012 +0100

    sna/gen7: Prefer the BLT for self-copies
    
    If we are copying to ourselves, we have to regularly flush the render
    cache at which point the RENDER pipeline is slower than the BLT
    pipeline.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index aba5811..29ee4af 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2437,6 +2437,13 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return true;
+
+		if (dst->pDrawable == src->pDrawable)
+			return true;
+
+		if (src->pDrawable &&
+		    get_drawable_pixmap(dst->pDrawable) == get_drawable_pixmap(src->pDrawable))
+			return true;
 	}
 
 	return false;
@@ -3315,7 +3322,8 @@ static inline bool prefer_blt_copy(struct sna *sna,
 }
 
 static inline bool
-overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+overlaps(struct sna *sna,
+	 struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	 struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 	 const BoxRec *box, int n)
 {
@@ -3324,6 +3332,9 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 	if (src_bo != dst_bo)
 		return false;
 
+	if (can_switch_rings(sna))
+		return true;
+
 	extents = box[0];
 	while (--n) {
 		box++;
@@ -3356,7 +3367,8 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
-	     overlaps(src_bo, src_dx, src_dy,
+	     overlaps(sna,
+		      src_bo, src_dx, src_dy,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
@@ -3395,7 +3407,8 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	if (!(alu == GXcopy || alu == GXclear) ||
-	    overlaps(src_bo, src_dx, src_dy,
+	    overlaps(sna,
+		     src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
 		     box, n)) {
 fallback_blt:
commit 33d6afda6cec124494f49b74152768da8a3fbdb5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 09:51:16 2012 +0100

    sna/gen7: Compile basic kernels at runtime

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ded22d5..aba5811 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -40,6 +40,7 @@
 #include "sna_render_inline.h"
 #include "sna_video.h"
 
+#include "brw/brw.h"
 #include "gen7_render.h"
 
 #define NO_COMPOSITE 0
@@ -53,6 +54,14 @@
 
 #define NO_RING_SWITCH 0
 
+#define USE_8_PIXEL_DISPATCH 0
+#define USE_16_PIXEL_DISPATCH 1
+#define USE_32_PIXEL_DISPATCH 0
+
+#if !USE_8_PIXEL_DISPATCH && !USE_16_PIXEL_DISPATCH && !USE_32_PIXEL_DISPATCH
+#error "Must select at least 8, 16 or 32 pixel dispatch"
+#endif
+
 #define GEN7_MAX_SIZE 16384
 
 /* XXX Todo
@@ -88,72 +97,6 @@ static const struct gt_info gt2_info = {
 	.urb = { 256, 704, 320 },
 };
 
-static const uint32_t ps_kernel_nomask_affine[][4] = {
-#include "exa_wm_src_affine.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_nomask_projective[][4] = {
-#include "exa_wm_src_projective.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_maskca_affine[][4] = {
-#include "exa_wm_src_affine.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_mask_affine.g7b"
-#include "exa_wm_mask_sample_argb.g7b"
-#include "exa_wm_ca.g6b" //#include "exa_wm_ca.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_maskca_projective[][4] = {
-#include "exa_wm_src_projective.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_mask_projective.g7b"
-#include "exa_wm_mask_sample_argb.g7b"
-#include "exa_wm_ca.g6b" //#include "exa_wm_ca.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
-#include "exa_wm_src_affine.g7b"
-#include "exa_wm_src_sample_a.g7b"
-#include "exa_wm_mask_affine.g7b"
-#include "exa_wm_mask_sample_argb.g7b"
-#include "exa_wm_ca_srcalpha.g6b" //#include "exa_wm_ca_srcalpha.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
-#include "exa_wm_src_projective.g7b"
-#include "exa_wm_src_sample_a.g7b"
-#include "exa_wm_mask_projective.g7b"
-#include "exa_wm_mask_sample_argb.g7b"
-#include "exa_wm_ca_srcalpha.g6b" //#include "exa_wm_ca_srcalpha.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_masknoca_affine[][4] = {
-#include "exa_wm_src_affine.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_mask_affine.g7b"
-#include "exa_wm_mask_sample_a.g7b"
-#include "exa_wm_noca.g6b"// #include "exa_wm_noca.g7b"
-#include "exa_wm_write.g7b"
-};
-
-static const uint32_t ps_kernel_masknoca_projective[][4] = {
-#include "exa_wm_src_projective.g7b"
-#include "exa_wm_src_sample_argb.g7b"
-#include "exa_wm_mask_projective.g7b"
-#include "exa_wm_mask_sample_a.g7b"
-#include "exa_wm_noca.g6b" //#include "exa_wm_noca.g7b"
-#include "exa_wm_write.g7b"
-};
-
 static const uint32_t ps_kernel_packed[][4] = {
 #include "exa_wm_src_affine.g7b"
 #include "exa_wm_src_sample_argb.g7b"
@@ -170,23 +113,25 @@ static const uint32_t ps_kernel_planar[][4] = {
 
 #define KERNEL(kernel_enum, kernel, num_surfaces) \
     [GEN7_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), num_surfaces}
+#define NOKERNEL(kernel_enum, func, num_surfaces) \
+    [GEN7_WM_KERNEL_##kernel_enum] = {#kernel_enum, (void *)func, 0, num_surfaces}
 static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
 	int num_surfaces;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, 2),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, 2),
+	NOKERNEL(NOMASK, brw_wm_kernel__affine, 2),
+	NOKERNEL(NOMASK_P, brw_wm_kernel__projective, 2),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, 3),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, 3),
+	NOKERNEL(MASK, brw_wm_kernel__affine_mask, 3),
+	NOKERNEL(MASK_P, brw_wm_kernel__projective_mask, 3),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, 3),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3),
+	NOKERNEL(MASKCA, brw_wm_kernel__affine_mask_ca, 3),
+	NOKERNEL(MASKCA_P, brw_wm_kernel__projective_mask_ca, 3),
 
-	KERNEL(MASKSA, ps_kernel_maskca_srcalpha_affine, 3),
-	KERNEL(MASKSA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3),
+	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3),
+	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3),
 
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2),
@@ -818,27 +763,35 @@ gen7_emit_sf(struct sna *sna, bool has_mask)
 static void
 gen7_emit_wm(struct sna *sna, int kernel)
 {
+	const uint32_t *kernels;
+
 	if (sna->render_state.gen7.kernel == kernel)
 		return;
 
 	sna->render_state.gen7.kernel = kernel;
+	kernels = sna->render_state.gen7.wm_kernel[kernel];
 
-	DBG(("%s: switching to %s, num_surfaces=%d\n",
+	DBG(("%s: switching to %s, num_surfaces=%d (8-wide? %d, 16-wide? %d, 32-wide? %d)\n",
 	     __FUNCTION__,
 	     wm_kernels[kernel].name,
-	     wm_kernels[kernel].num_surfaces));
+	     wm_kernels[kernel].num_surfaces,
+	     kernels[0], kernels[1], kernels[2]));
 
 	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
-	OUT_BATCH(sna->render_state.gen7.wm_kernel[kernel]);
+	OUT_BATCH(kernels[0] ?: kernels[1] ?: kernels[2]);
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
 		  wm_kernels[kernel].num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
 	OUT_BATCH(sna->render_state.gen7.info->max_wm_threads |
-		  GEN7_PS_ATTRIBUTE_ENABLE |
-		  GEN7_PS_16_DISPATCH_ENABLE);
-	OUT_BATCH(6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
-	OUT_BATCH(0); /* kernel 1 */
-	OUT_BATCH(0); /* kernel 2 */
+		  (kernels[0] ? GEN7_PS_8_DISPATCH_ENABLE : 0) |
+		  (kernels[1] ? GEN7_PS_16_DISPATCH_ENABLE : 0) |
+		  (kernels[2] ? GEN7_PS_32_DISPATCH_ENABLE : 0) |
+		  GEN7_PS_ATTRIBUTE_ENABLE);
+	OUT_BATCH((kernels[0] ? 4 : kernels[1] ? 6 : 8) << GEN7_PS_DISPATCH_START_GRF_SHIFT_0 |
+		  8 << GEN7_PS_DISPATCH_START_GRF_SHIFT_1 |
+		  6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_2);
+	OUT_BATCH(kernels[2]);
+	OUT_BATCH(kernels[1]);
 }
 
 static bool
@@ -4285,12 +4238,34 @@ static bool gen7_render_setup(struct sna *sna)
 	 */
 	null_create(&general);
 
-	for (m = 0; m < GEN7_WM_KERNEL_COUNT; m++)
-		state->wm_kernel[m] =
-			sna_static_stream_add(&general,
-					       wm_kernels[m].data,
-					       wm_kernels[m].size,
-					       64);
+	for (m = 0; m < GEN7_WM_KERNEL_COUNT; m++) {
+		if (wm_kernels[m].size) {
+			state->wm_kernel[m][1] =
+				sna_static_stream_add(&general,
+						      wm_kernels[m].data,
+						      wm_kernels[m].size,
+						      64);
+		} else {
+			if (USE_8_PIXEL_DISPATCH) {
+				state->wm_kernel[m][0] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 8);
+			}
+
+			if (USE_16_PIXEL_DISPATCH) {
+				state->wm_kernel[m][1] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 16);
+			}
+
+			if (USE_32_PIXEL_DISPATCH) {
+				state->wm_kernel[m][2] =
+					sna_static_stream_compile_wm(sna, &general,
+								     wm_kernels[m].data, 32);
+			}
+		}
+		assert(state->wm_kernel[m][1]);
+	}
 
 	ss = sna_static_stream_map(&general,
 				   2 * sizeof(*ss) *
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index f4fabad..011b1b7 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -421,16 +421,16 @@ struct gen6_render_state {
 
 enum {
 	GEN7_WM_KERNEL_NOMASK = 0,
-	GEN7_WM_KERNEL_NOMASK_PROJECTIVE,
+	GEN7_WM_KERNEL_NOMASK_P,
 
 	GEN7_WM_KERNEL_MASK,
-	GEN7_WM_KERNEL_MASK_PROJECTIVE,
+	GEN7_WM_KERNEL_MASK_P,
 
 	GEN7_WM_KERNEL_MASKCA,
-	GEN7_WM_KERNEL_MASKCA_PROJECTIVE,
+	GEN7_WM_KERNEL_MASKCA_P,
 
 	GEN7_WM_KERNEL_MASKSA,
-	GEN7_WM_KERNEL_MASKSA_PROJECTIVE,
+	GEN7_WM_KERNEL_MASKSA_P,
 
 	GEN7_WM_KERNEL_VIDEO_PLANAR,
 	GEN7_WM_KERNEL_VIDEO_PACKED,
@@ -445,7 +445,7 @@ struct gen7_render_state {
 	uint32_t sf_state;
 	uint32_t sf_mask_state;
 	uint32_t wm_state;
-	uint32_t wm_kernel[GEN7_WM_KERNEL_COUNT];
+	uint32_t wm_kernel[GEN7_WM_KERNEL_COUNT][3];
 
 	uint32_t cc_vp;
 	uint32_t cc_blend;
commit eba8d3b3e14a5a16cea6cb8a89f12d3feb8f3d99
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 09:51:16 2012 +0100

    sna/gen6: Compile basic kernels at runtime

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 5b64efa..e3a103c 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -40,6 +40,7 @@
 #include "sna_render_inline.h"
 #include "sna_video.h"
 
+#include "brw/brw.h"
 #include "gen6_render.h"
 
 #define NO_COMPOSITE 0
@@ -81,72 +82,6 @@ static const struct gt_info gt2_info = {
 	.urb = { 64, 256, 256 },
 };
 
-static const uint32_t ps_kernel_nomask_affine[][4] = {
-#include "exa_wm_src_affine.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_nomask_projective[][4] = {
-#include "exa_wm_src_projective.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_maskca_affine[][4] = {
-#include "exa_wm_src_affine.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_mask_affine.g6b"
-#include "exa_wm_mask_sample_argb.g6b"
-#include "exa_wm_ca.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_maskca_projective[][4] = {
-#include "exa_wm_src_projective.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_mask_projective.g6b"
-#include "exa_wm_mask_sample_argb.g6b"
-#include "exa_wm_ca.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
-#include "exa_wm_src_affine.g6b"
-#include "exa_wm_src_sample_a.g6b"
-#include "exa_wm_mask_affine.g6b"
-#include "exa_wm_mask_sample_argb.g6b"
-#include "exa_wm_ca_srcalpha.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
-#include "exa_wm_src_projective.g6b"
-#include "exa_wm_src_sample_a.g6b"
-#include "exa_wm_mask_projective.g6b"
-#include "exa_wm_mask_sample_argb.g6b"
-#include "exa_wm_ca_srcalpha.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_masknoca_affine[][4] = {
-#include "exa_wm_src_affine.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_mask_affine.g6b"
-#include "exa_wm_mask_sample_a.g6b"
-#include "exa_wm_noca.g6b"
-#include "exa_wm_write.g6b"
-};
-
-static const uint32_t ps_kernel_masknoca_projective[][4] = {
-#include "exa_wm_src_projective.g6b"
-#include "exa_wm_src_sample_argb.g6b"
-#include "exa_wm_mask_projective.g6b"
-#include "exa_wm_mask_sample_a.g6b"
-#include "exa_wm_noca.g6b"
-#include "exa_wm_write.g6b"
-};
-
 static const uint32_t ps_kernel_packed[][4] = {
 #include "exa_wm_src_affine.g6b"
 #include "exa_wm_src_sample_argb.g6b"
@@ -161,8 +96,11 @@ static const uint32_t ps_kernel_planar[][4] = {
 #include "exa_wm_write.g6b"
 };
 
+#define NOKERNEL(kernel_enum, func, ns, ni) \
+    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, func, 0, ns, ni}
 #define KERNEL(kernel_enum, kernel, ns, ni) \
     [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), ns, ni}
+
 static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
@@ -170,17 +108,17 @@ static const struct wm_kernel_info {
 	unsigned int num_surfaces;
 	unsigned int num_inputs;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, 2, 1),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, 2, 1),
+	NOKERNEL(NOMASK, brw_wm_kernel__affine, 2, 1),
+	NOKERNEL(NOMASK_P, brw_wm_kernel__projective, 2, 1),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, 3, 2),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, 3, 2),
+	NOKERNEL(MASK, brw_wm_kernel__affine_mask, 3, 2),
+	NOKERNEL(MASK_P, brw_wm_kernel__projective_mask, 3, 2),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, 3, 2),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3, 2),
+	NOKERNEL(MASKCA, brw_wm_kernel__affine_mask_ca, 3, 2),
+	NOKERNEL(MASKCA_P, brw_wm_kernel__projective_mask_ca, 3, 2),
 
-	KERNEL(MASKSA, ps_kernel_maskca_srcalpha_affine, 3, 2),
-	KERNEL(MASKSA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3, 2),
+	NOKERNEL(MASKSA, brw_wm_kernel__affine_mask_sa, 3, 2),
+	NOKERNEL(MASKSA_P, brw_wm_kernel__projective_mask_sa, 3, 2),
 
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7, 1),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2, 1),
@@ -4216,12 +4154,21 @@ static bool gen6_render_setup(struct sna *sna)
 	null_create(&general);
 	scratch_create(&general);
 
-	for (m = 0; m < GEN6_KERNEL_COUNT; m++)
-		state->wm_kernel[m] =
-			sna_static_stream_add(&general,
-					       wm_kernels[m].data,
-					       wm_kernels[m].size,
-					       64);
+	for (m = 0; m < GEN6_KERNEL_COUNT; m++) {
+		if (wm_kernels[m].size) {
+			state->wm_kernel[m] =
+				sna_static_stream_add(&general,
+						      wm_kernels[m].data,
+						      wm_kernels[m].size,
+						      64);
+		} else {
+			state->wm_kernel[m] =
+				sna_static_stream_compile_wm(sna, &general,
+							     wm_kernels[m].data,
+							     16);
+		}
+		assert(state->wm_kernel[m]);
+	}
 
 	ss = sna_static_stream_map(&general,
 				   2 * sizeof(*ss) *
diff --git a/src/sna/gen6_render.h b/src/sna/gen6_render.h
index b0331ec..9cc8e14 100644
--- a/src/sna/gen6_render.h
+++ b/src/sna/gen6_render.h
@@ -1233,11 +1233,6 @@
 #define GEN6_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS   2
 #define GEN6_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS  3
 
-#define GEN6_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ          0
-#define GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     1
-#define GEN6_DATAPORT_READ_MESSAGE_DWORD_BLOCK_READ          2
-#define GEN6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      3
-
 #define GEN6_DATAPORT_READ_TARGET_DATA_CACHE      0
 #define GEN6_DATAPORT_READ_TARGET_RENDER_CACHE    1
 #define GEN6_DATAPORT_READ_TARGET_SAMPLER_CACHE   2
@@ -1248,28 +1243,6 @@
 #define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23         3
 #define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01       4
 
-#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE                0
-#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE           1
-#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_BLOCK_WRITE                2
-#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE            3
-#define GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE              4
-#define GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE     5
-#define GEN6_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE               7
-
-#define GEN6_MATH_FUNCTION_INV                              1
-#define GEN6_MATH_FUNCTION_LOG                              2
-#define GEN6_MATH_FUNCTION_EXP                              3
-#define GEN6_MATH_FUNCTION_SQRT                             4
-#define GEN6_MATH_FUNCTION_RSQ                              5
-#define GEN6_MATH_FUNCTION_SIN                              6 /* was 7 */
-#define GEN6_MATH_FUNCTION_COS                              7 /* was 8 */
-#define GEN6_MATH_FUNCTION_SINCOS                           8 /* was 6 */
-#define GEN6_MATH_FUNCTION_TAN                              9
-#define GEN6_MATH_FUNCTION_POW                              10
-#define GEN6_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER   11
-#define GEN6_MATH_FUNCTION_INT_DIV_QUOTIENT                 12
-#define GEN6_MATH_FUNCTION_INT_DIV_REMAINDER                13
-
 #define GEN6_MATH_INTEGER_UNSIGNED     0
 #define GEN6_MATH_INTEGER_SIGNED       1
 
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 64e2412..f4fabad 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -372,16 +372,16 @@ struct gen5_render_state {
 
 enum {
 	GEN6_WM_KERNEL_NOMASK = 0,
-	GEN6_WM_KERNEL_NOMASK_PROJECTIVE,
+	GEN6_WM_KERNEL_NOMASK_P,
 
 	GEN6_WM_KERNEL_MASK,
-	GEN6_WM_KERNEL_MASK_PROJECTIVE,
+	GEN6_WM_KERNEL_MASK_P,
 
 	GEN6_WM_KERNEL_MASKCA,
-	GEN6_WM_KERNEL_MASKCA_PROJECTIVE,
+	GEN6_WM_KERNEL_MASKCA_P,
 
 	GEN6_WM_KERNEL_MASKSA,
-	GEN6_WM_KERNEL_MASKSA_PROJECTIVE,
+	GEN6_WM_KERNEL_MASKSA_P,
 
 	GEN6_WM_KERNEL_VIDEO_PLANAR,
 	GEN6_WM_KERNEL_VIDEO_PACKED,
commit 8515ec90405912b3d776defcd6e81b1b5f699f1e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 09:51:16 2012 +0100

    sna/gen5: Compile basic kernels at runtime

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 3af9097..3d826c4 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -40,6 +40,7 @@
 #include "sna_render_inline.h"
 #include "sna_video.h"
 
+#include "brw/brw.h"
 #include "gen5_render.h"
 
 #define NO_COMPOSITE_SPANS 0
@@ -82,88 +83,6 @@
 #define PS_KERNEL_NUM_GRF   32
 #define PS_MAX_THREADS	    72
 
-static const uint32_t sf_kernel[][4] = {
-#include "exa_sf.g5b"
-};
-
-static const uint32_t sf_kernel_mask[][4] = {
-#include "exa_sf_mask.g5b"
-};
-
-static const uint32_t ps_kernel_nomask_affine[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_affine.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_nomask_projective[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_projective.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_maskca_affine[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_affine.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_mask_affine.g5b"
-#include "exa_wm_mask_sample_argb.g5b"
-#include "exa_wm_ca.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_maskca_projective[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_projective.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_mask_projective.g5b"
-#include "exa_wm_mask_sample_argb.g5b"
-#include "exa_wm_ca.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_affine.g5b"
-#include "exa_wm_src_sample_a.g5b"
-#include "exa_wm_mask_affine.g5b"
-#include "exa_wm_mask_sample_argb.g5b"
-#include "exa_wm_ca_srcalpha.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_projective.g5b"
-#include "exa_wm_src_sample_a.g5b"
-#include "exa_wm_mask_projective.g5b"
-#include "exa_wm_mask_sample_argb.g5b"
-#include "exa_wm_ca_srcalpha.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_masknoca_affine[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_affine.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_mask_affine.g5b"
-#include "exa_wm_mask_sample_a.g5b"
-#include "exa_wm_noca.g5b"
-#include "exa_wm_write.g5b"
-};
-
-static const uint32_t ps_kernel_masknoca_projective[][4] = {
-#include "exa_wm_xy.g5b"
-#include "exa_wm_src_projective.g5b"
-#include "exa_wm_src_sample_argb.g5b"
-#include "exa_wm_mask_projective.g5b"
-#include "exa_wm_mask_sample_a.g5b"
-#include "exa_wm_noca.g5b"
-#include "exa_wm_write.g5b"
-};
-
 static const uint32_t ps_kernel_packed_static[][4] = {
 #include "exa_wm_xy.g5b"
 #include "exa_wm_src_affine.g5b"
@@ -180,6 +99,8 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 #include "exa_wm_write.g5b"
 };
 
+#define NOKERNEL(kernel_enum, func, masked) \
+    [kernel_enum] = {func, 0, masked}
 #define KERNEL(kernel_enum, kernel, masked) \
     [kernel_enum] = {&kernel, sizeof(kernel), masked}
 static const struct wm_kernel_info {
@@ -187,19 +108,17 @@ static const struct wm_kernel_info {
 	unsigned int size;
 	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
+	NOKERNEL(WM_KERNEL, brw_wm_kernel__affine, false),
+	NOKERNEL(WM_KERNEL_P, brw_wm_kernel__projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
+	NOKERNEL(WM_KERNEL_MASK, brw_wm_kernel__affine_mask, true),
+	NOKERNEL(WM_KERNEL_MASK_P, brw_wm_kernel__projective_mask, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
+	NOKERNEL(WM_KERNEL_MASKCA, brw_wm_kernel__affine_mask_ca, true),
+	NOKERNEL(WM_KERNEL_MASKCA_P, brw_wm_kernel__projective_mask_ca, true),
 
-	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, true),
-	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, true),
+	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
+	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),
 
 	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
 	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
@@ -263,7 +182,7 @@ gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 	if (has_mask) {
 		if (is_ca) {
 			if (gen5_blend_op[op].src_alpha)
-				base = WM_KERNEL_MASKCA_SRCALPHA;
+				base = WM_KERNEL_MASKSA;
 			else
 				base = WM_KERNEL_MASKCA;
 		} else
@@ -3709,9 +3628,7 @@ static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
 
 	sf_state->thread0.grf_reg_count = GEN5_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
 	sf_state->thread0.kernel_start_pointer = kernel >> 6;
-	sf_state->sf1.single_program_flow = 1;
-	/* scratch space is not used in our kernel */
-	sf_state->thread2.scratch_space_base_pointer = 0;
+
 	sf_state->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
 	sf_state->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
 	sf_state->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
@@ -3864,19 +3781,21 @@ static bool gen5_render_setup(struct sna *sna)
 	null_create(&general);
 
 	/* Set up the two SF states (one for blending with a mask, one without) */
-	sf[0] = sna_static_stream_add(&general,
-				      sf_kernel,
-				      sizeof(sf_kernel),
-				      64);
-	sf[1] = sna_static_stream_add(&general,
-				      sf_kernel_mask,
-				      sizeof(sf_kernel_mask),
-				      64);
+	sf[0] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__nomask);
+	sf[1] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__mask);
+
 	for (m = 0; m < KERNEL_COUNT; m++) {
-		wm[m] = sna_static_stream_add(&general,
-					      wm_kernels[m].data,
-					      wm_kernels[m].size,
-					      64);
+		if (wm_kernels[m].size) {
+			wm[m] = sna_static_stream_add(&general,
+						      wm_kernels[m].data,
+						      wm_kernels[m].size,
+						      64);
+		} else {
+			wm[m] = sna_static_stream_compile_wm(sna, &general,
+							     wm_kernels[m].data,
+							     16);
+		}
+		assert(wm[m]);
 	}
 
 	state->vs = gen5_create_vs_unit_state(&general);
diff --git a/src/sna/gen5_render.h b/src/sna/gen5_render.h
index 880a4c0..17708b5 100644
--- a/src/sna/gen5_render.h
+++ b/src/sna/gen5_render.h
@@ -2759,16 +2759,16 @@ typedef enum {
 
 typedef enum {
 	WM_KERNEL = 0,
-	WM_KERNEL_PROJECTIVE,
+	WM_KERNEL_P,
 
 	WM_KERNEL_MASK,
-	WM_KERNEL_MASK_PROJECTIVE,
+	WM_KERNEL_MASK_P,
 
 	WM_KERNEL_MASKCA,
-	WM_KERNEL_MASKCA_PROJECTIVE,
+	WM_KERNEL_MASKCA_P,
 
-	WM_KERNEL_MASKCA_SRCALPHA,
-	WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
+	WM_KERNEL_MASKSA,
+	WM_KERNEL_MASKSA_P,
 
 	WM_KERNEL_VIDEO_PLANAR,
 	WM_KERNEL_VIDEO_PACKED,
commit 00c08b1842c9493ca918a868202946b2e7150de0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 09:51:16 2012 +0100

    sna/gen4: Compile basic kernels at runtime

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 58d4422..64fd7df 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -40,6 +40,7 @@
 #include "sna_render_inline.h"
 #include "sna_video.h"
 
+#include "brw/brw.h"
 #include "gen4_render.h"
 
 /* gen4 has a serious issue with its shaders that we need to flush
@@ -109,88 +110,6 @@ static const struct gt_info {
 	24, 50, 384,
 };
 
-static const uint32_t sf_kernel[][4] = {
-#include "exa_sf.g4b"
-};
-
-static const uint32_t sf_kernel_mask[][4] = {
-#include "exa_sf_mask.g4b"
-};
-
-static const uint32_t ps_kernel_nomask_affine[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_affine.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_nomask_projective[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_projective.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_maskca_affine[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_affine.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_mask_affine.g4b"
-#include "exa_wm_mask_sample_argb.g4b"
-#include "exa_wm_ca.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_maskca_projective[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_projective.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_mask_projective.g4b"
-#include "exa_wm_mask_sample_argb.g4b"
-#include "exa_wm_ca.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_affine.g4b"
-#include "exa_wm_src_sample_a.g4b"
-#include "exa_wm_mask_affine.g4b"
-#include "exa_wm_mask_sample_argb.g4b"
-#include "exa_wm_ca_srcalpha.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_projective.g4b"
-#include "exa_wm_src_sample_a.g4b"
-#include "exa_wm_mask_projective.g4b"
-#include "exa_wm_mask_sample_argb.g4b"
-#include "exa_wm_ca_srcalpha.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_masknoca_affine[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_affine.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_mask_affine.g4b"
-#include "exa_wm_mask_sample_a.g4b"
-#include "exa_wm_noca.g4b"
-#include "exa_wm_write.g4b"
-};
-
-static const uint32_t ps_kernel_masknoca_projective[][4] = {
-#include "exa_wm_xy.g4b"
-#include "exa_wm_src_projective.g4b"
-#include "exa_wm_src_sample_argb.g4b"
-#include "exa_wm_mask_projective.g4b"
-#include "exa_wm_mask_sample_a.g4b"
-#include "exa_wm_noca.g4b"
-#include "exa_wm_write.g4b"
-};
-
 static const uint32_t ps_kernel_packed_static[][4] = {
 #include "exa_wm_xy.g4b"
 #include "exa_wm_src_affine.g4b"
@@ -207,6 +126,8 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 #include "exa_wm_write.g4b"
 };
 
+#define NOKERNEL(kernel_enum, func, masked) \
+    [kernel_enum] = {func, 0, masked}
 #define KERNEL(kernel_enum, kernel, masked) \
     [kernel_enum] = {&kernel, sizeof(kernel), masked}
 static const struct wm_kernel_info {
@@ -214,19 +135,17 @@ static const struct wm_kernel_info {
 	unsigned int size;
 	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
+	NOKERNEL(WM_KERNEL, brw_wm_kernel__affine, false),
+	NOKERNEL(WM_KERNEL_P, brw_wm_kernel__projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
+	NOKERNEL(WM_KERNEL_MASK, brw_wm_kernel__affine_mask, true),
+	NOKERNEL(WM_KERNEL_MASK_P, brw_wm_kernel__projective_mask, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
+	NOKERNEL(WM_KERNEL_MASKCA, brw_wm_kernel__affine_mask_ca, true),
+	NOKERNEL(WM_KERNEL_MASKCA_P, brw_wm_kernel__projective_mask_ca, true),
 
-	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, true),
-	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, true),
+	NOKERNEL(WM_KERNEL_MASKSA, brw_wm_kernel__affine_mask_sa, true),
+	NOKERNEL(WM_KERNEL_MASKSA_P, brw_wm_kernel__projective_mask_sa, true),
 
 	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
 	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
@@ -292,7 +211,7 @@ gen4_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 	if (has_mask) {
 		if (is_ca) {
 			if (gen4_blend_op[op].src_alpha)
-				base = WM_KERNEL_MASKCA_SRCALPHA;
+				base = WM_KERNEL_MASKSA;
 			else
 				base = WM_KERNEL_MASKCA;
 		} else
@@ -1339,10 +1258,9 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 			    kernel);
 	bp = gen4_get_blend(blend, op->has_component_alpha, op->dst.format);
 
-	key = op->mask.bo != NULL;
-	key |= sp << 1;
-	key |= bp << 16;
+	DBG(("%s: sp=%d, bp=%d\n", __FUNCTION__, sp, bp));
 
+	key = sp | bp << 16;
 	if (key == sna->render_state.gen4.last_pipelined_pointers)
 		return;
 
@@ -2017,6 +1935,10 @@ gen4_composite_picture(struct sna *sna,
 
 static void gen4_composite_channel_convert(struct sna_composite_channel *channel)
 {
+	DBG(("%s: repeat %d -> %d, filter %d -> %d\n",
+	     __FUNCTION__,
+	     channel->repeat, gen4_repeat(channel->repeat),
+	     channel->filter, gen4_repeat(channel->filter)));
 	channel->repeat = gen4_repeat(channel->repeat);
 	channel->filter = gen4_filter(channel->filter);
 	if (channel->card_format == (unsigned)-1)
@@ -3622,9 +3544,6 @@ static uint32_t gen4_create_sf_state(struct sna_static_stream *stream,
 
 	sf->thread0.grf_reg_count = GEN4_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
 	sf->thread0.kernel_start_pointer = kernel >> 6;
-	sf->sf1.single_program_flow = 1;
-	/* scratch space is not used in our kernel */
-	sf->thread2.scratch_space_base_pointer = 0;
 	sf->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
 	sf->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
 	sf->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
@@ -3667,25 +3586,22 @@ static void gen4_init_wm_state(struct gen4_wm_unit_state *wm,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
-	wm->thread0.grf_reg_count = GEN4_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
+	assert((kernel & 63) == 0);
 	wm->thread0.kernel_start_pointer = kernel >> 6;
+	wm->thread0.grf_reg_count = GEN4_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
 
 	wm->thread1.single_program_flow = 0;
 
-	/* scratch space is not used in our kernel */
-	wm->thread2.scratch_space_base_pointer = 0;
-	wm->thread2.per_thread_scratch_space = 0;
-
 	wm->thread3.const_urb_entry_read_length = 0;
 	wm->thread3.const_urb_entry_read_offset = 0;
 
 	wm->thread3.urb_entry_read_offset = 0;
-	/* wm kernel use urb from 3, see wm_program in compiler module */
-	wm->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */
-
-	wm->wm4.sampler_count = 1;	/* 1-4 samplers */
+	wm->thread3.dispatch_grf_start_reg = 3;
 
+	assert((sampler & 31) == 0);
 	wm->wm4.sampler_state_pointer = sampler >> 5;
+	wm->wm4.sampler_count = 1;
+
 	wm->wm5.max_threads = info->max_wm_threads - 1;
 	wm->wm5.transposed_urb_read = 0;
 	wm->wm5.thread_dispatch_enable = 1;
@@ -3698,10 +3614,10 @@ static void gen4_init_wm_state(struct gen4_wm_unit_state *wm,
 
 	/* Each pair of attributes (src/mask coords) is two URB entries */
 	if (has_mask) {
-		wm->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
+		wm->thread1.binding_table_entry_count = 3;
 		wm->thread3.urb_entry_read_length = 4;
 	} else {
-		wm->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
+		wm->thread1.binding_table_entry_count = 2;
 		wm->thread3.urb_entry_read_length = 2;
 	}
 }
@@ -3778,31 +3694,25 @@ static bool gen4_render_setup(struct sna *sna)
 	 */
 	null_create(&general);
 
-	/* Set up the two SF states (one for blending with a mask, one without) */
-	sf[0] = sna_static_stream_add(&general,
-				      sf_kernel,
-				      sizeof(sf_kernel),
-				      64);
-	sf[1] = sna_static_stream_add(&general,
-				      sf_kernel_mask,
-				      sizeof(sf_kernel_mask),
-				      64);
+	sf[0] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__nomask);
+	sf[1] = sna_static_stream_compile_sf(sna, &general, brw_sf_kernel__mask);
 	for (m = 0; m < KERNEL_COUNT; m++) {
-		wm[m] = sna_static_stream_add(&general,
-					      wm_kernels[m].data,
-					      wm_kernels[m].size,
-					      64);
+		if (wm_kernels[m].size) {
+			wm[m] = sna_static_stream_add(&general,
+						      wm_kernels[m].data,
+						      wm_kernels[m].size,
+						      64);
+		} else {
+			wm[m] = sna_static_stream_compile_wm(sna, &general,
+							     wm_kernels[m].data,
+							     16);
+		}
 	}
 
 	state->vs = gen4_create_vs_unit_state(&general);
-
 	state->sf[0] = gen4_create_sf_state(&general, info, sf[0]);
 	state->sf[1] = gen4_create_sf_state(&general, info, sf[1]);
 
-
-	/* Set up the WM states: each filter/extend type for source and mask, per
-	 * kernel.
-	 */
 	wm_state = sna_static_stream_map(&general,
 					  sizeof(*wm_state) * KERNEL_COUNT *
 					  FILTER_COUNT * EXTEND_COUNT *
@@ -3823,8 +3733,7 @@ static bool gen4_render_setup(struct sna *sna)
 					for (m = 0; m < KERNEL_COUNT; m++) {
 						gen4_init_wm_state(&wm_state->state, info,
 								   wm_kernels[m].has_mask,
-								   wm[m],
-								   sampler_state);
+								   wm[m], sampler_state);
 						wm_state++;
 					}
 				}
diff --git a/src/sna/gen4_render.h b/src/sna/gen4_render.h
index a014e52..8e0cd74 100644
--- a/src/sna/gen4_render.h
+++ b/src/sna/gen4_render.h
@@ -2624,16 +2624,16 @@ typedef enum {
 
 typedef enum {
 	WM_KERNEL = 0,
-	WM_KERNEL_PROJECTIVE,
+	WM_KERNEL_P,
 
 	WM_KERNEL_MASK,
-	WM_KERNEL_MASK_PROJECTIVE,
+	WM_KERNEL_MASK_P,
 
 	WM_KERNEL_MASKCA,
-	WM_KERNEL_MASKCA_PROJECTIVE,
+	WM_KERNEL_MASKCA_P,
 
-	WM_KERNEL_MASKCA_SRCALPHA,
-	WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
+	WM_KERNEL_MASKSA,
+	WM_KERNEL_MASKSA_P,
 
 	WM_KERNEL_VIDEO_PLANAR,
 	WM_KERNEL_VIDEO_PACKED,
commit 7c9dbc980b760e0053d83ca2d7cb147613285680
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 09:50:39 2012 +0100

    sna: Assemble SF and WM kernels using brw
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/.gitignore b/.gitignore
index 579fe4b..f7799e5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -44,6 +44,7 @@ symlink-tree
 texinfo.tex
 ylwrap
 src/sna/git_version.h
+src/sna/brw/brw_test
 
 #	Do not edit the following section
 # 	Edit Compile Debug Document Distribute
diff --git a/src/sna/brw/Makefile.am b/src/sna/brw/Makefile.am
index edb3db4..b3513cf 100644
--- a/src/sna/brw/Makefile.am
+++ b/src/sna/brw/Makefile.am
@@ -20,6 +20,7 @@
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 noinst_LTLIBRARIES = libbrw.la
+noinst_PROGRAMS = brw_test
 
 AM_CFLAGS = \
 	@CWARNFLAGS@ \
@@ -35,8 +36,24 @@ AM_CFLAGS += @VALGRIND_CFLAGS@
 endif
 
 libbrw_la_SOURCES = \
+	brw.h \
 	brw_disasm.c \
 	brw_eu.h \
 	brw_eu.c \
 	brw_eu_emit.c \
+	brw_sf.c \
+	brw_wm.c \
+	$(NULL)
+
+brw_test_SOURCES = \
+	brw_test.c \
+	brw_test.h \
+	brw_test_gen4.c \
+	brw_test_gen5.c \
+	brw_test_gen6.c \
+	brw_test_gen7.c \
+	$(NULL)
+
+brw_test_LDADD = \
+	libbrw.la \
 	$(NULL)
diff --git a/src/sna/brw/brw.h b/src/sna/brw/brw.h
new file mode 100644
index 0000000..a39b253
--- /dev/null
+++ b/src/sna/brw/brw.h
@@ -0,0 +1,14 @@
+#include "brw_eu.h"
+
+void brw_sf_kernel__nomask(struct brw_compile *p);
+void brw_sf_kernel__mask(struct brw_compile *p);
+
+void brw_wm_kernel__affine(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch_width);
+
+void brw_wm_kernel__projective(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch_width);
+void brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch_width);
diff --git a/src/sna/brw/brw_sf.c b/src/sna/brw/brw_sf.c
new file mode 100644
index 0000000..0c69433
--- /dev/null
+++ b/src/sna/brw/brw_sf.c
@@ -0,0 +1,51 @@
+#include "brw.h"
+
+void brw_sf_kernel__nomask(struct brw_compile *p)
+{
+	struct brw_reg inv, v0, v1, v2, delta;
+
+	v0 = brw_vec4_grf(3, 0);
+	v1 = brw_vec4_grf(4, 0);
+	v2 = brw_vec4_grf(5, 0);
+	delta = brw_vec8_grf(7, 0);
+
+	inv = brw_vec4_grf(6, 0);
+	brw_math_invert(p, inv, brw_vec4_grf(1, 11));
+
+	brw_MOV(p, brw_message_reg(3), v0);
+
+	brw_ADD(p, delta, v1, brw_negate(v2));
+	brw_MUL(p, brw_message_reg(1), delta, brw_vec1_grf(6,0));
+
+	brw_ADD(p, delta, v2, brw_negate(v0));
+	brw_MUL(p, brw_message_reg(2), delta, brw_vec1_grf(6,2));
+
+	brw_urb_WRITE(p, brw_null_reg(), 0, brw_vec8_grf(0 ,0),
+		      false, true, 4, 0, true, true, 0,
+		      BRW_URB_SWIZZLE_TRANSPOSE);
+}
+
+void
+brw_sf_kernel__mask(struct brw_compile *p)
+{
+	struct brw_reg inv, v0, v1, v2;
+
+	v0 = brw_vec8_grf(3, 0);
+	v1 = brw_vec8_grf(4, 0);
+	v2 = brw_vec8_grf(5, 0);
+
+	inv = brw_vec4_grf(6, 0);
+	brw_math_invert(p, inv, brw_vec4_grf(1, 11));
+
+	brw_MOV(p, brw_message_reg(3), v0);
+
+	brw_ADD(p, brw_vec8_grf(7, 0), v1, brw_negate(v2));
+	brw_MUL(p, brw_message_reg(1), brw_vec8_grf(7, 0), brw_vec1_grf(6,0));
+
+	brw_ADD(p, brw_vec8_grf(7, 0), v2, brw_negate(v0));
+	brw_MUL(p, brw_message_reg(2), brw_vec8_grf(7, 0), brw_vec1_grf(6,2));
+
+	brw_urb_WRITE(p, brw_null_reg(), 0, brw_vec8_grf(0 ,0),
+		      false, true, 4, 0, true, true, 0,
+		      BRW_URB_SWIZZLE_TRANSPOSE);
+}
diff --git a/src/sna/brw/brw_test.c b/src/sna/brw/brw_test.c
new file mode 100644
index 0000000..4f03858
--- /dev/null
+++ b/src/sna/brw/brw_test.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "brw_test.h"
+#include <string.h>
+
+void brw_test_compare(const char *function, int gen,
+		      const struct brw_instruction *new, int num_new,
+		      const struct brw_instruction *old, int num_old)
+{
+	int n;
+
+	if (num_new != num_old ||
+	    memcmp(new, old, num_new * sizeof(struct brw_instruction))) {
+		printf ("%s: new\n", function);
+		for (n = 0; n < num_new; n++)
+			brw_disasm(stdout, &new[n], gen);
+
+		printf ("%s: old\n", function);
+		for (n = 0; n < num_old; n++)
+			brw_disasm(stdout, &old[n], gen);
+		printf ("\n");
+	}
+}
+
+
+/* Check that we can recreate all the existing programs using the assembler */
+int main(int argc, char **argv)
+{
+	brw_test_gen4();
+	brw_test_gen5();
+	brw_test_gen6();
+	brw_test_gen7();
+
+	return 0;
+}
diff --git a/src/sna/brw/brw_test.h b/src/sna/brw/brw_test.h
new file mode 100644
index 0000000..41f4ca6
--- /dev/null
+++ b/src/sna/brw/brw_test.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#ifndef BRW_TEST_H
+#define BRW_TEST_H
+
+#include "brw.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+void brw_test_compare(const char *function, int gen,
+		      const struct brw_instruction *new, int num_new,
+		      const struct brw_instruction *old, int num_old);
+
+void brw_test_gen4(void);
+void brw_test_gen5(void);
+void brw_test_gen6(void);
+void brw_test_gen7(void);
+
+#endif /* BRW_TEST_H */
diff --git a/src/sna/brw/brw_test_gen4.c b/src/sna/brw/brw_test_gen4.c
new file mode 100644
index 0000000..742c7c2
--- /dev/null
+++ b/src/sna/brw/brw_test_gen4.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "brw_test.h"
+
+#include <string.h>
+
/* Precompiled gen4 reference kernels.  Each .g4b fragment is a
 * generated array-initializer of 4-dword instructions; a kernel is the
 * concatenation of its fragments.  Only a subset is exercised by the
 * tests below (the maskca*, srcalpha*, packed and planar kernels have
 * no corresponding test in this file yet). */
static const uint32_t sf_kernel[][4] = {
#include "exa_sf.g4b"
};

static const uint32_t sf_kernel_mask[][4] = {
#include "exa_sf_mask.g4b"
};

static const uint32_t ps_kernel_nomask_affine[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_nomask_projective[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_projective.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_maskca_affine[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_mask_affine.g4b"
#include "exa_wm_mask_sample_argb.g4b"
#include "exa_wm_ca.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_maskca_projective[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_projective.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_mask_projective.g4b"
#include "exa_wm_mask_sample_argb.g4b"
#include "exa_wm_ca.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_a.g4b"
#include "exa_wm_mask_affine.g4b"
#include "exa_wm_mask_sample_argb.g4b"
#include "exa_wm_ca_srcalpha.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_projective.g4b"
#include "exa_wm_src_sample_a.g4b"
#include "exa_wm_mask_projective.g4b"
#include "exa_wm_mask_sample_argb.g4b"
#include "exa_wm_ca_srcalpha.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_mask_affine.g4b"
#include "exa_wm_mask_sample_a.g4b"
#include "exa_wm_noca.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_masknoca_projective[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_projective.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_mask_projective.g4b"
#include "exa_wm_mask_sample_a.g4b"
#include "exa_wm_noca.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_packed_static[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_argb.g4b"
#include "exa_wm_yuv_rgb.g4b"
#include "exa_wm_write.g4b"
};

static const uint32_t ps_kernel_planar_static[][4] = {
#include "exa_wm_xy.g4b"
#include "exa_wm_src_affine.g4b"
#include "exa_wm_src_sample_planar.g4b"
#include "exa_wm_yuv_rgb.g4b"
#include "exa_wm_write.g4b"
};

/* Diff the assembled program in p against a reference kernel.
 * NOTE(review): unlike the gen5/6/7 variants, the last 8 reference
 * instructions are excluded (ARRAY_SIZE(old)-8) -- presumably trailing
 * instructions in the gen4 .g4b binaries that the runtime assembler
 * does not emit; confirm against the generated references. */
#define compare(old) brw_test_compare(__FUNCTION__, p.gen, p.store, p.nr_insn, (struct brw_instruction *)old, ARRAY_SIZE(old)-8)
+
+static void gen4_sf__nomask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 40, store);
+	brw_sf_kernel__nomask(&p);
+
+	compare(sf_kernel);
+}
+
+static void gen4_sf__mask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 40, store);
+	brw_sf_kernel__mask(&p);
+
+	compare(sf_kernel_mask);
+}
+
+static void
+gen4_wm_kernel__affine_nomask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 40, store);
+	brw_wm_kernel__affine(&p, 16);
+
+	compare(ps_kernel_nomask_affine);
+}
+
+static void
+gen4_wm_kernel__affine_mask_noca(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 40, store);
+	brw_wm_kernel__affine_mask(&p, 16);
+
+	compare(ps_kernel_masknoca_affine);
+}
+
+static void
+gen4_wm_kernel__projective_nomask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 40, store);
+	brw_wm_kernel__projective(&p, 16);
+
+	compare(ps_kernel_nomask_projective);
+}
+
/* Run every gen4 assembler self-test. */
void brw_test_gen4(void)
{
	gen4_sf__nomask();
	gen4_sf__mask();

	gen4_wm_kernel__affine_nomask();
	gen4_wm_kernel__affine_mask_noca();

	gen4_wm_kernel__projective_nomask();
}
diff --git a/src/sna/brw/brw_test_gen5.c b/src/sna/brw/brw_test_gen5.c
new file mode 100644
index 0000000..62a999e
--- /dev/null
+++ b/src/sna/brw/brw_test_gen5.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "brw_test.h"
+
+#include <string.h>
+
/* Precompiled gen5 reference kernels; each .g5b fragment is a
 * generated array-initializer of 4-dword instructions.  The maskca*,
 * srcalpha*, packed and planar kernels are not yet exercised by the
 * tests below. */
static const uint32_t sf_kernel[][4] = {
#include "exa_sf.g5b"
};

static const uint32_t sf_kernel_mask[][4] = {
#include "exa_sf_mask.g5b"
};

static const uint32_t ps_kernel_nomask_affine[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_nomask_projective[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_projective.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_maskca_affine[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_mask_affine.g5b"
#include "exa_wm_mask_sample_argb.g5b"
#include "exa_wm_ca.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_maskca_projective[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_projective.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_mask_projective.g5b"
#include "exa_wm_mask_sample_argb.g5b"
#include "exa_wm_ca.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_a.g5b"
#include "exa_wm_mask_affine.g5b"
#include "exa_wm_mask_sample_argb.g5b"
#include "exa_wm_ca_srcalpha.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_projective.g5b"
#include "exa_wm_src_sample_a.g5b"
#include "exa_wm_mask_projective.g5b"
#include "exa_wm_mask_sample_argb.g5b"
#include "exa_wm_ca_srcalpha.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_mask_affine.g5b"
#include "exa_wm_mask_sample_a.g5b"
#include "exa_wm_noca.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_masknoca_projective[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_projective.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_mask_projective.g5b"
#include "exa_wm_mask_sample_a.g5b"
#include "exa_wm_noca.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_packed_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_argb.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};

static const uint32_t ps_kernel_planar_static[][4] = {
#include "exa_wm_xy.g5b"
#include "exa_wm_src_affine.g5b"
#include "exa_wm_src_sample_planar.g5b"
#include "exa_wm_yuv_rgb.g5b"
#include "exa_wm_write.g5b"
};

/* Diff the assembled program in p against a full reference kernel. */
#define compare(old) brw_test_compare(__FUNCTION__, p.gen, p.store, p.nr_insn, (struct brw_instruction *)old, ARRAY_SIZE(old))
+
+static void gen5_sf(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_sf_kernel__nomask(&p);
+
+	compare(sf_kernel);
+}
+
+static void gen5_sf_mask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_sf_kernel__mask(&p);
+
+	compare(sf_kernel_mask);
+}
+
+static void gen5_wm_affine_nomask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_wm_kernel__affine(&p, 16);
+
+	compare(ps_kernel_nomask_affine);
+}
+
+static void gen5_wm_affine_mask_noca(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_wm_kernel__affine_mask(&p, 16);
+
+	compare(ps_kernel_masknoca_affine);
+}
+
+static void gen5_wm_affine_mask_ca(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_wm_kernel__affine_mask_ca(&p, 16);
+
+	compare(ps_kernel_maskca_affine);
+}
+
+static void gen5_wm_projective_nomask(void)
+{
+	uint32_t store[128];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 50, store);
+	brw_wm_kernel__projective(&p, 16);
+
+	compare(ps_kernel_nomask_projective);
+}
+
/* Run every gen5 assembler self-test. */
void brw_test_gen5(void)
{
	gen5_sf();
	gen5_sf_mask();

	gen5_wm_affine_nomask();
	gen5_wm_affine_mask_noca();
	gen5_wm_affine_mask_ca();

	gen5_wm_projective_nomask();
}
diff --git a/src/sna/brw/brw_test_gen6.c b/src/sna/brw/brw_test_gen6.c
new file mode 100644
index 0000000..64bc2fb
--- /dev/null
+++ b/src/sna/brw/brw_test_gen6.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "brw_test.h"
+
+#include <string.h>
+
/* Precompiled gen6 reference kernels.  gen6+ hardware interpolates via
 * PLN from the thread payload, so there is no SF kernel and no
 * exa_wm_xy fragment.  The maskca*, srcalpha*, packed and planar
 * kernels are not yet exercised by the tests below. */
static const uint32_t ps_kernel_nomask_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_nomask_projective[][4] = {
#include "exa_wm_src_projective.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_maskca_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_mask_affine.g6b"
#include "exa_wm_mask_sample_argb.g6b"
#include "exa_wm_ca.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_maskca_projective[][4] = {
#include "exa_wm_src_projective.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_mask_projective.g6b"
#include "exa_wm_mask_sample_argb.g6b"
#include "exa_wm_ca.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_a.g6b"
#include "exa_wm_mask_affine.g6b"
#include "exa_wm_mask_sample_argb.g6b"
#include "exa_wm_ca_srcalpha.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
#include "exa_wm_src_projective.g6b"
#include "exa_wm_src_sample_a.g6b"
#include "exa_wm_mask_projective.g6b"
#include "exa_wm_mask_sample_argb.g6b"
#include "exa_wm_ca_srcalpha.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_mask_affine.g6b"
#include "exa_wm_mask_sample_a.g6b"
#include "exa_wm_noca.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_masknoca_projective[][4] = {
#include "exa_wm_src_projective.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_mask_projective.g6b"
#include "exa_wm_mask_sample_a.g6b"
#include "exa_wm_noca.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_packed[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_yuv_rgb.g6b"
#include "exa_wm_write.g6b"
};

static const uint32_t ps_kernel_planar[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_planar.g6b"
#include "exa_wm_yuv_rgb.g6b"
#include "exa_wm_write.g6b"
};

/* Diff the assembled program in p against a full reference kernel. */
#define compare(old) brw_test_compare(__FUNCTION__, p.gen, p.store, p.nr_insn, (struct brw_instruction *)old, ARRAY_SIZE(old))
+
/* NOTE(review): disabled hand-written scaffolding used while bringing
 * up the gen6 assembler (emits a SAMPLE by hand and disassembles both
 * the assembled and reference streams side by side).  Dead code --
 * consider deleting once the self-tests are trusted. */
#if 0
static void wm_src_affine(struct brw_compile *p)
{
	brw_PLN(p, brw_message_reg(2), brw_vec1_grf(6,0), brw_vec8_grf(2,0));
	brw_PLN(p, brw_message_reg(3), brw_vec1_grf(6,0), brw_vec8_grf(4,0));
	brw_PLN(p, brw_message_reg(4), brw_vec1_grf(6,4), brw_vec8_grf(2,0));
	brw_PLN(p, brw_message_reg(5), brw_vec1_grf(6,4), brw_vec8_grf(4,0));
}

static void wm_src_sample_argb(struct brw_compile *p)
{
	static const uint32_t fragment[][4] = {
#include "exa_wm_src_affine.g6b"
#include "exa_wm_src_sample_argb.g6b"
#include "exa_wm_write.g6b"
	};
	int n;

	brw_push_insn_state(p);
	brw_set_mask_control(p, BRW_MASK_DISABLE);
	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
	brw_MOV(p,
		retype(brw_vec1_grf(0,2), BRW_REGISTER_TYPE_UD),
		brw_imm_ud(0));
	brw_pop_insn_state(p);

	brw_SAMPLE(p,
		   retype(vec16(brw_vec8_grf(14, 0)), BRW_REGISTER_TYPE_UW),
		   1,
		   retype(brw_vec8_grf(0, 0), BRW_REGISTER_TYPE_UD),
		   1, 0,
		   WRITEMASK_XYZW,
		   GEN5_SAMPLER_MESSAGE_SAMPLE,
		   8,
		   5,
		   true,
		   BRW_SAMPLER_SIMD_MODE_SIMD16);


	for (n = 0; n < p->nr_insn; n++) {
		brw_disasm(stdout, &p->store[n], 60);
	}

	printf("\n\n");
	for (n = 0; n < ARRAY_SIZE(fragment); n++) {
		brw_disasm(stdout,
			   (const struct brw_instruction *)&fragment[n][0],
			   60);
	}
}

static void wm_write(struct brw_compile *p)
{
}
#endif
+
+static void gen6_ps_nomask_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 60, store);
+	brw_wm_kernel__affine(&p, 16);
+
+	compare(ps_kernel_nomask_affine);
+}
+
+static void gen6_ps_mask_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 60, store);
+	brw_wm_kernel__affine_mask(&p, 16);
+
+	compare(ps_kernel_masknoca_affine);
+}
+
+static void gen6_ps_nomask_projective(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, 60, store);
+	brw_wm_kernel__projective(&p, 16);
+
+	compare(ps_kernel_nomask_projective);
+}
+
/* Run every gen6 assembler self-test. */
void brw_test_gen6(void)
{
	gen6_ps_nomask_affine();
	gen6_ps_mask_affine();

	gen6_ps_nomask_projective();
}
diff --git a/src/sna/brw/brw_test_gen7.c b/src/sna/brw/brw_test_gen7.c
new file mode 100644
index 0000000..c3f0e23
--- /dev/null
+++ b/src/sna/brw/brw_test_gen7.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "brw_test.h"
+
+#include <string.h>
+
/* Precompiled gen7 reference kernels.
 * NOTE(review): the combine fragments (exa_wm_ca, exa_wm_ca_srcalpha,
 * exa_wm_noca) are included as .g6b rather than .g7b -- confirm this is
 * deliberate reuse of the gen6 binaries (the combine math may be
 * encoding-identical) and not a typo. */
static const uint32_t ps_kernel_nomask_affine[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_nomask_projective[][4] = {
#include "exa_wm_src_projective.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_maskca_affine[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_mask_affine.g7b"
#include "exa_wm_mask_sample_argb.g7b"
#include "exa_wm_ca.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_maskca_projective[][4] = {
#include "exa_wm_src_projective.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_mask_projective.g7b"
#include "exa_wm_mask_sample_argb.g7b"
#include "exa_wm_ca.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_maskca_srcalpha_affine[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_a.g7b"
#include "exa_wm_mask_affine.g7b"
#include "exa_wm_mask_sample_argb.g7b"
#include "exa_wm_ca_srcalpha.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_maskca_srcalpha_projective[][4] = {
#include "exa_wm_src_projective.g7b"
#include "exa_wm_src_sample_a.g7b"
#include "exa_wm_mask_projective.g7b"
#include "exa_wm_mask_sample_argb.g7b"
#include "exa_wm_ca_srcalpha.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_masknoca_affine[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_mask_affine.g7b"
#include "exa_wm_mask_sample_a.g7b"
#include "exa_wm_noca.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_masknoca_projective[][4] = {
#include "exa_wm_src_projective.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_mask_projective.g7b"
#include "exa_wm_mask_sample_a.g7b"
#include "exa_wm_noca.g6b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_packed[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_argb.g7b"
#include "exa_wm_yuv_rgb.g7b"
#include "exa_wm_write.g7b"
};

static const uint32_t ps_kernel_planar[][4] = {
#include "exa_wm_src_affine.g7b"
#include "exa_wm_src_sample_planar.g7b"
#include "exa_wm_yuv_rgb.g7b"
#include "exa_wm_write.g7b"
};

/* Diff the assembled program in p against a full reference kernel. */
#define compare(old) brw_test_compare(__FUNCTION__, p.gen, p.store, p.nr_insn, (struct brw_instruction *)old, ARRAY_SIZE(old))
/* Generation number passed to brw_compile_init (7.0 encoded as 70). */
#define GEN 70
+
+static void gen7_ps_nomask_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__affine(&p, 8);
+
+	compare(ps_kernel_nomask_affine);
+}
+
+static void gen7_ps_mask_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__affine_mask(&p, 8);
+
+	compare(ps_kernel_masknoca_affine);
+}
+
+static void gen7_ps_maskca_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__affine_mask_ca(&p, 8);
+
+	compare(ps_kernel_maskca_affine);
+}
+
+static void gen7_ps_masksa_affine(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__affine_mask_sa(&p, 8);
+
+	compare(ps_kernel_maskca_srcalpha_affine);
+}
+
+static void gen7_ps_nomask_projective(void)
+{
+	uint32_t store[1024];
+	struct brw_compile p;
+
+	brw_compile_init(&p, GEN, store);
+	brw_wm_kernel__projective(&p, 8);
+
+	compare(ps_kernel_nomask_projective);
+}
+
/* Run every gen7 assembler self-test. */
void brw_test_gen7(void)
{
	gen7_ps_nomask_affine();
	gen7_ps_mask_affine();
	gen7_ps_maskca_affine();
	gen7_ps_masksa_affine();

	gen7_ps_nomask_projective();
}
diff --git a/src/sna/brw/brw_wm.c b/src/sna/brw/brw_wm.c
new file mode 100644
index 0000000..9a8af5f
--- /dev/null
+++ b/src/sna/brw/brw_wm.c
@@ -0,0 +1,542 @@
+#include "brw.h"
+
+#define X16 8
+#define Y16 10
+
/* Pre-gen6 only: derive per-pixel X/Y coordinates from the thread
 * payload in g1 into the float X16/Y16 registers used later by the
 * LINE/MAC interpolation in brw_wm_affine_st().
 * The brw_imm_v packed-nibble immediates add the per-channel subspan
 * offsets to the block origin held in g1 (UW elements 4 and 5); the
 * final compressed ADDs subtract g1.0/g1.1 while converting to float
 * -- presumably rebasing to the render-target origin; confirm against
 * the gen4 PS payload layout. */
static void brw_wm_xy(struct brw_compile *p, int dw)
{
	struct brw_reg r1 = brw_vec1_grf(1, 0);
	struct brw_reg r1_uw = __retype_uw(r1);
	struct brw_reg x_uw, y_uw;

	/* The UW coordinate expansion is emitted uncompressed. */
	brw_set_compression_control(p, BRW_COMPRESSION_NONE);

	if (dw == 16) {
		x_uw = brw_uw16_grf(30, 0);
		y_uw = brw_uw16_grf(28, 0);
	} else {
		x_uw = brw_uw8_grf(30, 0);
		y_uw = brw_uw8_grf(28, 0);
	}

	brw_ADD(p,
		x_uw,
		__stride(__suboffset(r1_uw, 4), 2, 4, 0),
		brw_imm_v(0x10101010));
	brw_ADD(p,
		y_uw,
		__stride(__suboffset(r1_uw, 5), 2, 4, 0),
		brw_imm_v(0x11001100));

	/* Convert to float, compressed to cover the full dispatch. */
	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	brw_ADD(p, brw_vec8_grf(X16, 0), vec8(x_uw), brw_negate(r1));
	brw_ADD(p, brw_vec8_grf(Y16, 0), vec8(y_uw), brw_negate(__suboffset(r1, 1)));
}
+
/* Interpolate one affine (s, t) coordinate pair into message registers
 * msg+1 and msg+1+dw/8.  'channel' selects the interpolant set (the
 * kernels below pass 0 for source, 1 for mask); the grf holding the
 * plane coefficients depends on gen and dispatch width.
 * gen6+ uses PLN against the barycentric payload in g2; earlier gens
 * use LINE/MAC against the X16/Y16 registers set up by brw_wm_xy(). */
static void brw_wm_affine_st(struct brw_compile *p, int dw, int channel, int msg)
{
	int uv;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		uv = p->gen >= 60 ? 6 : 3;
		uv += 2*channel;
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		uv = p->gen >= 60 ? 4 : 3;
		uv += channel;
	}

	msg++;
	if (p->gen >= 60) {
		brw_PLN(p,
			brw_message_reg(msg),
			brw_vec1_grf(uv, 0),
			brw_vec8_grf(2, 0));
		msg += dw/8;

		brw_PLN(p,
			brw_message_reg(msg),
			brw_vec1_grf(uv, 4),
			brw_vec8_grf(2, 0));
	} else {
		struct brw_reg r = brw_vec1_grf(uv, 0);

		/* s = a0 + a1*x + a2*y via LINE (a1*x + a0) then MAC (+ a2*y) */
		brw_LINE(p, brw_null_reg(), __suboffset(r, 0), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_message_reg(msg), __suboffset(r, 1), brw_vec8_grf(Y16, 0));
		msg += dw/8;

		brw_LINE(p, brw_null_reg(), __suboffset(r, 4), brw_vec8_grf(X16, 0));
		brw_MAC(p, brw_message_reg(msg), __suboffset(r, 5), brw_vec8_grf(Y16, 0));
	}
}
+
+static inline unsigned simd(int dw)
+{
+	return dw == 16 ? BRW_SAMPLER_SIMD_MODE_SIMD16 : BRW_SAMPLER_SIMD_MODE_SIMD8;
+}
+
/* Destination region for a SAMPLE response: UW-typed, starting at grf
 * 'result', with stride/width sized for the dispatch width. */
static inline struct brw_reg sample_result(int dw, int result)
{
	return brw_reg(BRW_GENERAL_REGISTER_FILE, result, 0,
		       BRW_REGISTER_TYPE_UW,
		       dw == 16 ? BRW_VERTICAL_STRIDE_16 : BRW_VERTICAL_STRIDE_8,
		       dw == 16 ? BRW_WIDTH_16 : BRW_WIDTH_8,
		       BRW_HORIZONTAL_STRIDE_1,
		       BRW_SWIZZLE_XYZW,
		       WRITEMASK_XYZW);
}
+
/* Emit a four-channel (argb) sampler SAMPLE whose coordinates were
 * written by brw_wm_affine_st() starting at message register msg.
 * Binding-table index is channel+1, sampler index is channel.
 * gen6+ sends headerless from the message registers; earlier gens
 * send g0 as the message header.  Response is 2*len grfs at 'result'. */
static void brw_wm_sample(struct brw_compile *p, int dw,
			  int channel, int msg, int result)
{
	struct brw_reg src0;
	bool header;
	int len;

	len = dw == 16 ? 4 : 2;
	if (p->gen >= 60) {
		header = false;
		src0 = brw_message_reg(++msg);
	} else {
		header = true;
		src0 = brw_vec8_grf(0, 0);
	}

	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
		   channel+1, channel, WRITEMASK_XYZW, 0,
		   2*len, len+header, header, simd(dw));
}
+
/* Sample only the alpha (W) channel, e.g. for an a8 mask surface;
 * response is len/2 grfs at 'result'.
 * NOTE(review): unlike brw_wm_sample(), this always passes header=true
 * with message length len+1, yet on gen6+ takes src0 from
 * brw_message_reg(msg) without bumping msg past a header register --
 * confirm the gen6+ path is intentional and matches the reference
 * binaries. */
static void brw_wm_sample__alpha(struct brw_compile *p, int dw,
				 int channel, int msg, int result)
{
	struct brw_reg src0;
	int len;

	len = dw == 16 ? 4 : 2;
	if (p->gen >= 60)
		src0 = brw_message_reg(msg);
	else
		src0 = brw_vec8_grf(0, 0);

	brw_SAMPLE(p, sample_result(dw, result), msg, src0,
		   channel+1, channel, WRITEMASK_W, 0,
		   len/2, len+1, true, simd(dw));
}
+
/* Interpolate affine (s, t) and sample all four channels into the grfs
 * starting at 'result'. */
static void brw_wm_affine(struct brw_compile *p, int dw,
			  int channel, int msg, int result)
{
	brw_wm_affine_st(p, dw, channel, msg);
	brw_wm_sample(p, dw, channel, msg, result);
}
+
/* Interpolate affine (s, t) and sample only the alpha channel into the
 * grfs starting at 'result'. */
static void brw_wm_affine__alpha(struct brw_compile *p, int dw,
				 int channel, int msg, int result)
{
	brw_wm_affine_st(p, dw, channel, msg);
	brw_wm_sample__alpha(p, dw, channel, msg, result);
}
+
/* Null ARF destination for the render-target write SEND (the write
 * returns no data), sized for the dispatch width. */
static inline struct brw_reg null_result(int dw)
{
	return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
		       BRW_ARF_NULL, 0,
		       BRW_REGISTER_TYPE_UW,
		       dw == 16 ? BRW_VERTICAL_STRIDE_16 : BRW_VERTICAL_STRIDE_8,
		       dw == 16 ? BRW_WIDTH_16 : BRW_WIDTH_8,
		       BRW_HORIZONTAL_STRIDE_1,
		       BRW_SWIZZLE_XYZW,
		       WRITEMASK_XYZW);
}
+
/* Emit the final render-target write SEND.  SIMD16 sends 8 message
 * registers of color, SIMD8 sends 4; pre-gen6 additionally copies g1
 * into m1 (message header contents) and uses g0 as the implied header,
 * adding 2 to the message length. */
static void brw_fb_write(struct brw_compile *p, int dw)
{
	struct brw_instruction *insn;
	unsigned msg_control, msg_type, msg_len;
	struct brw_reg src0;
	bool header;

	if (dw == 16) {
		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE;
		msg_len = 8;
	} else {
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		msg_control = BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01;
		msg_len = 4;
	}

	if (p->gen < 60) {
		/* Build the message header in m1 without the execution mask. */
		brw_push_insn_state(p);
		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
		brw_set_mask_control(p, BRW_MASK_DISABLE);
		brw_MOV(p, brw_message_reg(1), brw_vec8_grf(1, 0));
		brw_pop_insn_state(p);

		msg_len += 2;
	}

	/* The execution mask is ignored for render target writes. */
	insn = brw_next_insn(p, BRW_OPCODE_SEND);
	insn->header.predicate_control = 0;
	insn->header.compression_control = BRW_COMPRESSION_NONE;

	if (p->gen >= 60) {
		src0 = brw_message_reg(2);
		msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		header = false;
	} else {
		insn->header.destreg__conditionalmod = 0;
		msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
		src0 = __retype_uw(brw_vec8_grf(0, 0));
		header = true;
	}

	brw_set_dest(p, insn, null_result(dw));
	brw_set_src0(p, insn, src0);
	/* Trailing flags include last-render-target/EOT -- see
	 * brw_set_dp_write_message for the argument order. */
	brw_set_dp_write_message(p, insn, 0,
				 msg_control, msg_type, msg_len,
				 header, true, 0, true, false);
}
+
/* Move the computed color (four channel pairs of grfs starting at
 * 'src') into the message registers and issue the render-target write.
 * gen6+ SIMD8 needs only two MOVs; gen4.5+ SIMD16 uses the COMPR4 mrf
 * addressing trick to move both compressed halves per instruction;
 * plain gen4 moves each half with explicit compression control. */
static void brw_wm_write(struct brw_compile *p, int dw, int src)
{
	int n;

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	if (dw == 8 && p->gen >= 60) {
		brw_MOV(p, brw_message_reg(2), brw_vec8_grf(src, 0));
		brw_MOV(p, brw_message_reg(4), brw_vec8_grf(src+2, 0));
		goto done;
	}

	for (n = 0; n < 4; n++) {
		if (p->gen >= 60) {
			brw_MOV(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0));
		} else if (p->gen >= 45 && dw == 16) {
			brw_MOV(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MOV(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0));

			if (dw == 16) {
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MOV(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n+1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
+
/* Float region over grf 'nr' with zero vertical stride, used as the
 * a8 mask operand in the SIMD8 gen6+ path of brw_wm_write__mask() --
 * presumably so the single mask row is reused across the operation;
 * confirm against the region semantics in the PRM. */
static inline struct brw_reg mask_a8(int nr)
{
	return brw_reg(BRW_GENERAL_REGISTER_FILE,
		       nr, 0,
		       BRW_REGISTER_TYPE_F,
		       BRW_VERTICAL_STRIDE_0,
		       BRW_WIDTH_8,
		       BRW_HORIZONTAL_STRIDE_1,
		       BRW_SWIZZLE_XYZW,
		       WRITEMASK_XYZW);
}
+
/* As brw_wm_write(), but multiply every color channel by the single
 * mask value (alpha-only mask, IN operation) while moving into the
 * message registers, then issue the render-target write.  The same
 * mask grf (mask, or mask+1 for the second compressed half on gen4)
 * is applied to all four channels. */
static void brw_wm_write__mask(struct brw_compile *p,
			       int dw,
			       int src, int mask)
{
	int n;

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	if (dw == 8 && p->gen >= 60) {
		brw_MUL(p,
			brw_message_reg(2),
			brw_vec8_grf(src, 0),
			mask_a8(mask));
		brw_MUL(p,
			brw_message_reg(4),
			brw_vec8_grf(src+2, 0),
			mask_a8(mask));
		goto done;
	}

	for (n = 0; n < 4; n++) {
		if (p->gen >= 60) {
			brw_MUL(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));
		} else if (p->gen >= 45 && dw == 16) {
			brw_MUL(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MUL(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask, 0));

			if (dw == 16) {
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MUL(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n+1, 0),
					brw_vec8_grf(mask+1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
+
/* As brw_wm_write__mask(), but with a component-alpha mask: each color
 * channel is multiplied by the corresponding mask channel (mask grf
 * offset 2*n tracks src grf offset 2*n). */
static void brw_wm_write__mask_ca(struct brw_compile *p,
				  int dw, int src, int mask)
{
	int n;

	brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);

	if (dw == 8 && p->gen >= 60) {
		brw_MUL(p,
			brw_message_reg(2),
			brw_vec8_grf(src, 0),
			brw_vec8_grf(mask, 0));
		brw_MUL(p,
			brw_message_reg(4),
			brw_vec8_grf(src + 2, 0),
			brw_vec8_grf(mask + 2, 0));
		goto done;
	}

	for (n = 0; n < 4; n++) {
		if (p->gen >= 60) {
			brw_MUL(p,
				brw_message_reg(2 + 2*n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));
		} else if (p->gen >= 45 && dw == 16) {
			brw_MUL(p,
				brw_message_reg(2 + n + BRW_MRF_COMPR4),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));
		} else {
			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
			brw_MUL(p,
				brw_message_reg(2 + n),
				brw_vec8_grf(src + 2*n, 0),
				brw_vec8_grf(mask + 2*n, 0));

			if (dw == 16) {
				brw_set_compression_control(p, BRW_COMPRESSION_2NDHALF);
				brw_MUL(p,
					brw_message_reg(2 + n + 4),
					brw_vec8_grf(src + 2*n + 1, 0),
					brw_vec8_grf(mask + 2*n + 1, 0));
			}
		}
	}

done:
	brw_fb_write(p, dw);
}
+
/* WM kernel: affine source, no mask.  Source color is sampled into
 * grfs starting at 12; pre-gen6 first materialises X16/Y16. */
void
brw_wm_kernel__affine(struct brw_compile *p, int dispatch)
{
	int src = 12;

	if (p->gen < 60)
		brw_wm_xy(p, dispatch);
	brw_wm_affine(p, dispatch, 0, 1, src);
	brw_wm_write(p, dispatch, src);
}
+
+void
+brw_wm_kernel__affine_mask(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 20;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_affine(p, dispatch, 0, 1, src);
+	brw_wm_affine__alpha(p, dispatch, 1, 7, mask);
+	brw_wm_write__mask(p, dispatch, src, mask);
+}
+
+void
+brw_wm_kernel__affine_mask_ca(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 20;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_affine(p, dispatch, 0, 1, src);
+	brw_wm_affine(p, dispatch, 1, 7, mask);
+	brw_wm_write__mask_ca(p, dispatch, src, mask);
+}
+
+void
+brw_wm_kernel__affine_mask_sa(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 14;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_affine__alpha(p, dispatch, 0, 1, src);
+	brw_wm_affine(p, dispatch, 1, 7, mask);
+	brw_wm_write__mask(p, dispatch, mask, src);
+}
+
+/* Projective variants */
+
+static void brw_wm_projective_st(struct brw_compile *p, int dw, int channel, int msg)
+{
+	int uv;
+
+	if (dw == 16) {
+		brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+		uv = p->gen >= 60 ? 6 : 3;
+		uv += 2*channel;
+	} else {
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+		uv = p->gen >= 60 ? 4 : 3;
+		uv += channel;
+	}
+
+	msg++;
+	if (p->gen >= 60) {
+		/* First compute 1/z */
+		brw_PLN(p,
+			brw_message_reg(msg),
+			brw_vec1_grf(uv+1, 0),
+			brw_vec8_grf(2, 0));
+
+		if (dw == 16) {
+			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
+			brw_math_invert(p, brw_vec8_grf(31, 0), brw_vec8_grf(31, 0));
+			brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+		} else
+			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
+		brw_PLN(p,
+			brw_vec8_grf(28, 0),
+			brw_vec1_grf(uv, 0),
+			brw_vec8_grf(2, 0));
+		brw_MUL(p,
+			brw_message_reg(msg),
+			brw_vec8_grf(28, 0),
+			brw_vec8_grf(30, 0));
+		msg += dw/8;
+
+		brw_PLN(p,
+			brw_vec8_grf(28, 0),
+			brw_vec1_grf(uv, 0),
+			brw_vec8_grf(4, 0));
+		brw_MUL(p,
+			brw_message_reg(msg),
+			brw_vec8_grf(28, 0),
+			brw_vec8_grf(30, 0));
+	} else {
+		struct brw_reg r = brw_vec1_grf(uv, 0);
+
+		/* First compute 1/z */
+		brw_LINE(p, brw_null_reg(), brw_vec1_grf(uv+1, 0), brw_vec8_grf(X16, 0));
+		brw_MAC(p, brw_vec8_grf(30, 0), brw_vec1_grf(uv+1, 1), brw_vec8_grf(Y16, 0));
+
+		if (dw == 16) {
+			brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
+			brw_math_invert(p, brw_vec8_grf(31, 0), brw_vec8_grf(31, 0));
+			brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
+		} else
+			brw_math_invert(p, brw_vec8_grf(30, 0), brw_vec8_grf(30, 0));
+
+		/* Now compute the output s,t values */
+		brw_LINE(p, brw_null_reg(), __suboffset(r, 0), brw_vec8_grf(X16, 0));
+		brw_MAC(p, brw_vec8_grf(28, 0), __suboffset(r, 1), brw_vec8_grf(Y16, 0));
+		brw_MUL(p, brw_message_reg(msg), brw_vec8_grf(28, 0), brw_vec8_grf(30, 0));
+		msg += dw/8;
+
+		brw_LINE(p, brw_null_reg(), __suboffset(r, 4), brw_vec8_grf(X16, 0));
+		brw_MAC(p, brw_vec8_grf(28, 0), __suboffset(r, 5), brw_vec8_grf(Y16, 0));
+		brw_MUL(p, brw_message_reg(msg), brw_vec8_grf(28, 0), brw_vec8_grf(30, 0));
+	}
+}
+
+static void brw_wm_projective(struct brw_compile *p, int dw,
+			      int channel, int msg, int result)
+{
+	brw_wm_projective_st(p, dw, channel, msg);
+	brw_wm_sample(p, dw, channel, msg, result);
+}
+
+static void brw_wm_projective__alpha(struct brw_compile *p, int dw,
+				     int channel, int msg, int result)
+{
+	brw_wm_projective_st(p, dw, channel, msg);
+	brw_wm_sample__alpha(p, dw, channel, msg, result);
+}
+
+void
+brw_wm_kernel__projective(struct brw_compile *p, int dispatch)
+{
+	int src = 12;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_projective(p, dispatch, 0, 1, src);
+	brw_wm_write(p, dispatch, src);
+}
+
+void
+brw_wm_kernel__projective_mask(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 20;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_projective(p, dispatch, 0, 1, src);
+	brw_wm_projective__alpha(p, dispatch, 1, 7, mask);
+	brw_wm_write__mask(p, dispatch, src, mask);
+}
+
+void
+brw_wm_kernel__projective_mask_ca(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 20;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_projective(p, dispatch, 0, 1, src);
+	brw_wm_projective(p, dispatch, 1,7, mask);
+	brw_wm_write__mask_ca(p, dispatch, src, mask);
+}
+
+void
+brw_wm_kernel__projective_mask_sa(struct brw_compile *p, int dispatch)
+{
+	int src = 12, mask = 14;
+
+	if (p->gen < 60)
+		brw_wm_xy(p, dispatch);
+	brw_wm_projective__alpha(p, dispatch, 0, 1, src);
+	brw_wm_projective(p, dispatch, 1, 7, mask);
+	brw_wm_write__mask(p, dispatch, mask, src);
+}
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c292da1..5b64efa 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -179,8 +179,8 @@ static const struct wm_kernel_info {
 	KERNEL(MASKCA, ps_kernel_maskca_affine, 3, 2),
 	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3, 2),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, 3, 2),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3, 2),
+	KERNEL(MASKSA, ps_kernel_maskca_srcalpha_affine, 3, 2),
+	KERNEL(MASKSA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3, 2),
 
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7, 1),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2, 1),
@@ -431,7 +431,7 @@ gen6_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 	if (has_mask) {
 		if (is_ca) {
 			if (gen6_blend_op[op].src_alpha)
-				base = GEN6_WM_KERNEL_MASKCA_SRCALPHA;
+				base = GEN6_WM_KERNEL_MASKSA;
 			else
 				base = GEN6_WM_KERNEL_MASKCA;
 		} else
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index d06b791..ded22d5 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -185,8 +185,8 @@ static const struct wm_kernel_info {
 	KERNEL(MASKCA, ps_kernel_maskca_affine, 3),
 	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, 3),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3),
+	KERNEL(MASKSA, ps_kernel_maskca_srcalpha_affine, 3),
+	KERNEL(MASKSA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3),
 
 	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7),
 	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2),
@@ -437,7 +437,7 @@ gen7_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 	if (has_mask) {
 		if (is_ca) {
 			if (gen7_blend_op[op].src_alpha)
-				base = GEN7_WM_KERNEL_MASKCA_SRCALPHA;
+				base = GEN7_WM_KERNEL_MASKSA;
 			else
 				base = GEN7_WM_KERNEL_MASKCA;
 		} else
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index e676b6a..64e2412 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -13,6 +13,7 @@ struct sna;
 struct sna_glyph;
 struct sna_video;
 struct sna_video_frame;
+struct brw_compile;
 
 struct sna_composite_rectangles {
 	struct sna_coordinate {
@@ -379,8 +380,8 @@ enum {
 	GEN6_WM_KERNEL_MASKCA,
 	GEN6_WM_KERNEL_MASKCA_PROJECTIVE,
 
-	GEN6_WM_KERNEL_MASKCA_SRCALPHA,
-	GEN6_WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
+	GEN6_WM_KERNEL_MASKSA,
+	GEN6_WM_KERNEL_MASKSA_PROJECTIVE,
 
 	GEN6_WM_KERNEL_VIDEO_PLANAR,
 	GEN6_WM_KERNEL_VIDEO_PACKED,
@@ -428,8 +429,8 @@ enum {
 	GEN7_WM_KERNEL_MASKCA,
 	GEN7_WM_KERNEL_MASKCA_PROJECTIVE,
 
-	GEN7_WM_KERNEL_MASKCA_SRCALPHA,
-	GEN7_WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
+	GEN7_WM_KERNEL_MASKSA,
+	GEN7_WM_KERNEL_MASKSA_PROJECTIVE,
 
 	GEN7_WM_KERNEL_VIDEO_PLANAR,
 	GEN7_WM_KERNEL_VIDEO_PACKED,
@@ -479,6 +480,14 @@ void *sna_static_stream_map(struct sna_static_stream *stream,
 			    uint32_t len, uint32_t align);
 uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream,
 				    void *ptr);
+unsigned sna_static_stream_compile_sf(struct sna *sna,
+				      struct sna_static_stream *stream,
+				      void (*compile)(struct brw_compile *));
+
+unsigned sna_static_stream_compile_wm(struct sna *sna,
+				      struct sna_static_stream *stream,
+				      void (*compile)(struct brw_compile *, int),
+				      int width);
 struct kgem_bo *sna_static_stream_fini(struct sna *sna,
 				       struct sna_static_stream *stream);
 
diff --git a/src/sna/sna_stream.c b/src/sna/sna_stream.c
index aab1549..66a8c46 100644
--- a/src/sna/sna_stream.c
+++ b/src/sna/sna_stream.c
@@ -27,6 +27,7 @@
 
 #include "sna.h"
 #include "sna_render.h"
+#include "brw/brw.h"
 
 int sna_static_stream_init(struct sna_static_stream *stream)
 {
@@ -92,3 +93,40 @@ struct kgem_bo *sna_static_stream_fini(struct sna *sna,
 
 	return bo;
 }
+
+unsigned
+sna_static_stream_compile_sf(struct sna *sna,
+			     struct sna_static_stream *stream,
+			     void (*compile)(struct brw_compile *))
+{
+	struct brw_compile p;
+
+	brw_compile_init(&p, sna->kgem.gen,
+			 sna_static_stream_map(stream,
+					       64*sizeof(uint32_t), 64));
+
+	compile(&p);
+	assert(p.nr_insn*sizeof(struct brw_instruction) <= 64*sizeof(uint32_t));
+
+	stream->used -= 64*sizeof(uint32_t) - p.nr_insn*sizeof(struct brw_instruction);
+	return sna_static_stream_offsetof(stream, p.store);
+}
+
+unsigned
+sna_static_stream_compile_wm(struct sna *sna,
+			     struct sna_static_stream *stream,
+			     void (*compile)(struct brw_compile *, int),
+			     int dispatch_width)
+{
+	struct brw_compile p;
+
+	brw_compile_init(&p, sna->kgem.gen,
+			 sna_static_stream_map(stream,
+					       256*sizeof(uint32_t), 64));
+
+	compile(&p, dispatch_width);
+	assert(p.nr_insn*sizeof(struct brw_instruction) <= 256*sizeof(uint32_t));
+
+	stream->used -= 256*sizeof(uint32_t) - p.nr_insn*sizeof(struct brw_instruction);
+	return sna_static_stream_offsetof(stream, p.store);
+}
commit 8ebafa0493c0fa08ab9d80eeb1191b7560dc0863
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Sep 21 19:06:07 2011 +0100

    sna: Add the brw assembler
    
    In order to construct programs on the fly to cater for the combinatorial
    number of possible shaders, we need an assembler, whilst also taking the
    opportunity to remove some of the inefficiencies and mistakes from the
    current shaders.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 9945d5b..2a8d08b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -391,6 +391,7 @@ AC_CONFIG_FILES([
                 src/legacy/i810/Makefile
                 src/legacy/i810/xvmc/Makefile
                 src/sna/Makefile
+                src/sna/brw/Makefile
                 src/sna/fb/Makefile
                 man/Makefile
                 src/render_program/Makefile
diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 8463a80..306996b 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -18,7 +18,7 @@
 #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-SUBDIRS = fb
+SUBDIRS = brw fb
 
 AM_CFLAGS = \
 	@CWARNFLAGS@ \
@@ -34,7 +34,7 @@ AM_CFLAGS += @VALGRIND_CFLAGS@
 endif
 
 noinst_LTLIBRARIES = libsna.la
-libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@ fb/libfb.la
+libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@ brw/libbrw.la fb/libfb.la
 
 libsna_la_SOURCES = \
 	blt.c \
diff --git a/src/sna/brw/Makefile.am b/src/sna/brw/Makefile.am
new file mode 100644
index 0000000..edb3db4
--- /dev/null
+++ b/src/sna/brw/Makefile.am
@@ -0,0 +1,42 @@
+
+#  Copyright 2005 Adam Jackson.
+#
+#  Permission is hereby granted, free of charge, to any person obtaining a
+#  copy of this software and associated documentation files (the "Software"),
+#  to deal in the Software without restriction, including without limitation
+#  on the rights to use, copy, modify, merge, publish, distribute, sub
+#  license, and/or sell copies of the Software, and to permit persons to whom
+#  the Software is furnished to do so, subject to the following conditions:
+#
+#  The above copyright notice and this permission notice (including the next
+#  paragraph) shall be included in all copies or substantial portions of the
+#  Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+#  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+#  FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
+#  ADAM JACKSON BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+#  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+#  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+noinst_LTLIBRARIES = libbrw.la
+
+AM_CFLAGS = \
+	@CWARNFLAGS@ \
+	-I$(top_srcdir)/src \
+	-I$(top_srcdir)/src/render_program \
+	@XORG_CFLAGS@ \
+	@UDEV_CFLAGS@ \
+	@DRM_CFLAGS@ \
+	$(NULL)
+
+if DEBUG
+AM_CFLAGS += @VALGRIND_CFLAGS@
+endif
+
+libbrw_la_SOURCES = \
+	brw_disasm.c \
+	brw_eu.h \
+	brw_eu.c \
+	brw_eu_emit.c \
+	$(NULL)
diff --git a/src/sna/brw/brw_disasm.c b/src/sna/brw/brw_disasm.c
new file mode 100644
index 0000000..106eed3
--- /dev/null
+++ b/src/sna/brw/brw_disasm.c
@@ -0,0 +1,1101 @@
+/*
+ * Copyright © 2008 Keith Packard
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  The copyright holders make no representations
+ * about the suitability of this software for any purpose.  It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include "brw_eu.h"
+
+static const struct {
+	const char *name;
+	int nsrc;
+	int ndst;
+} opcode[128] = {
+	[BRW_OPCODE_MOV] = { .name = "mov", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_FRC] = { .name = "frc", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_RNDU] = { .name = "rndu", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_RNDD] = { .name = "rndd", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_RNDE] = { .name = "rnde", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_RNDZ] = { .name = "rndz", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_NOT] = { .name = "not", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_LZD] = { .name = "lzd", .nsrc = 1, .ndst = 1 },
+
+	[BRW_OPCODE_MUL] = { .name = "mul", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_MAC] = { .name = "mac", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_MACH] = { .name = "mach", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_LINE] = { .name = "line", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_PLN] = { .name = "pln", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_SAD2] = { .name = "sad2", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_SADA2] = { .name = "sada2", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_DP4] = { .name = "dp4", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_DPH] = { .name = "dph", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_DP3] = { .name = "dp3", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_DP2] = { .name = "dp2", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_MATH] = { .name = "math", .nsrc = 2, .ndst = 1 },
+
+	[BRW_OPCODE_AVG] = { .name = "avg", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_ADD] = { .name = "add", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_SEL] = { .name = "sel", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_AND] = { .name = "and", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_OR] = { .name = "or", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_XOR] = { .name = "xor", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_SHR] = { .name = "shr", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_SHL] = { .name = "shl", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_ASR] = { .name = "asr", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_CMP] = { .name = "cmp", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_CMPN] = { .name = "cmpn", .nsrc = 2, .ndst = 1 },
+
+	[BRW_OPCODE_SEND] = { .name = "send", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_SENDC] = { .name = "sendc", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_NOP] = { .name = "nop", .nsrc = 0, .ndst = 0 },
+	[BRW_OPCODE_JMPI] = { .name = "jmpi", .nsrc = 1, .ndst = 0 },
+	[BRW_OPCODE_IF] = { .name = "if", .nsrc = 2, .ndst = 0 },
+	[BRW_OPCODE_IFF] = { .name = "iff", .nsrc = 2, .ndst = 1 },
+	[BRW_OPCODE_WHILE] = { .name = "while", .nsrc = 2, .ndst = 0 },
+	[BRW_OPCODE_ELSE] = { .name = "else", .nsrc = 2, .ndst = 0 },
+	[BRW_OPCODE_BREAK] = { .name = "break", .nsrc = 2, .ndst = 0 },
+	[BRW_OPCODE_CONTINUE] = { .name = "cont", .nsrc = 1, .ndst = 0 },
+	[BRW_OPCODE_HALT] = { .name = "halt", .nsrc = 1, .ndst = 0 },
+	[BRW_OPCODE_MSAVE] = { .name = "msave", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_PUSH] = { .name = "push", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_MRESTORE] = { .name = "mrest", .nsrc = 1, .ndst = 1 },
+	[BRW_OPCODE_POP] = { .name = "pop", .nsrc = 2, .ndst = 0 },
+	[BRW_OPCODE_WAIT] = { .name = "wait", .nsrc = 1, .ndst = 0 },
+	[BRW_OPCODE_DO] = { .name = "do", .nsrc = 0, .ndst = 0 },
+	[BRW_OPCODE_ENDIF] = { .name = "endif", .nsrc = 2, .ndst = 0 },
+};
+
+static const char *conditional_modifier[16] = {
+	[BRW_CONDITIONAL_NONE] = "",
+	[BRW_CONDITIONAL_Z] = ".e",
+	[BRW_CONDITIONAL_NZ] = ".ne",
+	[BRW_CONDITIONAL_G] = ".g",
+	[BRW_CONDITIONAL_GE] = ".ge",
+	[BRW_CONDITIONAL_L] = ".l",
+	[BRW_CONDITIONAL_LE] = ".le",
+	[BRW_CONDITIONAL_R] = ".r",
+	[BRW_CONDITIONAL_O] = ".o",
+	[BRW_CONDITIONAL_U] = ".u",
+};
+
+static const char *negate[2] = {
+	[0] = "",
+	[1] = "-",
+};
+
+static const char *_abs[2] = {
+	[0] = "",
+	[1] = "(abs)",
+};
+
+static const char *vert_stride[16] = {
+	[0] = "0",
+	[1] = "1",
+	[2] = "2",
+	[3] = "4",
+	[4] = "8",
+	[5] = "16",
+	[6] = "32",
+	[15] = "VxH",
+};
+
+static const char *width[8] = {
+	[0] = "1",
+	[1] = "2",
+	[2] = "4",
+	[3] = "8",
+	[4] = "16",
+};
+
+static const char *horiz_stride[4] = {
+	[0] = "0",
+	[1] = "1",
+	[2] = "2",
+	[3] = "4"
+};
+
+static const char *chan_sel[4] = {
+	[0] = "x",
+	[1] = "y",
+	[2] = "z",
+	[3] = "w",
+};
+
+#if 0
+static const char *dest_condmod[16] = {
+};
+
+static const char *imm_encoding[8] = {
+	[0] = "UD",
+	[1] = "D",
+	[2] = "UW",
+	[3] = "W",
+	[5] = "VF",
+	[6] = "V",
+	[7] = "F"
+};
+#endif
+
+static const char *debug_ctrl[2] = {
+	[0] = "",
+	[1] = ".breakpoint"
+};
+
+static const char *saturate[2] = {
+	[0] = "",
+	[1] = ".sat"
+};
+
+static const char *accwr[2] = {
+	[0] = "",
+	[1] = "AccWrEnable"
+};
+
+static const char *wectrl[2] = {
+	[0] = "WE_normal",
+	[1] = "WE_all"
+};
+
+static const char *exec_size[8] = {
+	[0] = "1",
+	[1] = "2",
+	[2] = "4",
+	[3] = "8",
+	[4] = "16",
+	[5] = "32"
+};
+
+static const char *pred_inv[2] = {
+	[0] = "+",
+	[1] = "-"
+};
+
+static const char *pred_ctrl_align16[16] = {
+	[1] = "",
+	[2] = ".x",
+	[3] = ".y",
+	[4] = ".z",
+	[5] = ".w",
+	[6] = ".any4h",
+	[7] = ".all4h",
+};
+
+static const char *pred_ctrl_align1[16] = {
+	[1] = "",
+	[2] = ".anyv",
+	[3] = ".allv",
+	[4] = ".any2h",
+	[5] = ".all2h",
+	[6] = ".any4h",
+	[7] = ".all4h",
+	[8] = ".any8h",
+	[9] = ".all8h",
+	[10] = ".any16h",
+	[11] = ".all16h",
+};
+
+static const char *thread_ctrl[4] = {
+	[0] = "",
+	[2] = "switch"
+};
+
+static const char *compr_ctrl[4] = {
+	[0] = "",
+	[1] = "sechalf",
+	[2] = "compr",
+	[3] = "compr4",
+};
+
+static const char *dep_ctrl[4] = {
+	[0] = "",
+	[1] = "NoDDClr",
+	[2] = "NoDDChk",
+	[3] = "NoDDClr,NoDDChk",
+};
+
+static const char *mask_ctrl[4] = {
+	[0] = "",
+	[1] = "nomask",
+};
+
+static const char *access_mode[2] = {
+	[0] = "align1",
+	[1] = "align16",
+};
+
+static const char *reg_encoding[8] = {
+	[0] = "UD",
+	[1] = "D",
+	[2] = "UW",
+	[3] = "W",
+	[4] = "UB",
+	[5] = "B",
+	[7] = "F"
+};
+
+static const int reg_type_size[8] = {
+	[0] = 4,
+	[1] = 4,
+	[2] = 2,
+	[3] = 2,
+	[4] = 1,
+	[5] = 1,
+	[7] = 4
+};
+
+static const char *reg_file[4] = {
+	[0] = "A",
+	[1] = "g",
+	[2] = "m",
+	[3] = "imm",
+};
+
+static const char *writemask[16] = {
+	[0x0] = ".",
+	[0x1] = ".x",
+	[0x2] = ".y",
+	[0x3] = ".xy",
+	[0x4] = ".z",
+	[0x5] = ".xz",
+	[0x6] = ".yz",
+	[0x7] = ".xyz",
+	[0x8] = ".w",
+	[0x9] = ".xw",
+	[0xa] = ".yw",
+	[0xb] = ".xyw",
+	[0xc] = ".zw",
+	[0xd] = ".xzw",
+	[0xe] = ".yzw",
+	[0xf] = "",
+};
+
+static const char *end_of_thread[2] = {
+	[0] = "",
+	[1] = "EOT"
+};
+
+static const char *target_function[16] = {
+	[BRW_SFID_NULL] = "null",
+	[BRW_SFID_MATH] = "math",
+	[BRW_SFID_SAMPLER] = "sampler",
+	[BRW_SFID_MESSAGE_GATEWAY] = "gateway",
+	[BRW_SFID_DATAPORT_READ] = "read",
+	[BRW_SFID_DATAPORT_WRITE] = "write",
+	[BRW_SFID_URB] = "urb",
+	[BRW_SFID_THREAD_SPAWNER] = "thread_spawner"
+};
+
+static const char *target_function_gen6[16] = {
+	[BRW_SFID_NULL] = "null",
+	[BRW_SFID_MATH] = "math",
+	[BRW_SFID_SAMPLER] = "sampler",
+	[BRW_SFID_MESSAGE_GATEWAY] = "gateway",
+	[BRW_SFID_URB] = "urb",
+	[BRW_SFID_THREAD_SPAWNER] = "thread_spawner",
+	[GEN6_SFID_DATAPORT_SAMPLER_CACHE] = "sampler",
+	[GEN6_SFID_DATAPORT_RENDER_CACHE] = "render",
+	[GEN6_SFID_DATAPORT_CONSTANT_CACHE] = "const",
+	[GEN7_SFID_DATAPORT_DATA_CACHE] = "data"
+};
+
+static const char *dp_rc_msg_type_gen6[16] = {
+	[BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ] = "OWORD block read",
+	[GEN6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ] = "RT UNORM read",
+	[GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ] = "OWORD dual block read",
+	[GEN6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ] = "media block read",
+	[GEN6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ] = "OWORD unaligned block read",
+	[GEN6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ] = "DWORD scattered read",
+	[GEN6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE] = "DWORD atomic write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE] = "OWORD block write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE] = "OWORD dual block write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE] = "media block write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE] = "DWORD scattered write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE] = "RT write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE] = "streamed VB write",
+	[GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE] = "RT UNORMc write",
+};
+
+static const char *math_function[16] = {
+	[BRW_MATH_FUNCTION_INV] = "inv",
+	[BRW_MATH_FUNCTION_LOG] = "log",
+	[BRW_MATH_FUNCTION_EXP] = "exp",
+	[BRW_MATH_FUNCTION_SQRT] = "sqrt",
+	[BRW_MATH_FUNCTION_RSQ] = "rsq",
+	[BRW_MATH_FUNCTION_SIN] = "sin",
+	[BRW_MATH_FUNCTION_COS] = "cos",
+	[BRW_MATH_FUNCTION_SINCOS] = "sincos",
+	[BRW_MATH_FUNCTION_TAN] = "tan",
+	[BRW_MATH_FUNCTION_POW] = "pow",
+	[BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER] = "intdivmod",
+	[BRW_MATH_FUNCTION_INT_DIV_QUOTIENT] = "intdiv",
+	[BRW_MATH_FUNCTION_INT_DIV_REMAINDER] = "intmod",
+};
+
+static const char *math_saturate[2] = {
+	[0] = "",
+	[1] = "sat"
+};
+
+static const char *math_signed[2] = {
+	[0] = "",
+	[1] = "signed"
+};
+
+static const char *math_scalar[2] = {
+	[0] = "",
+	[1] = "scalar"
+};
+
+static const char *math_precision[2] = {
+	[0] = "",
+	[1] = "partial_precision"
+};
+
+static const char *urb_opcode[2] = {
+	[0] = "urb_write",
+	[1] = "ff_sync",
+};
+
+static const char *urb_swizzle[4] = {
+	[BRW_URB_SWIZZLE_NONE] = "",
+	[BRW_URB_SWIZZLE_INTERLEAVE] = "interleave",
+	[BRW_URB_SWIZZLE_TRANSPOSE] = "transpose",
+};
+
+static const char *urb_allocate[2] = {
+	[0] = "",
+	[1] = "allocate"
+};
+
+static const char *urb_used[2] = {
+	[0] = "",
+	[1] = "used"
+};
+
+static const char *urb_complete[2] = {
+	[0] = "",
+	[1] = "complete"
+};
+
+static const char *sampler_target_format[4] = {
+	[0] = "F",
+	[2] = "UD",
+	[3] = "D"
+};
+
+static int column;
+
+static int string(FILE *file, const char *str)
+{
+	fputs(str, file);
+	column += strlen(str);
+	return 0;
+}
+
+static int format(FILE *f, const char *fmt, ...)
+{
+	char buf[1024];
+	va_list	args;
+
+	va_start(args, fmt);
+	vsnprintf(buf, sizeof(buf) - 1, fmt, args);
+	va_end(args);
+
+	string(f, buf);
+	return 0;
+}
+
+static void newline(FILE *f)
+{
+	putc('\n', f);
+	column = 0;
+}
+
+static void pad(FILE *f, int c)
+{
+	do
+		string(f, " ");
+	while (column < c);
+}
+
+static void control(FILE *file, const char *name, const char *ctrl[], unsigned id, int *space)
+{
+	if (!ctrl[id]) {
+		fprintf(file, "*** invalid %s value %d ",
+			name, id);
+		assert(0);
+	}
+	if (ctrl[id][0]) {
+		if (space && *space)
+			string(file, " ");
+		string(file, ctrl[id]);
+		if (space)
+			*space = 1;
+	}
+}
+
+static void print_opcode(FILE *file, int id)
+{
+	if (!opcode[id].name) {
+		format(file, "*** invalid opcode value %d ", id);
+		assert(0);
+	}
+	string(file, opcode[id].name);
+}
+
+static int reg(FILE *file, unsigned _reg_file, unsigned _reg_nr)
+{
+	/* Clear the Compr4 instruction compression bit. */
+	if (_reg_file == BRW_MESSAGE_REGISTER_FILE)
+		_reg_nr &= ~(1 << 7);
+
+	if (_reg_file == BRW_ARCHITECTURE_REGISTER_FILE) {
+		switch (_reg_nr & 0xf0) {
+		case BRW_ARF_NULL:
+			string(file, "null");
+			return -1;
+		case BRW_ARF_ADDRESS:
+			format(file, "a%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_ACCUMULATOR:
+			format(file, "acc%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_FLAG:
+			format(file, "f%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_MASK:
+			format(file, "mask%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_MASK_STACK:
+			format(file, "msd%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_STATE:
+			format(file, "sr%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_CONTROL:
+			format(file, "cr%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_NOTIFICATION_COUNT:
+			format(file, "n%d", _reg_nr & 0x0f);
+			break;
+		case BRW_ARF_IP:
+			string(file, "ip");
+			return -1;
+		default:
+			format(file, "ARF%d", _reg_nr);
+			break;
+		}
+	} else {
+		control(file, "src reg file", reg_file, _reg_file, NULL);
+		format(file, "%d", _reg_nr);
+	}
+	return 0;
+}
+
+static void dest(FILE *file, const struct brw_instruction *inst)
+{
+	if (inst->header.access_mode == BRW_ALIGN_1) {
+		if (inst->bits1.da1.dest_address_mode == BRW_ADDRESS_DIRECT) {
+			if (reg(file, inst->bits1.da1.dest_reg_file, inst->bits1.da1.dest_reg_nr))
+				return;
+
+			if (inst->bits1.da1.dest_subreg_nr)
+				format(file, ".%d", inst->bits1.da1.dest_subreg_nr /
+				       reg_type_size[inst->bits1.da1.dest_reg_type]);
+			format(file, "<%d>", inst->bits1.da1.dest_horiz_stride);
+			control(file, "dest reg encoding", reg_encoding, inst->bits1.da1.dest_reg_type, NULL);
+		} else {
+			string(file, "g[a0");
+			if (inst->bits1.ia1.dest_subreg_nr)
+				format(file, ".%d", inst->bits1.ia1.dest_subreg_nr /
+				       reg_type_size[inst->bits1.ia1.dest_reg_type]);
+			if (inst->bits1.ia1.dest_indirect_offset)
+				format(file, " %d", inst->bits1.ia1.dest_indirect_offset);
+			string(file, "]");
+			format(file, "<%d>", inst->bits1.ia1.dest_horiz_stride);
+			control(file, "dest reg encoding", reg_encoding, inst->bits1.ia1.dest_reg_type, NULL);
+		}
+	} else {
+		if (inst->bits1.da16.dest_address_mode == BRW_ADDRESS_DIRECT) {
+			if (reg(file, inst->bits1.da16.dest_reg_file, inst->bits1.da16.dest_reg_nr))
+				return;
+
+			if (inst->bits1.da16.dest_subreg_nr)
+				format(file, ".%d", inst->bits1.da16.dest_subreg_nr /
+				       reg_type_size[inst->bits1.da16.dest_reg_type]);
+			string(file, "<1>");
+			control(file, "writemask", writemask, inst->bits1.da16.dest_writemask, NULL);
+			control(file, "dest reg encoding", reg_encoding, inst->bits1.da16.dest_reg_type, NULL);
+		} else {
+			string(file, "Indirect align16 address mode not supported");
+		}
+	}
+}
+
+static void src_align1_region(FILE *file,
+			      unsigned _vert_stride, unsigned _width, unsigned _horiz_stride)
+{
+	string(file, "<");
+	control(file, "vert stride", vert_stride, _vert_stride, NULL);
+	string(file, ",");
+	control(file, "width", width, _width, NULL);
+	string(file, ",");
+	control(file, "horiz_stride", horiz_stride, _horiz_stride, NULL);
+	string(file, ">");
+}
+
+static void src_da1(FILE *file, unsigned type, unsigned _reg_file,
+		    unsigned _vert_stride, unsigned _width, unsigned _horiz_stride,
+		    unsigned reg_num, unsigned sub_reg_num, unsigned __abs, unsigned _negate)
+{
+	control(file, "negate", negate, _negate, NULL);
+	control(file, "abs", _abs, __abs, NULL);
+
+	if (reg(file, _reg_file, reg_num))
+		return;
+
+	if (sub_reg_num)
+		format(file, ".%d", sub_reg_num / reg_type_size[type]); /* use formal style like spec */
+	src_align1_region(file, _vert_stride, _width, _horiz_stride);
+	control(file, "src reg encoding", reg_encoding, type, NULL);
+}
+
+static void src_ia1(FILE *file,
+		    unsigned type,
+		    unsigned _reg_file,
+		    int _addr_imm,
+		    unsigned _addr_subreg_nr,
+		    unsigned _negate,
+		    unsigned __abs,
+		    unsigned _addr_mode,
+		    unsigned _horiz_stride,
+		    unsigned _width,
+		    unsigned _vert_stride)
+{
+	control(file, "negate", negate, _negate, NULL);
+	control(file, "abs", _abs, __abs, NULL);
+
+	string(file, "g[a0");
+	if (_addr_subreg_nr)
+		format(file, ".%d", _addr_subreg_nr);
+	if (_addr_imm)
+		format(file, " %d", _addr_imm);
+	string(file, "]");
+	src_align1_region(file, _vert_stride, _width, _horiz_stride);
+	control(file, "src reg encoding", reg_encoding, type, NULL);
+}
+
+static void src_da16(FILE *file,
+		     unsigned _reg_type,
+		     unsigned _reg_file,
+		     unsigned _vert_stride,
+		     unsigned _reg_nr,
+		     unsigned _subreg_nr,
+		     unsigned __abs,
+		     unsigned _negate,
+		     unsigned swz_x,
+		     unsigned swz_y,
+		     unsigned swz_z,
+		     unsigned swz_w)
+{
+	control(file, "negate", negate, _negate, NULL);
+	control(file, "abs", _abs, __abs, NULL);
+
+	if (reg(file, _reg_file, _reg_nr))
+		return;
+
+	if (_subreg_nr)
+		/* bit4 for subreg number byte addressing. Make this same meaning as
+		   in da1 case, so output looks consistent. */
+		format(file, ".%d", 16 / reg_type_size[_reg_type]);
+	string(file, "<");
+	control(file, "vert stride", vert_stride, _vert_stride, NULL);
+	string(file, ",4,1>");
+	/*
+	 * Three kinds of swizzle display:
+	 *  identity - nothing printed
+	 *  1->all	 - print the single channel
+	 *  1->1     - print the mapping
+	 */
+	if (swz_x == BRW_CHANNEL_X &&
+	    swz_y == BRW_CHANNEL_Y &&
+	    swz_z == BRW_CHANNEL_Z &&
+	    swz_w == BRW_CHANNEL_W)
+	{
+		;
+	}
+	else if (swz_x == swz_y && swz_x == swz_z && swz_x == swz_w)
+	{
+		string(file, ".");
+		control(file, "channel select", chan_sel, swz_x, NULL);
+	}
+	else
+	{
+		string(file, ".");
+		control(file, "channel select", chan_sel, swz_x, NULL);
+		control(file, "channel select", chan_sel, swz_y, NULL);
+		control(file, "channel select", chan_sel, swz_z, NULL);
+		control(file, "channel select", chan_sel, swz_w, NULL);
+	}
+	control(file, "src da16 reg type", reg_encoding, _reg_type, NULL);
+}
+
+/* Print an immediate operand of the given register type.
+ *
+ * The immediate payload lives in bits3 of the instruction; narrower
+ * types use only the low bits of the 32-bit word.  Unknown types
+ * print nothing (same as before).
+ */
+static void imm(FILE *file, unsigned type, const struct brw_instruction *inst)
+{
+	switch (type) {
+	case BRW_REGISTER_TYPE_UD:
+		format(file, "0x%08xUD", inst->bits3.ud);
+		break;
+	case BRW_REGISTER_TYPE_D:
+		format(file, "%dD", inst->bits3.d);
+		break;
+	case BRW_REGISTER_TYPE_UW:
+		format(file, "0x%04xUW", (uint16_t) inst->bits3.ud);
+		break;
+	case BRW_REGISTER_TYPE_W:
+		format(file, "%dW", (int16_t) inst->bits3.d);
+		break;
+	case BRW_REGISTER_TYPE_UB:
+		/* Cast to uint8_t, not int8_t: an int8_t is sign-extended
+		 * by default argument promotion, so a negative byte would
+		 * print as "0xffffffXXUB" instead of two hex digits.
+		 */
+		format(file, "0x%02xUB", (uint8_t) inst->bits3.ud);
+		break;
+	case BRW_REGISTER_TYPE_VF:
+		format(file, "Vector Float");
+		break;
+	case BRW_REGISTER_TYPE_V:
+		format(file, "0x%08xV", inst->bits3.ud);
+		break;
+	case BRW_REGISTER_TYPE_F:
+		format(file, "%-gF", inst->bits3.f);
+		break;
+	}
+}
+
+/* Decode and print source operand 0.
+ *
+ * Immediates are printed from bits3; otherwise the field layout is
+ * selected by access mode (align1 vs align16) and address mode
+ * (direct vs register-indirect).  Indirect align16 is not decoded.
+ */
+static void src0(FILE *file, const struct brw_instruction *inst)
+{
+	if (inst->bits1.da1.src0_reg_file == BRW_IMMEDIATE_VALUE)
+		imm(file, inst->bits1.da1.src0_reg_type, inst);
+	else if (inst->header.access_mode == BRW_ALIGN_1) {
+		if (inst->bits2.da1.src0_address_mode == BRW_ADDRESS_DIRECT) {
+			src_da1(file,
+				inst->bits1.da1.src0_reg_type,
+				inst->bits1.da1.src0_reg_file,
+				inst->bits2.da1.src0_vert_stride,
+				inst->bits2.da1.src0_width,
+				inst->bits2.da1.src0_horiz_stride,
+				inst->bits2.da1.src0_reg_nr,
+				inst->bits2.da1.src0_subreg_nr,
+				inst->bits2.da1.src0_abs,
+				inst->bits2.da1.src0_negate);
+		} else {
+			src_ia1(file,
+				inst->bits1.ia1.src0_reg_type,
+				inst->bits1.ia1.src0_reg_file,
+				inst->bits2.ia1.src0_indirect_offset,
+				inst->bits2.ia1.src0_subreg_nr,
+				inst->bits2.ia1.src0_negate,
+				inst->bits2.ia1.src0_abs,
+				inst->bits2.ia1.src0_address_mode,
+				inst->bits2.ia1.src0_horiz_stride,
+				inst->bits2.ia1.src0_width,
+				inst->bits2.ia1.src0_vert_stride);
+		}
+	} else {
+		if (inst->bits2.da16.src0_address_mode == BRW_ADDRESS_DIRECT) {
+			src_da16(file,
+				 inst->bits1.da16.src0_reg_type,
+				 inst->bits1.da16.src0_reg_file,
+				 inst->bits2.da16.src0_vert_stride,
+				 inst->bits2.da16.src0_reg_nr,
+				 inst->bits2.da16.src0_subreg_nr,
+				 inst->bits2.da16.src0_abs,
+				 inst->bits2.da16.src0_negate,
+				 inst->bits2.da16.src0_swz_x,
+				 inst->bits2.da16.src0_swz_y,
+				 inst->bits2.da16.src0_swz_z,
+				 inst->bits2.da16.src0_swz_w);
+		} else {
+			string(file, "Indirect align16 address mode not supported");
+		}
+	}
+}
+
+/* Decode and print source operand 1.
+ *
+ * Mirrors src0() but the non-immediate fields live in bits3 rather
+ * than bits2.  Indirect align16 is not decoded.
+ */
+static void src1(FILE *file, const struct brw_instruction *inst)
+{
+	if (inst->bits1.da1.src1_reg_file == BRW_IMMEDIATE_VALUE)
+		imm(file, inst->bits1.da1.src1_reg_type, inst);
+	else if (inst->header.access_mode == BRW_ALIGN_1) {
+		if (inst->bits3.da1.src1_address_mode == BRW_ADDRESS_DIRECT) {
+			src_da1(file,
+				inst->bits1.da1.src1_reg_type,
+				inst->bits1.da1.src1_reg_file,
+				inst->bits3.da1.src1_vert_stride,
+				inst->bits3.da1.src1_width,
+				inst->bits3.da1.src1_horiz_stride,
+				inst->bits3.da1.src1_reg_nr,
+				inst->bits3.da1.src1_subreg_nr,
+				inst->bits3.da1.src1_abs,
+				inst->bits3.da1.src1_negate);
+		} else {
+			src_ia1(file,
+				inst->bits1.ia1.src1_reg_type,
+				inst->bits1.ia1.src1_reg_file,
+				inst->bits3.ia1.src1_indirect_offset,
+				inst->bits3.ia1.src1_subreg_nr,
+				inst->bits3.ia1.src1_negate,
+				inst->bits3.ia1.src1_abs,
+				inst->bits3.ia1.src1_address_mode,
+				inst->bits3.ia1.src1_horiz_stride,
+				inst->bits3.ia1.src1_width,
+				inst->bits3.ia1.src1_vert_stride);
+		}
+	} else {
+		if (inst->bits3.da16.src1_address_mode == BRW_ADDRESS_DIRECT) {
+			src_da16(file,
+				 inst->bits1.da16.src1_reg_type,
+				 inst->bits1.da16.src1_reg_file,
+				 inst->bits3.da16.src1_vert_stride,
+				 inst->bits3.da16.src1_reg_nr,
+				 inst->bits3.da16.src1_subreg_nr,
+				 inst->bits3.da16.src1_abs,
+				 inst->bits3.da16.src1_negate,
+				 inst->bits3.da16.src1_swz_x,
+				 inst->bits3.da16.src1_swz_y,
+				 inst->bits3.da16.src1_swz_z,
+				 inst->bits3.da16.src1_swz_w);
+		} else {
+			string(file, "Indirect align16 address mode not supported");
+		}
+	}
+}
+
+/* Channel count for each BRW_EXECUTE_* encoding of
+ * header.execution_size (0 -> 1 channel, ..., 5 -> 32 channels).
+ */
+static const int esize[6] = {
+	[0] = 1,
+	[1] = 2,
+	[2] = 4,
+	[3] = 8,
+	[4] = 16,
+	[5] = 32,
+};
+
+/* Print which quarter (SIMD8) or half (SIMD16) of the dispatch mask
+ * this instruction covers, as selected by the compression control
+ * field.  Other execution sizes produce no annotation.
+ */
+static int qtr_ctrl(FILE *file, const struct brw_instruction *inst)
+{
+	static const char * const quarter_name[4] = {
+		" 1Q", " 2Q", " 3Q", " 4Q"
+	};
+	int ctl = inst->header.compression_control;
+
+	switch (esize[inst->header.execution_size]) {
+	case 8:
+		if (ctl >= 0 && ctl <= 3)
+			string(file, quarter_name[ctl]);
+		break;
+	case 16:
+		string(file, ctl < 2 ? " 1H" : " 2H");
+		break;
+	}
+	return 0;
+}
+
+/* Disassemble one 128-bit EU instruction to 'file'.
+ *
+ * NOTE(review): 'gen' appears to be the device generation times ten
+ * (45 = g4x, 60 = gen6, ...), judging by the comparisons against
+ * 45/50/60/70 below -- confirm against callers.
+ */
+void brw_disasm(FILE *file, const struct brw_instruction *inst, int gen)
+{
+	int space = 0;
+
+	/* Raw dword dump of the instruction ahead of the decode. */
+	format(file, "%08x %08x %08x %08x\n",
+	       ((uint32_t*)inst)[0],
+	       ((uint32_t*)inst)[1],
+	       ((uint32_t*)inst)[2],
+	       ((uint32_t*)inst)[3]);
+
+	/* Predication prefix, e.g. "(-f0.1.any4h) ". */
+	if (inst->header.predicate_control) {
+		string(file, "(");
+		control(file, "predicate inverse", pred_inv, inst->header.predicate_inverse, NULL);
+		string(file, "f0");
+		if (inst->bits2.da1.flag_subreg_nr)
+			format(file, ".%d", inst->bits2.da1.flag_subreg_nr);
+		if (inst->header.access_mode == BRW_ALIGN_1)
+			control(file, "predicate control align1", pred_ctrl_align1,
+				inst->header.predicate_control, NULL);
+		else
+			control(file, "predicate control align16", pred_ctrl_align16,
+				inst->header.predicate_control, NULL);
+		string(file, ") ");
+	}
+
+	print_opcode(file, inst->header.opcode);
+	control(file, "saturate", saturate, inst->header.saturate, NULL);
+	control(file, "debug control", debug_ctrl, inst->header.debug_control, NULL);
+
+	/* destreg__conditionalmod is overloaded: math function for MATH,
+	 * message target for SEND/SENDC (decoded further down), and the
+	 * conditional modifier for everything else.
+	 */
+	if (inst->header.opcode == BRW_OPCODE_MATH) {
+		string(file, " ");
+		control(file, "function", math_function,
+			inst->header.destreg__conditionalmod, NULL);
+	} else if (inst->header.opcode != BRW_OPCODE_SEND &&
+		   inst->header.opcode != BRW_OPCODE_SENDC)
+		control(file, "conditional modifier", conditional_modifier,
+			inst->header.destreg__conditionalmod, NULL);
+
+	if (inst->header.opcode != BRW_OPCODE_NOP) {
+		string(file, "(");
+		control(file, "execution size", exec_size, inst->header.execution_size, NULL);
+		string(file, ")");
+	}
+
+	if (inst->header.opcode == BRW_OPCODE_SEND && gen < 60)
+		format(file, " %d", inst->header.destreg__conditionalmod);
+
+	/* Destination operand (padded to column 16); gen6+ flow-control
+	 * opcodes carry a jump count instead of a destination.
+	 */
+	if (opcode[inst->header.opcode].ndst > 0) {
+		pad(file, 16);
+		dest(file, inst);
+	} else if (gen >= 60 && (inst->header.opcode == BRW_OPCODE_IF ||
+				 inst->header.opcode == BRW_OPCODE_ELSE ||
+				 inst->header.opcode == BRW_OPCODE_ENDIF ||
+				 inst->header.opcode == BRW_OPCODE_WHILE)) {
+		format(file, " %d", inst->bits1.branch_gen6.jump_count);
+	}
+
+	if (opcode[inst->header.opcode].nsrc > 0) {
+		pad(file, 32);
+		src0(file, inst);
+	}
+	if (opcode[inst->header.opcode].nsrc > 1) {
+		pad(file, 48);
+		src1(file, inst);
+	}
+
+	/* SEND/SENDC: decode the message descriptor on its own line.
+	 * The shared-function id moved between generations.
+	 */
+	if (inst->header.opcode == BRW_OPCODE_SEND ||
+	    inst->header.opcode == BRW_OPCODE_SENDC) {
+		enum brw_message_target target;
+
+		if (gen >= 60)
+			target = inst->header.destreg__conditionalmod;
+		else if (gen >= 50)
+			target = inst->bits2.send_gen5.sfid;
+		else
+			target = inst->bits3.generic.msg_target;
+
+		newline (file);
+		pad (file, 16);
+		space = 0;
+
+		if (gen >= 60) {
+			control (file, "target function", target_function_gen6,
+				 target, &space);
+		} else {
+			control (file, "target function", target_function,
+				 target, &space);
+		}
+
+		switch (target) {
+		case BRW_SFID_MATH:
+			control (file, "math function", math_function,
+				 inst->bits3.math.function, &space);
+			control (file, "math saturate", math_saturate,
+				 inst->bits3.math.saturate, &space);
+			control (file, "math signed", math_signed,
+				 inst->bits3.math.int_type, &space);
+			control (file, "math scalar", math_scalar,
+				 inst->bits3.math.data_type, &space);
+			control (file, "math precision", math_precision,
+				 inst->bits3.math.precision, &space);
+			break;
+		case BRW_SFID_SAMPLER:
+			if (gen >= 70) {
+				format (file, " (%d, %d, %d, %d)",
+					inst->bits3.sampler_gen7.binding_table_index,
+					inst->bits3.sampler_gen7.sampler,
+					inst->bits3.sampler_gen7.msg_type,
+					inst->bits3.sampler_gen7.simd_mode);
+			} else if (gen >= 50) {
+				format (file, " (%d, %d, %d, %d)",
+					inst->bits3.sampler_gen5.binding_table_index,
+					inst->bits3.sampler_gen5.sampler,
+					inst->bits3.sampler_gen5.msg_type,
+					inst->bits3.sampler_gen5.simd_mode);
+			} else if (gen >= 45) {
+				format (file, " (%d, %d)",
+					inst->bits3.sampler_g4x.binding_table_index,
+					inst->bits3.sampler_g4x.sampler);
+			} else {
+				format (file, " (%d, %d, ",
+					inst->bits3.sampler.binding_table_index,
+					inst->bits3.sampler.sampler);
+				control (file, "sampler target format",
+					 sampler_target_format,
+					 inst->bits3.sampler.return_format, NULL);
+				string (file, ")");
+			}
+			break;
+		case BRW_SFID_DATAPORT_READ:
+			if (gen >= 60) {
+				format (file, " (%d, %d, %d, %d)",
+					inst->bits3.gen6_dp.binding_table_index,
+					inst->bits3.gen6_dp.msg_control,
+					inst->bits3.gen6_dp.msg_type,
+					inst->bits3.gen6_dp.send_commit_msg);
+			} else if (gen >= 45) {
+				format (file, " (%d, %d, %d)",
+					inst->bits3.dp_read_gen5.binding_table_index,
+					inst->bits3.dp_read_gen5.msg_control,
+					inst->bits3.dp_read_gen5.msg_type);
+			} else {
+				format (file, " (%d, %d, %d)",
+					inst->bits3.dp_read.binding_table_index,
+					inst->bits3.dp_read.msg_control,
+					inst->bits3.dp_read.msg_type);
+			}
+			break;
+
+		case BRW_SFID_DATAPORT_WRITE:
+			if (gen >= 70) {
+				format (file, " (");
+
+				control (file, "DP rc message type",
+					 dp_rc_msg_type_gen6,
+					 inst->bits3.gen7_dp.msg_type, &space);
+
+				format (file, ", %d, %d, %d)",
+					inst->bits3.gen7_dp.binding_table_index,
+					inst->bits3.gen7_dp.msg_control,
+					inst->bits3.gen7_dp.msg_type);
+			} else if (gen >= 60) {
+				format (file, " (");
+
+				control (file, "DP rc message type",
+					 dp_rc_msg_type_gen6,
+					 inst->bits3.gen6_dp.msg_type, &space);
+
+				format (file, ", %d, %d, %d, %d)",
+					inst->bits3.gen6_dp.binding_table_index,
+					inst->bits3.gen6_dp.msg_control,
+					inst->bits3.gen6_dp.msg_type,
+					inst->bits3.gen6_dp.send_commit_msg);
+			} else {
+				format (file, " (%d, %d, %d, %d)",
+					inst->bits3.dp_write.binding_table_index,
+					(inst->bits3.dp_write.last_render_target << 3) |
+					inst->bits3.dp_write.msg_control,
+					inst->bits3.dp_write.msg_type,
+					inst->bits3.dp_write.send_commit_msg);
+			}
+			break;
+
+		case BRW_SFID_URB:
+			if (gen >= 50) {
+				format (file, " %d", inst->bits3.urb_gen5.offset);
+			} else {
+				format (file, " %d", inst->bits3.urb.offset);
+			}
+
+			space = 1;
+			if (gen >= 50) {
+				control (file, "urb opcode", urb_opcode,
+					 inst->bits3.urb_gen5.opcode, &space);
+			}
+			control (file, "urb swizzle", urb_swizzle,
+				 inst->bits3.urb.swizzle_control, &space);
+			control (file, "urb allocate", urb_allocate,
+				 inst->bits3.urb.allocate, &space);
+			control (file, "urb used", urb_used,
+				 inst->bits3.urb.used, &space);
+			control (file, "urb complete", urb_complete,
+				 inst->bits3.urb.complete, &space);
+			break;
+		case BRW_SFID_THREAD_SPAWNER:
+			break;
+		case GEN7_SFID_DATAPORT_DATA_CACHE:
+			format (file, " (%d, %d, %d)",
+				inst->bits3.gen7_dp.binding_table_index,
+				inst->bits3.gen7_dp.msg_control,
+				inst->bits3.gen7_dp.msg_type);
+			break;
+
+
+		default:
+			format (file, "unsupported target %d", target);
+			break;
+		}
+		if (space)
+			string (file, " ");
+		/* Message and response lengths in registers. */
+		if (gen >= 50) {
+			format (file, "mlen %d",
+				inst->bits3.generic_gen5.msg_length);
+			format (file, " rlen %d",
+				inst->bits3.generic_gen5.response_length);
+		} else {
+			format (file, "mlen %d",
+				inst->bits3.generic.msg_length);
+			format (file, " rlen %d",
+				inst->bits3.generic.response_length);
+		}
+	}
+	/* Trailing "{ ... }" execution-control flag annotations. */
+	pad(file, 64);
+	if (inst->header.opcode != BRW_OPCODE_NOP) {
+		string(file, "{");
+		space = 1;
+		control(file, "access mode", access_mode, inst->header.access_mode, &space);
+		if (gen >= 60)
+			control(file, "write enable control", wectrl, inst->header.mask_control, &space);
+		else
+			control(file, "mask control", mask_ctrl, inst->header.mask_control, &space);
+		control(file, "dependency control", dep_ctrl, inst->header.dependency_control, &space);
+
+		if (gen >= 60)
+			qtr_ctrl(file, inst);
+		else {
+			/* Pre-gen6: a compressed MRF write with bit7 of the
+			 * register number set is the special compr4 mode.
+			 */
+			if (inst->header.compression_control == BRW_COMPRESSION_COMPRESSED &&
+			    opcode[inst->header.opcode].ndst > 0 &&
+			    inst->bits1.da1.dest_reg_file == BRW_MESSAGE_REGISTER_FILE &&
+			    inst->bits1.da1.dest_reg_nr & (1 << 7)) {
+				format(file, " compr4");
+			} else {
+				control(file, "compression control", compr_ctrl,
+					inst->header.compression_control, &space);
+			}
+		}
+
+		control(file, "thread control", thread_ctrl, inst->header.thread_control, &space);
+		if (gen >= 60)
+			control(file, "acc write control", accwr, inst->header.acc_wr_control, &space);
+		if (inst->header.opcode == BRW_OPCODE_SEND ||
+		    inst->header.opcode == BRW_OPCODE_SENDC)
+			control(file, "end of thread", end_of_thread,
+				inst->bits3.generic.end_of_thread, &space);
+		if (space)
+			string(file, " ");
+		string(file, "}");
+	}
+	string(file, ";");
+	newline(file);
+}
diff --git a/src/sna/brw/brw_eu.c b/src/sna/brw/brw_eu.c
new file mode 100644
index 0000000..7c32ea1
--- /dev/null
+++ b/src/sna/brw/brw_eu.c
@@ -0,0 +1,150 @@
+/*
+ Copyright (C) Intel Corp.  2006.  All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+ /*
+  * Authors:
+  *   Keith Whitwell <keith at tungstengraphics.com>
+  */
+
+#include "brw_eu.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+/* Returns the corresponding conditional mod for swapping src0 and
+ * src1 in e.g. CMP.
+ */
+uint32_t
+brw_swap_cmod(uint32_t cmod)
+{
+	/* Equality tests are symmetric in their operands; orderings are
+	 * mirrored; anything else has no swapped counterpart.
+	 */
+	if (cmod == BRW_CONDITIONAL_Z || cmod == BRW_CONDITIONAL_NZ)
+		return cmod;
+	if (cmod == BRW_CONDITIONAL_G)
+		return BRW_CONDITIONAL_LE;
+	if (cmod == BRW_CONDITIONAL_GE)
+		return BRW_CONDITIONAL_L;
+	if (cmod == BRW_CONDITIONAL_L)
+		return BRW_CONDITIONAL_GE;
+	if (cmod == BRW_CONDITIONAL_LE)
+		return BRW_CONDITIONAL_G;
+	return ~0;
+}
+
+/* How does predicate control work when execution_size != 8?  Do I
+ * need to test/set for 0xffff when execution_size is 16?
+ */
+void brw_set_predicate_control_flag_value( struct brw_compile *p, unsigned value )
+{
+	/* 0xff is the sentinel for "no predication". */
+	p->current->header.predicate_control = BRW_PREDICATE_NONE;
+
+	if (value != 0xff) {
+		/* Only reload f0 when the cached flag value differs, to
+		 * avoid emitting a redundant MOV to the flag register.
+		 */
+		if (value != p->flag_value) {
+			brw_MOV(p, brw_flag_reg(), brw_imm_uw(value));
+			p->flag_value = value;
+		}
+
+		p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+	}
+}
+
+/* Record whether subsequent instructions are emitted compressed and
+ * set the matching compression-control bits in the current
+ * instruction template (translated to GEN6_COMPRESSION_* on gen6+).
+ */
+void brw_set_compression_control(struct brw_compile *p,
+				 enum brw_compression compression_control)
+{
+	p->compressed = (compression_control == BRW_COMPRESSION_COMPRESSED);
+
+	if (p->gen >= 60) {
+		/* Since we don't use the 32-wide support in gen6, we translate
+		 * the pre-gen6 compression control here.
+		 */
+		switch (compression_control) {
+		case BRW_COMPRESSION_NONE:
+			/* This is the "use the first set of bits of dmask/vmask/arf
+			 * according to execsize" option.
+			 */
+			p->current->header.compression_control = GEN6_COMPRESSION_1Q;
+			break;
+		case BRW_COMPRESSION_2NDHALF:
+			/* For 8-wide, this is "use the second set of 8 bits." */
+			p->current->header.compression_control = GEN6_COMPRESSION_2Q;
+			break;
+		case BRW_COMPRESSION_COMPRESSED:
+			/* For 16-wide instruction compression, use the first set of 16 bits
+			 * since we don't do 32-wide dispatch.
+			 */
+			p->current->header.compression_control = GEN6_COMPRESSION_1H;
+			break;
+		default:
+			assert(!"not reached");
+			p->current->header.compression_control = GEN6_COMPRESSION_1H;
+			break;
+		}
+	} else {
+		p->current->header.compression_control = compression_control;
+	}
+}
+
+/* Save the current instruction-template state (and the compressed
+ * flag) so it can be restored later by brw_pop_insn_state().
+ */
+void brw_push_insn_state( struct brw_compile *p )
+{
+	/* Guard against overflowing the fixed-size state stack. */
+	assert(p->current != &p->stack[BRW_EU_MAX_INSN_STACK-1]);
+	memcpy(p->current+1, p->current, sizeof(struct brw_instruction));
+	p->compressed_stack[p->current - p->stack] = p->compressed;
+	p->current++;
+}
+
+/* Restore the instruction-template state and compressed flag saved by
+ * the matching brw_push_insn_state().
+ */
+void brw_pop_insn_state( struct brw_compile *p )
+{
+	/* Underflow means unbalanced push/pop calls. */
+	assert(p->current != p->stack);
+	p->current--;
+	p->compressed = p->compressed_stack[p->current - p->stack];
+}
+
+/* Initialise a compile context.  'store' receives the emitted
+ * instructions; 'gen' is the device generation (compared against 60
+ * in brw_set_compression_control, so presumably generation x10 --
+ * NOTE(review): confirm against callers).
+ */
+void brw_compile_init(struct brw_compile *p, int gen, void *store)
+{
+	assert(gen);
+
+	p->gen = gen;
+	p->store = store;
+
+	p->nr_insn = 0;
+	p->current = p->stack;
+	p->compressed = false;
+	memset(p->current, 0, sizeof(p->current[0]));
+
+	/* Some defaults?
+	*/
+	brw_set_mask_control(p, BRW_MASK_ENABLE); /* what does this do? */
+	brw_set_saturate(p, 0);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+	brw_set_predicate_control_flag_value(p, 0xff);
+
+	p->if_stack_depth = 0;
+	p->if_stack_array_size = 0;
+	p->if_stack = NULL;
+}
diff --git a/src/sna/brw/brw_eu.h b/src/sna/brw/brw_eu.h
new file mode 100644
index 0000000..65e66d5
--- /dev/null
+++ b/src/sna/brw/brw_eu.h
@@ -0,0 +1,2266 @@
+/*
+   Copyright (C) Intel Corp.  2006.  All Rights Reserved.
+   Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+   develop this 3D driver.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice (including the
+   next paragraph) shall be included in all copies or substantial
+   portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+   LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+   OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+   WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+/*
+ * Authors:
+ *   Keith Whitwell <keith at tungstengraphics.com>
+ */
+
+
+#ifndef BRW_EU_H
+#define BRW_EU_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <assert.h>
+
+#define BRW_SWIZZLE4(a,b,c,d) (((a)<<0) | ((b)<<2) | ((c)<<4) | ((d)<<6))
+#define BRW_GET_SWZ(swz, idx) (((swz) >> ((idx)*2)) & 0x3)
+
+#define BRW_SWIZZLE_NOOP      BRW_SWIZZLE4(0,1,2,3)
+#define BRW_SWIZZLE_XYZW      BRW_SWIZZLE4(0,1,2,3)
+#define BRW_SWIZZLE_XXXX      BRW_SWIZZLE4(0,0,0,0)
+#define BRW_SWIZZLE_YYYY      BRW_SWIZZLE4(1,1,1,1)
+#define BRW_SWIZZLE_ZZZZ      BRW_SWIZZLE4(2,2,2,2)
+#define BRW_SWIZZLE_WWWW      BRW_SWIZZLE4(3,3,3,3)
+#define BRW_SWIZZLE_XYXY      BRW_SWIZZLE4(0,1,0,1)
+
+#define WRITEMASK_X 0x1
+#define WRITEMASK_Y 0x2
+#define WRITEMASK_Z 0x4
+#define WRITEMASK_W 0x8
+
+#define WRITEMASK_XY (WRITEMASK_X | WRITEMASK_Y)
+#define WRITEMASK_XYZ (WRITEMASK_X | WRITEMASK_Y | WRITEMASK_Z)
+#define WRITEMASK_XYZW (WRITEMASK_X | WRITEMASK_Y | WRITEMASK_Z | WRITEMASK_W)
+
+/** Number of general purpose registers (VS, WM, etc) */
+#define BRW_MAX_GRF 128
+
+/** Number of message register file registers */
+#define BRW_MAX_MRF 16
+
+
+#define BRW_ALIGN_1   0
+#define BRW_ALIGN_16  1
+
+#define BRW_ADDRESS_DIRECT                        0
+#define BRW_ADDRESS_REGISTER_INDIRECT_REGISTER    1
+
+#define BRW_CHANNEL_X     0
+#define BRW_CHANNEL_Y     1
+#define BRW_CHANNEL_Z     2
+#define BRW_CHANNEL_W     3
+
+/* Pre-gen6 compression-control encoding; brw_set_compression_control()
+ * translates these to the GEN6_COMPRESSION_* values on gen6+.
+ */
+enum brw_compression {
+	BRW_COMPRESSION_NONE,
+	BRW_COMPRESSION_2NDHALF,
+	BRW_COMPRESSION_COMPRESSED,
+};
+
+#define GEN6_COMPRESSION_1Q		0
+#define GEN6_COMPRESSION_2Q		1
+#define GEN6_COMPRESSION_3Q		2
+#define GEN6_COMPRESSION_4Q		3
+#define GEN6_COMPRESSION_1H		0
+#define GEN6_COMPRESSION_2H		2
+
+#define BRW_CONDITIONAL_NONE  0
+#define BRW_CONDITIONAL_Z     1
+#define BRW_CONDITIONAL_NZ    2
+#define BRW_CONDITIONAL_EQ    1	/* Z */
+#define BRW_CONDITIONAL_NEQ   2	/* NZ */
+#define BRW_CONDITIONAL_G     3
+#define BRW_CONDITIONAL_GE    4
+#define BRW_CONDITIONAL_L     5
+#define BRW_CONDITIONAL_LE    6
+#define BRW_CONDITIONAL_R     7
+#define BRW_CONDITIONAL_O     8
+#define BRW_CONDITIONAL_U     9
+
+#define BRW_DEBUG_NONE        0
+#define BRW_DEBUG_BREAKPOINT  1
+
+#define BRW_DEPENDENCY_NORMAL         0
+#define BRW_DEPENDENCY_NOTCLEARED     1
+#define BRW_DEPENDENCY_NOTCHECKED     2
+#define BRW_DEPENDENCY_DISABLE        3
+
+#define BRW_EXECUTE_1     0
+#define BRW_EXECUTE_2     1
+#define BRW_EXECUTE_4     2
+#define BRW_EXECUTE_8     3
+#define BRW_EXECUTE_16    4
+#define BRW_EXECUTE_32    5
+
+#define BRW_HORIZONTAL_STRIDE_0   0
+#define BRW_HORIZONTAL_STRIDE_1   1
+#define BRW_HORIZONTAL_STRIDE_2   2
+#define BRW_HORIZONTAL_STRIDE_4   3
+
+#define BRW_INSTRUCTION_NORMAL    0
+#define BRW_INSTRUCTION_SATURATE  1
+
+#define BRW_MASK_ENABLE   0
+#define BRW_MASK_DISABLE  1
+
+/** @{
+ *
+ * Gen6 has replaced "mask enable/disable" with WECtrl, which is
+ * effectively the same but much simpler to think about.  Now, there
+ * are two contributors ANDed together to whether channels are
+ * executed: The predication on the instruction, and the channel write
+ * enable.
+ */
+/**
+ * This is the default value.  It means that a channel's write enable is set
+ * if the per-channel IP is pointing at this instruction.
+ */
+#define BRW_WE_NORMAL		0
+/**
+ * This is used like BRW_MASK_DISABLE, and causes all channels to have
+ * their write enable set.  Note that predication still contributes to
+ * whether the channel actually gets written.
+ */
+#define BRW_WE_ALL		1
+/** @} */
+
+enum opcode {
+	/* These are the actual hardware opcodes. */
+	BRW_OPCODE_MOV =	1,
+	BRW_OPCODE_SEL =	2,
+	BRW_OPCODE_NOT =	4,
+	BRW_OPCODE_AND =	5,
+	BRW_OPCODE_OR =	6,
+	BRW_OPCODE_XOR =	7,
+	BRW_OPCODE_SHR =	8,
+	BRW_OPCODE_SHL =	9,
+	BRW_OPCODE_RSR =	10,
+	BRW_OPCODE_RSL =	11,
+	BRW_OPCODE_ASR =	12,
+	BRW_OPCODE_CMP =	16,
+	BRW_OPCODE_CMPN =	17,
+	BRW_OPCODE_JMPI =	32,
+	BRW_OPCODE_IF =	34,
+	BRW_OPCODE_IFF =	35,
+	BRW_OPCODE_ELSE =	36,
+	BRW_OPCODE_ENDIF =	37,
+	BRW_OPCODE_DO =	38,
+	BRW_OPCODE_WHILE =	39,
+	BRW_OPCODE_BREAK =	40,
+	BRW_OPCODE_CONTINUE = 41,
+	BRW_OPCODE_HALT =	42,
+	BRW_OPCODE_MSAVE =	44,
+	BRW_OPCODE_MRESTORE = 45,
+	BRW_OPCODE_PUSH =	46,
+	BRW_OPCODE_POP =	47,
+	BRW_OPCODE_WAIT =	48,
+	BRW_OPCODE_SEND =	49,
+	BRW_OPCODE_SENDC =	50,
+	BRW_OPCODE_MATH =	56,
+	BRW_OPCODE_ADD =	64,
+	BRW_OPCODE_MUL =	65,
+	BRW_OPCODE_AVG =	66,
+	BRW_OPCODE_FRC =	67,
+	BRW_OPCODE_RNDU =	68,
+	BRW_OPCODE_RNDD =	69,
+	BRW_OPCODE_RNDE =	70,
+	BRW_OPCODE_RNDZ =	71,
+	BRW_OPCODE_MAC =	72,
+	BRW_OPCODE_MACH =	73,
+	BRW_OPCODE_LZD =	74,
+	BRW_OPCODE_SAD2 =	80,
+	BRW_OPCODE_SADA2 =	81,
+	BRW_OPCODE_DP4 =	84,
+	BRW_OPCODE_DPH =	85,
+	BRW_OPCODE_DP3 =	86,
+	BRW_OPCODE_DP2 =	87,
+	BRW_OPCODE_DPA2 =	88,
+	BRW_OPCODE_LINE =	89,
+	BRW_OPCODE_PLN =	90,
+	BRW_OPCODE_NOP =	126,
+
+	/* These are compiler backend opcodes that get translated into other
+	 * instructions.  Starting at 128 keeps them clear of the 7-bit
+	 * hardware opcode space (all hardware values above are <= 126).
+	 */
+	FS_OPCODE_FB_WRITE = 128,
+	SHADER_OPCODE_RCP,
+	SHADER_OPCODE_RSQ,
+	SHADER_OPCODE_SQRT,
+	SHADER_OPCODE_EXP2,
+	SHADER_OPCODE_LOG2,
+	SHADER_OPCODE_POW,
+	SHADER_OPCODE_SIN,
+	SHADER_OPCODE_COS,
+	FS_OPCODE_DDX,
+	FS_OPCODE_DDY,
+	FS_OPCODE_PIXEL_X,
+	FS_OPCODE_PIXEL_Y,
+	FS_OPCODE_CINTERP,
+	FS_OPCODE_LINTERP,
+	FS_OPCODE_TEX,
+	FS_OPCODE_TXB,
+	FS_OPCODE_TXD,
+	FS_OPCODE_TXF,
+	FS_OPCODE_TXL,
+	FS_OPCODE_TXS,
+	FS_OPCODE_DISCARD,
+	FS_OPCODE_SPILL,
+	FS_OPCODE_UNSPILL,
+	FS_OPCODE_PULL_CONSTANT_LOAD,
+
+	VS_OPCODE_URB_WRITE,
+	VS_OPCODE_SCRATCH_READ,
+	VS_OPCODE_SCRATCH_WRITE,
+	VS_OPCODE_PULL_CONSTANT_LOAD,
+};
+
+#define BRW_PREDICATE_NONE             0
+#define BRW_PREDICATE_NORMAL           1
+#define BRW_PREDICATE_ALIGN1_ANYV             2
+#define BRW_PREDICATE_ALIGN1_ALLV             3
+#define BRW_PREDICATE_ALIGN1_ANY2H            4
+#define BRW_PREDICATE_ALIGN1_ALL2H            5
+#define BRW_PREDICATE_ALIGN1_ANY4H            6
+#define BRW_PREDICATE_ALIGN1_ALL4H            7
+#define BRW_PREDICATE_ALIGN1_ANY8H            8
+#define BRW_PREDICATE_ALIGN1_ALL8H            9
+#define BRW_PREDICATE_ALIGN1_ANY16H           10
+#define BRW_PREDICATE_ALIGN1_ALL16H           11
+#define BRW_PREDICATE_ALIGN16_REPLICATE_X     2
+#define BRW_PREDICATE_ALIGN16_REPLICATE_Y     3
+#define BRW_PREDICATE_ALIGN16_REPLICATE_Z     4
+#define BRW_PREDICATE_ALIGN16_REPLICATE_W     5
+#define BRW_PREDICATE_ALIGN16_ANY4H           6
+#define BRW_PREDICATE_ALIGN16_ALL4H           7
+
+#define BRW_ARCHITECTURE_REGISTER_FILE    0
+#define BRW_GENERAL_REGISTER_FILE         1
+#define BRW_MESSAGE_REGISTER_FILE         2
+#define BRW_IMMEDIATE_VALUE               3
+
+#define BRW_REGISTER_TYPE_UD  0
+#define BRW_REGISTER_TYPE_D   1
+#define BRW_REGISTER_TYPE_UW  2
+#define BRW_REGISTER_TYPE_W   3
+#define BRW_REGISTER_TYPE_UB  4
+#define BRW_REGISTER_TYPE_B   5
+#define BRW_REGISTER_TYPE_VF  5	/* packed float vector, immediates only? */
+#define BRW_REGISTER_TYPE_HF  6
+#define BRW_REGISTER_TYPE_V   6	/* packed int vector, immediates only, uword dest only */
+#define BRW_REGISTER_TYPE_F   7
+
+#define BRW_ARF_NULL                  0x00
+#define BRW_ARF_ADDRESS               0x10
+#define BRW_ARF_ACCUMULATOR           0x20
+#define BRW_ARF_FLAG                  0x30
+#define BRW_ARF_MASK                  0x40
+#define BRW_ARF_MASK_STACK            0x50
+#define BRW_ARF_MASK_STACK_DEPTH      0x60
+#define BRW_ARF_STATE                 0x70
+#define BRW_ARF_CONTROL               0x80
+#define BRW_ARF_NOTIFICATION_COUNT    0x90
+#define BRW_ARF_IP                    0xA0
+
+#define BRW_MRF_COMPR4			(1 << 7)
+
+#define BRW_AMASK   0
+#define BRW_IMASK   1
+#define BRW_LMASK   2
+#define BRW_CMASK   3
+
+#define BRW_THREAD_NORMAL     0
+#define BRW_THREAD_ATOMIC     1
+#define BRW_THREAD_SWITCH     2
+
+#define BRW_VERTICAL_STRIDE_0                 0
+#define BRW_VERTICAL_STRIDE_1                 1
+#define BRW_VERTICAL_STRIDE_2                 2
+#define BRW_VERTICAL_STRIDE_4                 3
+#define BRW_VERTICAL_STRIDE_8                 4
+#define BRW_VERTICAL_STRIDE_16                5
+#define BRW_VERTICAL_STRIDE_32                6
+#define BRW_VERTICAL_STRIDE_64                7
+#define BRW_VERTICAL_STRIDE_128               8
+#define BRW_VERTICAL_STRIDE_256               9
+#define BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL   0xF
+
+#define BRW_WIDTH_1       0
+#define BRW_WIDTH_2       1
+#define BRW_WIDTH_4       2
+#define BRW_WIDTH_8       3
+#define BRW_WIDTH_16      4
+
+#define BRW_STATELESS_BUFFER_BOUNDARY_1K      0
+#define BRW_STATELESS_BUFFER_BOUNDARY_2K      1
+#define BRW_STATELESS_BUFFER_BOUNDARY_4K      2
+#define BRW_STATELESS_BUFFER_BOUNDARY_8K      3
+#define BRW_STATELESS_BUFFER_BOUNDARY_16K     4
+#define BRW_STATELESS_BUFFER_BOUNDARY_32K     5
+#define BRW_STATELESS_BUFFER_BOUNDARY_64K     6
+#define BRW_STATELESS_BUFFER_BOUNDARY_128K    7
+#define BRW_STATELESS_BUFFER_BOUNDARY_256K    8
+#define BRW_STATELESS_BUFFER_BOUNDARY_512K    9
+#define BRW_STATELESS_BUFFER_BOUNDARY_1M      10
+#define BRW_STATELESS_BUFFER_BOUNDARY_2M      11
+
+#define BRW_POLYGON_FACING_FRONT      0
+#define BRW_POLYGON_FACING_BACK       1
+
+#define BRW_MESSAGE_TARGET_NULL               0
+#define BRW_MESSAGE_TARGET_MATH               1 /* reserved on GEN6 */
+#define BRW_MESSAGE_TARGET_SAMPLER            2
+#define BRW_MESSAGE_TARGET_GATEWAY            3
+#define BRW_MESSAGE_TARGET_DATAPORT_READ      4
+#define BRW_MESSAGE_TARGET_DATAPORT_WRITE     5
+#define BRW_MESSAGE_TARGET_URB                6
+#define BRW_MESSAGE_TARGET_THREAD_SPAWNER     7
+
+#define GEN6_MESSAGE_TARGET_DP_SAMPLER_CACHE  4
+#define GEN6_MESSAGE_TARGET_DP_RENDER_CACHE   5
+#define GEN6_MESSAGE_TARGET_DP_CONST_CACHE    9
+
+#define BRW_SAMPLER_RETURN_FORMAT_FLOAT32     0
+#define BRW_SAMPLER_RETURN_FORMAT_UINT32      2
+#define BRW_SAMPLER_RETURN_FORMAT_SINT32      3
+
+#define BRW_SAMPLER_MESSAGE_SAMPLE	              0
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE              0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE             0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS        0
+#define BRW_SAMPLER_MESSAGE_SIMD8_KILLPIX             1
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD        1
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD         1
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS  2
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS    2
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE    0
+#define BRW_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE     2
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_BIAS_COMPARE 0
+#define BRW_SAMPLER_MESSAGE_SIMD8_SAMPLE_LOD_COMPARE  1
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_RESINFO           2
+#define BRW_SAMPLER_MESSAGE_SIMD16_RESINFO            2
+/* The Gen4 sampler LD message type is 3 for every SIMD width. */
+#define BRW_SAMPLER_MESSAGE_SIMD4X2_LD                3
+#define BRW_SAMPLER_MESSAGE_SIMD8_LD                  3
+#define BRW_SAMPLER_MESSAGE_SIMD16_LD                 3
+
+#define GEN5_SAMPLER_MESSAGE_SAMPLE              0
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS         1
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_LOD          2
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_COMPARE      3
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_DERIVS       4
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_BIAS_COMPARE 5
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_LOD_COMPARE  6
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_LD           7
+#define GEN5_SAMPLER_MESSAGE_SAMPLE_RESINFO      10
+
+/* for GEN5 only */
+#define BRW_SAMPLER_SIMD_MODE_SIMD4X2                   0
+#define BRW_SAMPLER_SIMD_MODE_SIMD8                     1
+#define BRW_SAMPLER_SIMD_MODE_SIMD16                    2
+#define BRW_SAMPLER_SIMD_MODE_SIMD32_64                 3
+
+/* OWord block-size encodings for data port block read/write messages. */
+#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW   0
+#define BRW_DATAPORT_OWORD_BLOCK_1_OWORDHIGH  1
+#define BRW_DATAPORT_OWORD_BLOCK_2_OWORDS     2
+#define BRW_DATAPORT_OWORD_BLOCK_4_OWORDS     3
+#define BRW_DATAPORT_OWORD_BLOCK_8_OWORDS     4
+
+#define BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD     0
+#define BRW_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS    2
+
+#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS   2
+#define BRW_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS  3
+
+/* This one stays the same across generations. */
+#define BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ          0
+/* GEN4 */
+#define BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     1
+#define BRW_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          2
+#define BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      3
+/* G45, GEN5: the read message types were renumbered from Gen4. */
+#define G45_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ	    1
+#define G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     2
+#define G45_DATAPORT_READ_MESSAGE_AVC_LOOP_FILTER_READ	    3
+#define G45_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          4
+#define G45_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      6
+/* GEN6 */
+#define GEN6_DATAPORT_READ_MESSAGE_RENDER_UNORM_READ	    1
+#define GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ     2
+#define GEN6_DATAPORT_READ_MESSAGE_MEDIA_BLOCK_READ          4
+#define GEN6_DATAPORT_READ_MESSAGE_OWORD_UNALIGN_BLOCK_READ  5
+#define GEN6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ      6
+
+/* Target cache selector for data port read messages. */
+#define BRW_DATAPORT_READ_TARGET_DATA_CACHE      0
+#define BRW_DATAPORT_READ_TARGET_RENDER_CACHE    1
+#define BRW_DATAPORT_READ_TARGET_SAMPLER_CACHE   2
+
+/* Message control encodings for render target write messages. */
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE                0
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED     1
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01         2
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23         3
+#define BRW_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01       4
+
+/**
+ * Message target: Shared Function ID for where to SEND a message.
+ *
+ * These are enumerated in the ISA reference under "send - Send Message".
+ * In particular, see the following tables:
+ * - G45 PRM, Volume 4, Table 14-15 "Message Descriptor Definition"
+ * - Sandybridge PRM, Volume 4 Part 2, Table 8-16 "Extended Message Descriptor"
+ * - BSpec, Volume 1a (GPU Overview) / Graphics Processing Engine (GPE) /
+ *   Overview / GPE Function IDs
+ */
+enum brw_message_target {
+   BRW_SFID_NULL                     = 0,
+   BRW_SFID_MATH                     = 1, /* Only valid on Gen4-5 */
+   BRW_SFID_SAMPLER                  = 2,
+   BRW_SFID_MESSAGE_GATEWAY          = 3,
+   BRW_SFID_DATAPORT_READ            = 4,
+   BRW_SFID_DATAPORT_WRITE           = 5,
+   BRW_SFID_URB                      = 6,
+   BRW_SFID_THREAD_SPAWNER           = 7,
+
+   /* Gen6 reuses SFIDs 4 and 5 (the Gen4-5 data port read/write slots)
+    * for its sampler-cache and render-cache data ports. */
+   GEN6_SFID_DATAPORT_SAMPLER_CACHE  = 4,
+   GEN6_SFID_DATAPORT_RENDER_CACHE   = 5,
+   GEN6_SFID_DATAPORT_CONSTANT_CACHE = 9,
+
+   GEN7_SFID_DATAPORT_DATA_CACHE     = 10,
+};
+
+/* Same value as GEN7_SFID_DATAPORT_DATA_CACHE above. */
+#define GEN7_MESSAGE_TARGET_DP_DATA_CACHE     10
+
+#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE                0
+#define BRW_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE           1
+#define BRW_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE                2
+#define BRW_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE            3
+#define BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE              4
+#define BRW_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE     5
+#define BRW_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE               7
+
+/* GEN6: write message types renumbered relative to Gen4-5 above. */
+#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_ATOMIC_WRITE              7
+#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE               8
+#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE          9
+#define GEN6_DATAPORT_WRITE_MESSAGE_MEDIA_BLOCK_WRITE               10
+#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE           11
+#define GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE             12
+#define GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VB_WRITE               13
+#define GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_UNORM_WRITE       14
+
+/* Function encodings for the EU math unit.  Note SIN/COS/SINCOS were
+ * renumbered at some point (see the trailing "was N" comments). */
+#define BRW_MATH_FUNCTION_INV                              1
+#define BRW_MATH_FUNCTION_LOG                              2
+#define BRW_MATH_FUNCTION_EXP                              3
+#define BRW_MATH_FUNCTION_SQRT                             4
+#define BRW_MATH_FUNCTION_RSQ                              5
+#define BRW_MATH_FUNCTION_SIN                              6 /* was 7 */
+#define BRW_MATH_FUNCTION_COS                              7 /* was 8 */
+#define BRW_MATH_FUNCTION_SINCOS                           8 /* was 6 */
+#define BRW_MATH_FUNCTION_TAN                              9 /* gen4 */
+#define BRW_MATH_FUNCTION_FDIV                             9 /* gen6+ */
+#define BRW_MATH_FUNCTION_POW                              10
+#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER   11
+#define BRW_MATH_FUNCTION_INT_DIV_QUOTIENT                 12
+#define BRW_MATH_FUNCTION_INT_DIV_REMAINDER                13
+
+#define BRW_MATH_INTEGER_UNSIGNED     0
+#define BRW_MATH_INTEGER_SIGNED       1
+
+#define BRW_MATH_PRECISION_FULL        0
+#define BRW_MATH_PRECISION_PARTIAL     1
+
+#define BRW_MATH_SATURATE_NONE         0
+#define BRW_MATH_SATURATE_SATURATE     1
+
+#define BRW_MATH_DATA_VECTOR  0
+#define BRW_MATH_DATA_SCALAR  1
+
+#define BRW_URB_OPCODE_WRITE  0
+
+#define BRW_URB_SWIZZLE_NONE          0
+#define BRW_URB_SWIZZLE_INTERLEAVE    1
+#define BRW_URB_SWIZZLE_TRANSPOSE     2
+
+/* Encoded scratch-space size: value n selects 1KB << n (1KB .. 2MB). */
+#define BRW_SCRATCH_SPACE_SIZE_1K     0
+#define BRW_SCRATCH_SPACE_SIZE_2K     1
+#define BRW_SCRATCH_SPACE_SIZE_4K     2
+#define BRW_SCRATCH_SPACE_SIZE_8K     3
+#define BRW_SCRATCH_SPACE_SIZE_16K    4
+#define BRW_SCRATCH_SPACE_SIZE_32K    5
+#define BRW_SCRATCH_SPACE_SIZE_64K    6
+#define BRW_SCRATCH_SPACE_SIZE_128K   7
+#define BRW_SCRATCH_SPACE_SIZE_256K   8
+#define BRW_SCRATCH_SPACE_SIZE_512K   9
+#define BRW_SCRATCH_SPACE_SIZE_1M     10
+#define BRW_SCRATCH_SPACE_SIZE_2M     11
+
+/* One GRF register: 8 dwords = 32 bytes. */
+#define REG_SIZE (8*4)
+
+/* Native Gen4+ EU instruction encoding: four 32-bit dwords.  "header"
+ * holds the opcode and execution controls; "bits1" describes the
+ * destination and the register files/types of the operands; "bits2"
+ * describes src0; "bits3" describes src1, or carries the SEND message
+ * descriptor, or an immediate constant (d/ud/f).  The union members
+ * are alternate layouts of the same dword: judging by the field names,
+ * da1 = direct align1, ia1 = indirect align1, da16/ia16 = the align16
+ * forms, plus per-message-type SEND descriptor layouts.  Field order
+ * and widths are hardware ABI — do not reorder or repack.
+ */
+struct brw_instruction {
+	struct {
+		unsigned opcode:7;
+		unsigned pad:1;
+		unsigned access_mode:1;
+		unsigned mask_control:1;
+		unsigned dependency_control:2;
+		unsigned compression_control:2; /* gen6: quarter control */
+		unsigned thread_control:2;
+		unsigned predicate_control:4;
+		unsigned predicate_inverse:1;
+		unsigned execution_size:3;
+		/**
+		 * Conditional Modifier for most instructions.  On Gen6+, this is also
+		 * used for the SEND instruction's Message Target/SFID.
+		 */
+		unsigned destreg__conditionalmod:4;
+		unsigned acc_wr_control:1;
+		unsigned cmpt_control:1;
+		unsigned debug_control:1;
+		unsigned saturate:1;
+	} header;
+
+	/* Destination descriptor plus the register file/type fields for
+	 * all operands (which live in this dword for every layout). */
+	union {
+		struct {
+			unsigned dest_reg_file:2;
+			unsigned dest_reg_type:3;
+			unsigned src0_reg_file:2;
+			unsigned src0_reg_type:3;
+			unsigned src1_reg_file:2;
+			unsigned src1_reg_type:3;
+			unsigned pad:1;
+			unsigned dest_subreg_nr:5;
+			unsigned dest_reg_nr:8;
+			unsigned dest_horiz_stride:2;
+			unsigned dest_address_mode:1;
+		} da1;
+
+		struct {
+			unsigned dest_reg_file:2;
+			unsigned dest_reg_type:3;
+			unsigned src0_reg_file:2;
+			unsigned src0_reg_type:3;
+			unsigned src1_reg_file:2;        /* 0x00000c00 */
+			unsigned src1_reg_type:3;        /* 0x00007000 */
+			unsigned pad:1;
+			int dest_indirect_offset:10;	/* offset against the deref'd address reg */
+			unsigned dest_subreg_nr:3; /* subnr for the address reg a0.x */
+			unsigned dest_horiz_stride:2;
+			unsigned dest_address_mode:1;
+		} ia1;
+
+		struct {
+			unsigned dest_reg_file:2;
+			unsigned dest_reg_type:3;
+			unsigned src0_reg_file:2;
+			unsigned src0_reg_type:3;
+			unsigned src1_reg_file:2;
+			unsigned src1_reg_type:3;
+			unsigned pad:1;
+			unsigned dest_writemask:4;
+			unsigned dest_subreg_nr:1;
+			unsigned dest_reg_nr:8;
+			unsigned dest_horiz_stride:2;
+			unsigned dest_address_mode:1;
+		} da16;
+
+		struct {
+			unsigned dest_reg_file:2;
+			unsigned dest_reg_type:3;
+			unsigned src0_reg_file:2;
+			unsigned src0_reg_type:3;
+			unsigned pad0:6;
+			unsigned dest_writemask:4;
+			int dest_indirect_offset:6;
+			unsigned dest_subreg_nr:3;
+			unsigned dest_horiz_stride:2;
+			unsigned dest_address_mode:1;
+		} ia16;
+
+		struct {
+			unsigned dest_reg_file:2;
+			unsigned dest_reg_type:3;
+			unsigned src0_reg_file:2;
+			unsigned src0_reg_type:3;
+			unsigned src1_reg_file:2;
+			unsigned src1_reg_type:3;
+			unsigned pad:1;
+
+			int jump_count:16;
+		} branch_gen6;
+
+		struct {
+			unsigned dest_reg_file:1;
+			unsigned flag_subreg_num:1;
+			unsigned pad0:2;
+			unsigned src0_abs:1;
+			unsigned src0_negate:1;
+			unsigned src1_abs:1;
+			unsigned src1_negate:1;
+			unsigned src2_abs:1;
+			unsigned src2_negate:1;
+			unsigned pad1:7;
+			unsigned dest_writemask:4;
+			unsigned dest_subreg_nr:3;
+			unsigned dest_reg_nr:8;
+		} da3src;
+	} bits1;
+
+
+	/* src0 descriptor (or part of a three-source operand encoding). */
+	union {
+		struct {
+			unsigned src0_subreg_nr:5;
+			unsigned src0_reg_nr:8;
+			unsigned src0_abs:1;
+			unsigned src0_negate:1;
+			unsigned src0_address_mode:1;
+			unsigned src0_horiz_stride:2;
+			unsigned src0_width:3;
+			unsigned src0_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad:5;
+		} da1;
+
+		struct {
+			int src0_indirect_offset:10;
+			unsigned src0_subreg_nr:3;
+			unsigned src0_abs:1;
+			unsigned src0_negate:1;
+			unsigned src0_address_mode:1;
+			unsigned src0_horiz_stride:2;
+			unsigned src0_width:3;
+			unsigned src0_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad:5;
+		} ia1;
+
+		struct {
+			unsigned src0_swz_x:2;
+			unsigned src0_swz_y:2;
+			unsigned src0_subreg_nr:1;
+			unsigned src0_reg_nr:8;
+			unsigned src0_abs:1;
+			unsigned src0_negate:1;
+			unsigned src0_address_mode:1;
+			unsigned src0_swz_z:2;
+			unsigned src0_swz_w:2;
+			unsigned pad0:1;
+			unsigned src0_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad1:5;
+		} da16;
+
+		struct {
+			unsigned src0_swz_x:2;
+			unsigned src0_swz_y:2;
+			int src0_indirect_offset:6;
+			unsigned src0_subreg_nr:3;
+			unsigned src0_abs:1;
+			unsigned src0_negate:1;
+			unsigned src0_address_mode:1;
+			unsigned src0_swz_z:2;
+			unsigned src0_swz_w:2;
+			unsigned pad0:1;
+			unsigned src0_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad1:5;
+		} ia16;
+
+		/* Extended Message Descriptor for Ironlake (Gen5) SEND instruction.
+		 *
+		 * Does not apply to Gen6+.  The SFID/message target moved to bits
+		 * 27:24 of the header (destreg__conditionalmod); EOT is in bits3.
+		 */
+		struct {
+			unsigned pad:26;
+			unsigned end_of_thread:1;
+			unsigned pad1:1;
+			unsigned sfid:4;
+		} send_gen5;  /* for Ironlake only */
+
+		struct {
+			unsigned src0_rep_ctrl:1;
+			unsigned src0_swizzle:8;
+			unsigned src0_subreg_nr:3;
+			unsigned src0_reg_nr:8;
+			unsigned pad0:1;
+			unsigned src1_rep_ctrl:1;
+			unsigned src1_swizzle:8;
+			unsigned src1_subreg_nr_low:2;
+		} da3src;
+	} bits2;
+
+	/* src1 descriptor, SEND message descriptor, flow-control targets,
+	 * or a 32-bit immediate (d/ud/f), depending on the instruction. */
+	union {
+		struct {
+			unsigned src1_subreg_nr:5;
+			unsigned src1_reg_nr:8;
+			unsigned src1_abs:1;
+			unsigned src1_negate:1;
+			unsigned src1_address_mode:1;
+			unsigned src1_horiz_stride:2;
+			unsigned src1_width:3;
+			unsigned src1_vert_stride:4;
+			unsigned pad0:7;
+		} da1;
+
+		struct {
+			unsigned src1_swz_x:2;
+			unsigned src1_swz_y:2;
+			unsigned src1_subreg_nr:1;
+			unsigned src1_reg_nr:8;
+			unsigned src1_abs:1;
+			unsigned src1_negate:1;
+			unsigned src1_address_mode:1;
+			unsigned src1_swz_z:2;
+			unsigned src1_swz_w:2;
+			unsigned pad1:1;
+			unsigned src1_vert_stride:4;
+			unsigned pad2:7;
+		} da16;
+
+		struct {
+			int src1_indirect_offset:10;
+			unsigned src1_subreg_nr:3;
+			unsigned src1_abs:1;
+			unsigned src1_negate:1;
+			unsigned src1_address_mode:1;
+			unsigned src1_horiz_stride:2;
+			unsigned src1_width:3;
+			unsigned src1_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad1:5;
+		} ia1;
+
+		struct {
+			unsigned src1_swz_x:2;
+			unsigned src1_swz_y:2;
+			int  src1_indirect_offset:6;
+			unsigned src1_subreg_nr:3;
+			unsigned src1_abs:1;
+			unsigned src1_negate:1;
+			unsigned pad0:1;
+			unsigned src1_swz_z:2;
+			unsigned src1_swz_w:2;
+			unsigned pad1:1;
+			unsigned src1_vert_stride:4;
+			unsigned flag_subreg_nr:1;
+			unsigned flag_reg_nr:1;
+			unsigned pad2:5;
+		} ia16;
+
+		struct {
+			int jump_count:16;	/* note: signed */
+			unsigned pop_count:4;
+			unsigned pad0:12;
+		} if_else;
+
+		/* This is also used for gen7 IF/ELSE instructions */
+		struct {
+			/* Signed jump distance to the ip to jump to if all channels
+			 * are disabled after the break or continue.  It should point
+			 * to the end of the innermost control flow block, as that's
+			 * where some channel could get re-enabled.
+			 */
+			int jip:16;
+
+			/* Signed jump distance to the location to resume execution
+			 * of this channel if it's enabled for the break or continue.
+			 */
+			int uip:16;
+		} break_cont;
+
+		/**
+		 * \defgroup SEND instructions / Message Descriptors
+		 *
+		 * @{
+		 */
+
+		/**
+		 * Generic Message Descriptor for Gen4 SEND instructions.  The structs
+		 * below expand function_control to something specific for their
+		 * message.  Due to struct packing issues, they duplicate these bits.
+		 *
+		 * See the G45 PRM, Volume 4, Table 14-15.
+		 */
+		struct {
+			unsigned function_control:16;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} generic;
+
+		/**
+		 * Generic Message Descriptor for Gen5-7 SEND instructions.
+		 *
+		 * See the Sandybridge PRM, Volume 2 Part 2, Table 8-15.  (Sadly, most
+		 * of the information on the SEND instruction is missing from the public
+		 * Ironlake PRM.)
+		 *
+		 * The table claims that bit 31 is reserved/MBZ on Gen6+, but it lies.
+		 * According to the SEND instruction description:
+		 * "The MSb of the message description, the EOT field, always comes from
+		 *  bit 127 of the instruction word"...which is bit 31 of this field.
+		 */
+		struct {
+			unsigned function_control:19;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} generic_gen5;
+
+		/** G45 PRM, Volume 4, Section 6.1.1.1 */
+		struct {
+			unsigned function:4;
+			unsigned int_type:1;
+			unsigned precision:1;
+			unsigned saturate:1;
+			unsigned data_type:1;
+			unsigned pad0:8;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} math;
+
+		/** Ironlake PRM, Volume 4 Part 1, Section 6.1.1.1 */
+		struct {
+			unsigned function:4;
+			unsigned int_type:1;
+			unsigned precision:1;
+			unsigned saturate:1;
+			unsigned data_type:1;
+			unsigned snapshot:1;
+			unsigned pad0:10;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} math_gen5;
+
+		/** G45 PRM, Volume 4, Section 4.8.1.1.1 [DevBW] and [DevCL] */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned sampler:4;
+			unsigned return_format:2;
+			unsigned msg_type:2;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} sampler;
+
+		/** G45 PRM, Volume 4, Section 4.8.1.1.2 [DevCTG] */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned sampler:4;
+			unsigned msg_type:4;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} sampler_g4x;
+
+		/** Ironlake PRM, Volume 4 Part 1, Section 4.11.1.1.3 */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned sampler:4;
+			unsigned msg_type:4;
+			unsigned simd_mode:2;
+			unsigned pad0:1;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} sampler_gen5;
+
+		struct {
+			unsigned binding_table_index:8;
+			unsigned sampler:4;
+			unsigned msg_type:5;
+			unsigned simd_mode:2;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} sampler_gen7;
+
+		struct brw_urb_immediate {
+			unsigned opcode:4;
+			unsigned offset:6;
+			unsigned swizzle_control:2;
+			unsigned pad:1;
+			unsigned allocate:1;
+			unsigned used:1;
+			unsigned complete:1;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} urb;
+
+		struct {
+			unsigned opcode:4;
+			unsigned offset:6;
+			unsigned swizzle_control:2;
+			unsigned pad:1;
+			unsigned allocate:1;
+			unsigned used:1;
+			unsigned complete:1;
+			unsigned pad0:3;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} urb_gen5;
+
+		struct {
+			unsigned opcode:3;
+			unsigned offset:11;
+			unsigned swizzle_control:1;
+			unsigned complete:1;
+			unsigned per_slot_offset:1;
+			unsigned pad0:2;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} urb_gen7;
+
+		/** 965 PRM, Volume 4, Section 5.10.1.1: Message Descriptor */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:4;
+			unsigned msg_type:2;
+			unsigned target_cache:2;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} dp_read;
+
+		/** G45 PRM, Volume 4, Section 5.10.1.1.2 */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned msg_type:3;
+			unsigned target_cache:2;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} dp_read_g4x;
+
+		/** Ironlake PRM, Volume 4 Part 1, Section 5.10.2.1.2. */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned msg_type:3;
+			unsigned target_cache:2;
+			unsigned pad0:3;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} dp_read_gen5;
+
+		/** G45 PRM, Volume 4, Section 5.10.1.1.2.  For both Gen4 and G45. */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned last_render_target:1;
+			unsigned msg_type:3;
+			unsigned send_commit_msg:1;
+			unsigned response_length:4;
+			unsigned msg_length:4;
+			unsigned msg_target:4;
+			unsigned pad1:3;
+			unsigned end_of_thread:1;
+		} dp_write;
+
+		/** Ironlake PRM, Volume 4 Part 1, Section 5.10.2.1.2. */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned last_render_target:1;
+			unsigned msg_type:3;
+			unsigned send_commit_msg:1;
+			unsigned pad0:3;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} dp_write_gen5;
+
+		/**
+		 * Message for the Sandybridge Sampler Cache or Constant Cache Data Port.
+		 *
+		 * See the Sandybridge PRM, Volume 4 Part 1, Section 3.9.2.1.1.
+		 **/
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:5;
+			unsigned msg_type:3;
+			unsigned pad0:3;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} gen6_dp_sampler_const_cache;
+
+		/**
+		 * Message for the Sandybridge Render Cache Data Port.
+		 *
+		 * Most fields are defined in the Sandybridge PRM, Volume 4 Part 1,
+		 * Section 3.9.2.1.1: Message Descriptor.
+		 *
+		 * "Slot Group Select" and "Last Render Target" are part of the
+		 * 5-bit message control for Render Target Write messages.  See
+		 * Section 3.9.9.2.1 of the same volume.
+		 */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned slot_group_select:1;
+			unsigned last_render_target:1;
+			unsigned msg_type:4;
+			unsigned send_commit_msg:1;
+			unsigned pad0:1;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad1:2;
+			unsigned end_of_thread:1;
+		} gen6_dp;
+
+		/**
+		 * Message for any of the Gen7 Data Port caches.
+		 *
+		 * Most fields are defined in BSpec volume 5c.2 Data Port / Messages /
+		 * Data Port Messages / Message Descriptor.  Once again, "Slot Group
+		 * Select" and "Last Render Target" are part of the 6-bit message
+		 * control for Render Target Writes.
+		 */
+		struct {
+			unsigned binding_table_index:8;
+			unsigned msg_control:3;
+			unsigned slot_group_select:1;
+			unsigned last_render_target:1;
+			unsigned msg_control_pad:1;
+			unsigned msg_type:4;
+			unsigned pad1:1;
+			unsigned header_present:1;
+			unsigned response_length:5;
+			unsigned msg_length:4;
+			unsigned pad2:2;
+			unsigned end_of_thread:1;
+		} gen7_dp;
+		/** @} */
+
+		struct {
+			unsigned src1_subreg_nr_high:1;
+			unsigned src1_reg_nr:8;
+			unsigned pad0:1;
+			unsigned src2_rep_ctrl:1;
+			unsigned src2_swizzle:8;
+			unsigned src2_subreg_nr:3;
+			unsigned src2_reg_nr:8;
+			unsigned pad1:2;
+		} da3src;
+
+		/* Raw immediate views of the final dword. */
+		int d;
+		unsigned ud;
+		float f;
+	} bits3;
+};
+
+
+/* These aren't hardware structs, just something useful for us to pass around:
+ *
+ * Align1 operation has a lot of control over input ranges.  Used in
+ * WM programs to implement shaders decomposed into "channel serial"
+ * or "structure of array" form:
+ *
+ * Two dwords total (32 bits of descriptor + the dw1 union).
+ */
+struct brw_reg {
+	unsigned type:4;		/* BRW_REGISTER_TYPE_x */
+	unsigned file:2;		/* BRW_x_REGISTER_FILE */
+	unsigned nr:8;		/* register number */
+	unsigned subnr:5;		/* :1 in align16 */
+	unsigned negate:1;		/* source only */
+	unsigned abs:1;		/* source only */
+	unsigned vstride:4;		/* source only */
+	unsigned width:3;		/* src only, align1 only */
+	unsigned hstride:2;   		/* align1 only */
+	unsigned address_mode:1;	/* relative addressing, hopefully! */
+	unsigned pad0:1;
+
+	union {
+		struct {
+			unsigned swizzle:8;		/* src only, align16 only */
+			unsigned writemask:4;		/* dest only, align16 only */
+			int  indirect_offset:10;	/* relative addressing offset */
+			unsigned pad1:10;		/* two dwords total */
+		} bits;
+
+		/* Immediate payload views (used by the brw_imm_* constructors). */
+		float f;
+		int   d;
+		unsigned ud;
+	} dw1;
+};
+
+/* Indirect-addressing descriptor: address-register subnumber plus a
+ * signed byte offset (4 + 10 + 18 = one 32-bit dword). */
+struct brw_indirect {
+	unsigned addr_subnr:4;
+	int addr_offset:10;
+	unsigned pad:18;
+};
+
+#define BRW_EU_MAX_INSN_STACK 5
+#define BRW_EU_MAX_INSN 10000
+
+/* Assembler state for building an EU program one brw_instruction at a
+ * time.  NOTE(review): field roles below are inferred from the names
+ * and the constants above — confirm against the emitter code. */
+struct brw_compile {
+	struct brw_instruction *store;	/* emitted instructions (presumably up to BRW_EU_MAX_INSN) */
+	unsigned nr_insn;		/* count of instructions in store */
+
+	int gen;			/* presumably the target hardware generation */
+
+	/* Allow clients to push/pop instruction state:
+	*/
+	struct brw_instruction stack[BRW_EU_MAX_INSN_STACK];
+	bool compressed_stack[BRW_EU_MAX_INSN_STACK];
+	struct brw_instruction *current;
+
+	unsigned flag_value;
+	bool single_program_flow;
+	bool compressed;
+
+	/* Control flow stacks:
+	 * - if_stack contains IF and ELSE instructions which must be patched
+	 *   (and popped) once the matching ENDIF instruction is encountered.
+	 */
+	struct brw_instruction **if_stack;
+	int if_stack_depth;
+	int if_stack_array_size;
+};
+
+/* Byte size of one element of the given BRW_REGISTER_TYPE_x.
+ * Unrecognized types report 0. */
+static inline int type_sz(unsigned type)
+{
+	switch (type) {
+	case BRW_REGISTER_TYPE_UB:
+	case BRW_REGISTER_TYPE_B:
+		return 1;
+	case BRW_REGISTER_TYPE_HF:
+	case BRW_REGISTER_TYPE_UW:
+	case BRW_REGISTER_TYPE_W:
+		return 2;
+	case BRW_REGISTER_TYPE_UD:
+	case BRW_REGISTER_TYPE_D:
+	case BRW_REGISTER_TYPE_F:
+		return 4;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * Construct a brw_reg.
+ * \param file  one of the BRW_x_REGISTER_FILE values
+ * \param nr  register number/index
+ * \param subnr  register sub number (in units of the element type)
+ * \param type  one of BRW_REGISTER_TYPE_x
+ * \param vstride  one of BRW_VERTICAL_STRIDE_x
+ * \param width  one of BRW_WIDTH_x
+ * \param hstride  one of BRW_HORIZONTAL_STRIDE_x
+ * \param swizzle  one of BRW_SWIZZLE_x
+ * \param writemask  WRITEMASK_X/Y/Z/W bitfield
+ */
+static inline struct brw_reg brw_reg(unsigned file,
+				     unsigned nr,
+				     unsigned subnr,
+				     unsigned type,
+				     unsigned vstride,
+				     unsigned width,
+				     unsigned hstride,
+				     unsigned swizzle,
+				     unsigned writemask)
+{
+	if (file == BRW_GENERAL_REGISTER_FILE)
+		assert(nr < BRW_MAX_GRF);
+	else if (file == BRW_MESSAGE_REGISTER_FILE)
+		assert((nr & ~(1 << 7)) < BRW_MAX_MRF);
+	else if (file == BRW_ARCHITECTURE_REGISTER_FILE)
+		assert(nr <= BRW_ARF_IP);
+
+	/* Could do better: If the reg is r5.3<0;1,0>, we probably want to
+	 * set swizzle and writemask to W, as the lower bits of subnr will
+	 * be lost when converted to align16.  This is probably too much to
+	 * keep track of as you'd want it adjusted by suboffset(), etc.
+	 * Perhaps fix up when converting to align16?
+	 */
+	return (struct brw_reg) {
+		.type = type,
+		.file = file,
+		.nr = nr,
+		.subnr = subnr * type_sz(type),	/* subnr is stored in bytes */
+		.negate = 0,
+		.abs = 0,
+		.vstride = vstride,
+		.width = width,
+		.hstride = hstride,
+		.address_mode = BRW_ADDRESS_DIRECT,
+		.pad0 = 0,
+		.dw1.bits.swizzle = swizzle,
+		.dw1.bits.writemask = writemask,
+		.dw1.bits.indirect_offset = 0,
+		.dw1.bits.pad1 = 0,
+	};
+}
+
+/** Construct float[16] register */
+static inline struct brw_reg brw_vec16_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	return brw_reg(file, nr, subnr, BRW_REGISTER_TYPE_F,
+		       BRW_VERTICAL_STRIDE_16, BRW_WIDTH_16,
+		       BRW_HORIZONTAL_STRIDE_1,
+		       BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+}
+
+/** Construct float[8] register */
+static inline struct brw_reg brw_vec8_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	return brw_reg(file, nr, subnr, BRW_REGISTER_TYPE_F,
+		       BRW_VERTICAL_STRIDE_8, BRW_WIDTH_8,
+		       BRW_HORIZONTAL_STRIDE_1,
+		       BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+}
+
+/** Construct float[4] register */
+static inline struct brw_reg brw_vec4_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	return brw_reg(file, nr, subnr, BRW_REGISTER_TYPE_F,
+		       BRW_VERTICAL_STRIDE_4, BRW_WIDTH_4,
+		       BRW_HORIZONTAL_STRIDE_1,
+		       BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
+}
+
+/** Construct float[2] register */
+static inline struct brw_reg brw_vec2_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	return brw_reg(file, nr, subnr, BRW_REGISTER_TYPE_F,
+		       BRW_VERTICAL_STRIDE_2, BRW_WIDTH_2,
+		       BRW_HORIZONTAL_STRIDE_1,
+		       BRW_SWIZZLE_XYXY, WRITEMASK_XY);
+}
+
+/** Construct float[1] register (scalar: zero strides) */
+static inline struct brw_reg brw_vec1_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	return brw_reg(file, nr, subnr, BRW_REGISTER_TYPE_F,
+		       BRW_VERTICAL_STRIDE_0, BRW_WIDTH_1,
+		       BRW_HORIZONTAL_STRIDE_0,
+		       BRW_SWIZZLE_XXXX, WRITEMASK_X);
+}
+
+
+/* Reinterpret reg with a different BRW_REGISTER_TYPE_x. */
+static inline struct brw_reg __retype(struct brw_reg reg, unsigned type)
+{
+	struct brw_reg r = reg;
+
+	r.type = type;
+	return r;
+}
+
+static inline struct brw_reg __retype_d(struct brw_reg reg)
+{
+	return __retype(reg, BRW_REGISTER_TYPE_D);
+}
+
+static inline struct brw_reg __retype_ud(struct brw_reg reg)
+{
+	return __retype(reg, BRW_REGISTER_TYPE_UD);
+}
+
+static inline struct brw_reg __retype_uw(struct brw_reg reg)
+{
+	return __retype(reg, BRW_REGISTER_TYPE_UW);
+}
+
+/* Advance to the next register for the second half of a compressed
+ * operand; scalars (vstride == 0) are left untouched. */
+static inline struct brw_reg __sechalf(struct brw_reg reg)
+{
+	if (reg.vstride != 0)
+		reg.nr++;
+	return reg;
+}
+
+/* Step delta elements within a register (subnr is kept in bytes, so
+ * scale by the element size). */
+static inline struct brw_reg __suboffset(struct brw_reg reg, unsigned delta)
+{
+	reg.subnr += delta * type_sz(reg.type);
+	return reg;
+}
+
+/* Step delta whole registers. */
+static inline struct brw_reg __offset(struct brw_reg reg, unsigned delta)
+{
+	reg.nr += delta;
+	return reg;
+}
+
+/* Step an arbitrary number of bytes, renormalizing into nr/subnr. */
+static inline struct brw_reg byte_offset(struct brw_reg reg, unsigned bytes)
+{
+	unsigned total = reg.nr * REG_SIZE + reg.subnr + bytes;
+
+	reg.nr = total / REG_SIZE;
+	reg.subnr = total % REG_SIZE;
+	return reg;
+}
+
+
+/** Construct unsigned word[16] register */
+static inline struct brw_reg brw_uw16_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	struct brw_reg reg = brw_vec16_reg(file, nr, 0);
+	return __suboffset(__retype(reg, BRW_REGISTER_TYPE_UW), subnr);
+}
+
+/** Construct unsigned word[8] register */
+static inline struct brw_reg brw_uw8_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	struct brw_reg reg = brw_vec8_reg(file, nr, 0);
+	return __suboffset(__retype(reg, BRW_REGISTER_TYPE_UW), subnr);
+}
+
+/** Construct unsigned word[1] register */
+static inline struct brw_reg brw_uw1_reg(unsigned file, unsigned nr, unsigned subnr)
+{
+	struct brw_reg reg = brw_vec1_reg(file, nr, 0);
+	return __suboffset(__retype(reg, BRW_REGISTER_TYPE_UW), subnr);
+}
+
+/* Base constructor for immediate operands of the given type; callers
+ * fill in the payload via the dw1 union. */
+static inline struct brw_reg brw_imm_reg(unsigned type)
+{
+	return brw_reg(BRW_IMMEDIATE_VALUE, 0, 0, type,
+		       BRW_VERTICAL_STRIDE_0, BRW_WIDTH_1,
+		       BRW_HORIZONTAL_STRIDE_0, 0, 0);
+}
+
+/** Construct float immediate register */
+static inline struct brw_reg brw_imm_f(float f)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_F);
+
+	reg.dw1.f = f;
+	return reg;
+}
+
+/** Construct integer immediate register */
+static inline struct brw_reg brw_imm_d(int d)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_D);
+
+	reg.dw1.d = d;
+	return reg;
+}
+
+/** Construct uint immediate register */
+static inline struct brw_reg brw_imm_ud(unsigned ud)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_UD);
+
+	reg.dw1.ud = ud;
+	return reg;
+}
+
+/** Construct ushort immediate register.
+ *
+ * The 16-bit value is replicated into both halves of the immediate
+ * dword.  Cast before shifting: uint16_t promotes to (signed) int,
+ * and left-shifting a value with bit 15 set into the sign bit is
+ * undefined behaviour (C99 6.5.7).
+ */
+static inline struct brw_reg brw_imm_uw(uint16_t uw)
+{
+	struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_UW);
+	imm.dw1.ud = uw | ((uint32_t)uw << 16);
+	return imm;
+}
+
+/** Construct short immediate register.
+ *
+ * Replicate w into both 16-bit halves of the immediate dword, matching
+ * brw_imm_uw() above.  The previous `w | (w << 16)` was broken for
+ * negative values: w sign-extends to 32 bits, so the OR forced the
+ * high halfword to 0xffff instead of w's bit pattern (and left-shifting
+ * a negative int is undefined behaviour).  Mask to 16 bits first.
+ */
+static inline struct brw_reg brw_imm_w(int16_t w)
+{
+	struct brw_reg imm = brw_imm_reg(BRW_REGISTER_TYPE_W);
+	imm.dw1.ud = (uint16_t)w | ((uint32_t)(uint16_t)w << 16);
+	return imm;
+}
+
+/* brw_imm_b and brw_imm_ub aren't supported by hardware - the type
+ * numbers alias with _V and _VF below:
+ */
+
+/** Construct vector of eight signed half-byte values */
+static inline struct brw_reg brw_imm_v(unsigned v)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_V);
+
+	reg.width = BRW_WIDTH_8;
+	reg.vstride = BRW_VERTICAL_STRIDE_0;
+	reg.hstride = BRW_HORIZONTAL_STRIDE_1;
+	reg.dw1.ud = v;
+	return reg;
+}
+
+/** Construct vector of four 8-bit float values */
+static inline struct brw_reg brw_imm_vf(unsigned v)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_VF);
+
+	reg.width = BRW_WIDTH_4;
+	reg.vstride = BRW_VERTICAL_STRIDE_0;
+	reg.hstride = BRW_HORIZONTAL_STRIDE_1;
+	reg.dw1.ud = v;
+	return reg;
+}
+
+#define VF_ZERO 0x0
+#define VF_ONE  0x30
+#define VF_NEG  (1<<7)
+
+/* Pack four 8-bit restricted floats (e.g. VF_ZERO/VF_ONE, optionally
+ * OR'd with VF_NEG) into one VF immediate, v0 in the lowest byte. */
+static inline struct brw_reg brw_imm_vf4(unsigned v0, unsigned v1,
+					 unsigned v2, unsigned v3)
+{
+	struct brw_reg reg = brw_imm_reg(BRW_REGISTER_TYPE_VF);
+
+	reg.width = BRW_WIDTH_4;
+	reg.vstride = BRW_VERTICAL_STRIDE_0;
+	reg.hstride = BRW_HORIZONTAL_STRIDE_1;
+	reg.dw1.ud = v0 | (v1 << 8) | (v2 << 16) | (v3 << 24);
+	return reg;
+}
+
+/* Immediate holding the byte address of reg within its register file. */
+static inline struct brw_reg brw_address(struct brw_reg reg)
+{
+	return brw_imm_uw(reg.nr * REG_SIZE + reg.subnr);
+}
+
+/** Construct float[1] general-purpose register */
+static inline struct brw_reg brw_vec1_grf(unsigned nr, unsigned subnr)
+{
+	return brw_vec1_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** Construct float[2] general-purpose register */
+static inline struct brw_reg brw_vec2_grf(unsigned nr, unsigned subnr)
+{
+	return brw_vec2_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** Construct float[4] general-purpose register */
+static inline struct brw_reg brw_vec4_grf(unsigned nr, unsigned subnr)
+{
+	return brw_vec4_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** Construct float[8] general-purpose register */
+static inline struct brw_reg brw_vec8_grf(unsigned nr, unsigned subnr)
+{
+	return brw_vec8_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** Construct uword[8] general-purpose register */
+static inline struct brw_reg brw_uw8_grf(unsigned nr, unsigned subnr)
+{
+	return brw_uw8_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** Construct uword[16] general-purpose register */
+static inline struct brw_reg brw_uw16_grf(unsigned nr, unsigned subnr)
+{
+	return brw_uw16_reg(BRW_GENERAL_REGISTER_FILE, nr, subnr);
+}
+
+/** The null register: writes are discarded (usually used when an
+ * instruction is executed only to set condition codes). */
+static inline struct brw_reg brw_null_reg(void)
+{
+	return brw_vec8_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_NULL, 0);
+}
+
+/** Address register a0, at the given uword subnumber. */
+static inline struct brw_reg brw_address_reg(unsigned subnr)
+{
+	return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_ADDRESS, subnr);
+}
+
+/* If/else instructions break in align16 mode unless writemask and
+ * swizzle are xyzw, contrary to the convention for other scalar regs:
+ */
+static inline struct brw_reg brw_ip_reg(void)
+{
+	return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_IP, 0,
+		       BRW_REGISTER_TYPE_UD,
+		       BRW_VERTICAL_STRIDE_4, /* ? */
+		       BRW_WIDTH_1,
+		       BRW_HORIZONTAL_STRIDE_0,
+		       BRW_SWIZZLE_XYZW, /* NOTE! */
+		       WRITEMASK_XYZW); /* NOTE! */
+}
+
+/** The accumulator register. */
+static inline struct brw_reg brw_acc_reg(void)
+{
+	return brw_vec8_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+			    BRW_ARF_ACCUMULATOR, 0);
+}
+
+/** Notification-count register n1 (scalar UD view). */
+static inline struct brw_reg brw_notification_1_reg(void)
+{
+	return brw_reg(BRW_ARCHITECTURE_REGISTER_FILE,
+		       BRW_ARF_NOTIFICATION_COUNT, 1,
+		       BRW_REGISTER_TYPE_UD,
+		       BRW_VERTICAL_STRIDE_0,
+		       BRW_WIDTH_1,
+		       BRW_HORIZONTAL_STRIDE_0,
+		       BRW_SWIZZLE_XXXX,
+		       WRITEMASK_X);
+}
+
+/** The flag register (scalar UW view). */
+static inline struct brw_reg brw_flag_reg(void)
+{
+	return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_FLAG, 0);
+}
+
+/** The mask register, at the given uword subnumber. */
+static inline struct brw_reg brw_mask_reg(unsigned subnr)
+{
+	return brw_uw1_reg(BRW_ARCHITECTURE_REGISTER_FILE, BRW_ARF_MASK, subnr);
+}
+
+/** Message register m<nr> (float[8] view). */
+static inline struct brw_reg brw_message_reg(unsigned nr)
+{
+	assert((nr & ~(1 << 7)) < BRW_MAX_MRF);
+	return brw_vec8_reg(BRW_MESSAGE_REGISTER_FILE, nr, 0);
+}
+
+/** Message register m<nr> (float[4] view at the given suboffset). */
+static inline struct brw_reg brw_message4_reg(unsigned nr, int subnr)
+{
+	assert((nr & ~(1 << 7)) < BRW_MAX_MRF);
+	return brw_vec4_reg(BRW_MESSAGE_REGISTER_FILE, nr, subnr);
+}
+
+/* This is almost always called with a numeric constant argument, so
+ * make things easy to evaluate at compile time:
+ */
+static inline unsigned cvt(unsigned val)
+{
+	switch (val) {
+	case 0: return 0;
+	case 1: return 1;
+	case 2: return 2;
+	case 4: return 3;
+	case 8: return 4;
+	case 16: return 5;
+	case 32: return 6;
+	}
+	return 0;
+}
+
+static inline struct brw_reg __stride(struct brw_reg reg,
+				    unsigned vstride,
+				    unsigned width,
+				    unsigned hstride)
+{
+	reg.vstride = cvt(vstride);
+	reg.width = cvt(width) - 1;
+	reg.hstride = cvt(hstride);
+	return reg;
+}
+
+static inline struct brw_reg vec16(struct brw_reg reg)
+{
+	return __stride(reg, 16,16,1);
+}
+
+static inline struct brw_reg vec8(struct brw_reg reg)
+{
+	return __stride(reg, 8,8,1);
+}
+
+static inline struct brw_reg vec4(struct brw_reg reg)
+{
+	return __stride(reg, 4,4,1);
+}
+
+static inline struct brw_reg vec2(struct brw_reg reg)
+{
+	return __stride(reg, 2,2,1);
+}
+
+static inline struct brw_reg vec1(struct brw_reg reg)
+{
+	return __stride(reg, 0,1,0);
+}
+
+static inline struct brw_reg get_element(struct brw_reg reg, unsigned elt)
+{
+	return vec1(__suboffset(reg, elt));
+}
+
+static inline struct brw_reg get_element_ud(struct brw_reg reg, unsigned elt)
+{
+	return vec1(__suboffset(__retype(reg, BRW_REGISTER_TYPE_UD), elt));
+}
+
+static inline struct brw_reg brw_swizzle(struct brw_reg reg,
+					 unsigned x,
+					 unsigned y,
+					 unsigned z,
+					 unsigned w)
+{
+	assert(reg.file != BRW_IMMEDIATE_VALUE);
+
+	reg.dw1.bits.swizzle = BRW_SWIZZLE4(BRW_GET_SWZ(reg.dw1.bits.swizzle, x),
+					    BRW_GET_SWZ(reg.dw1.bits.swizzle, y),
+					    BRW_GET_SWZ(reg.dw1.bits.swizzle, z),
+					    BRW_GET_SWZ(reg.dw1.bits.swizzle, w));
+	return reg;
+}
+
+static inline struct brw_reg brw_swizzle1(struct brw_reg reg,
+					  unsigned x)
+{
+	return brw_swizzle(reg, x, x, x, x);
+}
+
+static inline struct brw_reg brw_writemask(struct brw_reg reg,
+					   unsigned mask)
+{
+	assert(reg.file != BRW_IMMEDIATE_VALUE);
+	reg.dw1.bits.writemask &= mask;
+	return reg;
+}
+
+static inline struct brw_reg brw_set_writemask(struct brw_reg reg,
+					       unsigned mask)
+{
+	assert(reg.file != BRW_IMMEDIATE_VALUE);
+	reg.dw1.bits.writemask = mask;
+	return reg;
+}
+
+static inline struct brw_reg brw_negate(struct brw_reg reg)
+{
+	reg.negate ^= 1;
+	return reg;
+}
+
+static inline struct brw_reg brw_abs(struct brw_reg reg)
+{
+	reg.abs = 1;
+	return reg;
+}
+
+/***********************************************************************
+*/
+static inline struct brw_reg brw_vec4_indirect(unsigned subnr,
+					       int offset)
+{
+	struct brw_reg reg =  brw_vec4_grf(0, 0);
+	reg.subnr = subnr;
+	reg.address_mode = BRW_ADDRESS_REGISTER_INDIRECT_REGISTER;
+	reg.dw1.bits.indirect_offset = offset;
+	return reg;
+}
+
+static inline struct brw_reg brw_vec1_indirect(unsigned subnr,
+					       int offset)
+{
+	struct brw_reg reg =  brw_vec1_grf(0, 0);
+	reg.subnr = subnr;
+	reg.address_mode = BRW_ADDRESS_REGISTER_INDIRECT_REGISTER;
+	reg.dw1.bits.indirect_offset = offset;
+	return reg;
+}
+
+static inline struct brw_reg deref_4f(struct brw_indirect ptr, int offset)
+{
+	return brw_vec4_indirect(ptr.addr_subnr, ptr.addr_offset + offset);
+}
+
+static inline struct brw_reg deref_1f(struct brw_indirect ptr, int offset)
+{
+	return brw_vec1_indirect(ptr.addr_subnr, ptr.addr_offset + offset);
+}
+
+static inline struct brw_reg deref_4b(struct brw_indirect ptr, int offset)
+{
+	return __retype(deref_4f(ptr, offset), BRW_REGISTER_TYPE_B);
+}
+
+static inline struct brw_reg deref_1uw(struct brw_indirect ptr, int offset)
+{
+	return __retype(deref_1f(ptr, offset), BRW_REGISTER_TYPE_UW);
+}
+
+static inline struct brw_reg deref_1d(struct brw_indirect ptr, int offset)
+{
+	return __retype(deref_1f(ptr, offset), BRW_REGISTER_TYPE_D);
+}
+
+static inline struct brw_reg deref_1ud(struct brw_indirect ptr, int offset)
+{
+	return __retype(deref_1f(ptr, offset), BRW_REGISTER_TYPE_UD);
+}
+
+static inline struct brw_reg get_addr_reg(struct brw_indirect ptr)
+{
+	return brw_address_reg(ptr.addr_subnr);
+}
+
+static inline struct brw_indirect brw_indirect_offset(struct brw_indirect ptr, int offset)
+{
+	ptr.addr_offset += offset;
+	return ptr;
+}
+
+static inline struct brw_indirect brw_indirect(unsigned addr_subnr, int offset)
+{
+	struct brw_indirect ptr;
+	ptr.addr_subnr = addr_subnr;
+	ptr.addr_offset = offset;
+	ptr.pad = 0;
+	return ptr;
+}
+
+/** Do two brw_regs refer to the same register? */
+static inline bool brw_same_reg(struct brw_reg r1, struct brw_reg r2)
+{
+	return r1.file == r2.file && r1.nr == r2.nr;
+}
+
+static inline struct brw_instruction *current_insn( struct brw_compile *p)
+{
+	return &p->store[p->nr_insn];
+}
+
+static inline void brw_set_predicate_control( struct brw_compile *p, unsigned pc )
+{
+	p->current->header.predicate_control = pc;
+}
+
+static inline void brw_set_predicate_inverse(struct brw_compile *p, bool predicate_inverse)
+{
+	p->current->header.predicate_inverse = predicate_inverse;
+}
+
+static inline void brw_set_conditionalmod( struct brw_compile *p, unsigned conditional )
+{
+	p->current->header.destreg__conditionalmod = conditional;
+}
+
+static inline void brw_set_access_mode(struct brw_compile *p, unsigned access_mode)
+{
+	p->current->header.access_mode = access_mode;
+}
+
+static inline void brw_set_mask_control(struct brw_compile *p, unsigned value)
+{
+	p->current->header.mask_control = value;
+}
+
+static inline void brw_set_saturate(struct brw_compile *p, unsigned value)
+{
+	p->current->header.saturate = value;
+}
+
+static inline void brw_set_acc_write_control(struct brw_compile *p, unsigned value)
+{
+	if (p->gen >= 60)
+		p->current->header.acc_wr_control = value;
+}
+
+void brw_pop_insn_state(struct brw_compile *p);
+void brw_push_insn_state(struct brw_compile *p);
+void brw_set_compression_control(struct brw_compile *p, enum brw_compression control);
+void brw_set_predicate_control_flag_value( struct brw_compile *p, unsigned value );
+
+void brw_compile_init(struct brw_compile *p, int gen, void *store);
+
+void brw_set_dest(struct brw_compile *p, struct brw_instruction *insn,
+		  struct brw_reg dest);
+void brw_set_src0(struct brw_compile *p, struct brw_instruction *insn,
+		  struct brw_reg reg);
+void brw_set_src1(struct brw_compile *p,
+		  struct brw_instruction *insn,
+		  struct brw_reg reg);
+
+void gen6_resolve_implied_move(struct brw_compile *p,
+			       struct brw_reg *src,
+			       unsigned msg_reg_nr);
+
+static inline struct brw_instruction *
+brw_next_insn(struct brw_compile *p, unsigned opcode)
+{	/* Allocate the next instruction slot, seeded from the default state. */
+	struct brw_instruction *insn;
+
+	assert(p->nr_insn + 1 < BRW_EU_MAX_INSN);	/* room left in the store */
+
+	insn = &p->store[p->nr_insn++];
+	*insn = *p->current;	/* inherit current default instruction state */
+
+	if (p->current->header.destreg__conditionalmod) {	/* cond-mod is one-shot: clear it and auto-predicate the next insn */
+		p->current->header.destreg__conditionalmod = 0;
+		p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+	}
+
+	insn->header.opcode = opcode;
+	return insn;
+}
+
+/* Helpers for regular instructions: */
+#define ALU1(OP)							\
+static inline struct brw_instruction *brw_##OP(struct brw_compile *p,	\
+					       struct brw_reg dest,	\
+					       struct brw_reg src0)	\
+{									\
+   return brw_alu1(p, BRW_OPCODE_##OP, dest, src0);			\
+}
+
+#define ALU2(OP)							\
+static inline struct brw_instruction *brw_##OP(struct brw_compile *p,	\
+					       struct brw_reg dest,	\
+					       struct brw_reg src0,	\
+						struct brw_reg src1)	\
+{									\
+   return brw_alu2(p, BRW_OPCODE_##OP, dest, src0, src1);		\
+}
+
+/* Rounding operations (other than RNDD) require two instructions - the first
+ * stores a rounded value (possibly the wrong way) in the dest register, but
+ * also sets a per-channel "increment bit" in the flag register.  A predicated
+ * add of 1.0 fixes dest to contain the desired result.
+ *
+ * Sandybridge and later appear to round correctly without an ADD.
+ */
+#define ROUND(OP)							\
+static inline void brw_##OP(struct brw_compile *p,			\
+			    struct brw_reg dest,			\
+			    struct brw_reg src)				\
+{									\
+	struct brw_instruction *rnd, *add;				\
+	rnd = brw_next_insn(p, BRW_OPCODE_##OP);			\
+	brw_set_dest(p, rnd, dest);					\
+	brw_set_src0(p, rnd, src);					\
+	if (p->gen < 60) {						\
+		/* turn on round-increments */				\
+		rnd->header.destreg__conditionalmod = BRW_CONDITIONAL_R; \
+		add = brw_ADD(p, dest, dest, brw_imm_f(1.0f));		\
+		add->header.predicate_control = BRW_PREDICATE_NORMAL;	\
+	}								\
+}
+
+static inline struct brw_instruction *brw_alu1(struct brw_compile *p,
+					       unsigned opcode,
+					       struct brw_reg dest,
+					       struct brw_reg src)
+{
+	struct brw_instruction *insn = brw_next_insn(p, opcode);
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src);
+	return insn;
+}
+
+static inline struct brw_instruction *brw_alu2(struct brw_compile *p,
+					       unsigned opcode,
+					       struct brw_reg dest,
+					       struct brw_reg src0,
+					       struct brw_reg src1 )
+{
+	struct brw_instruction *insn = brw_next_insn(p, opcode);
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, src1);
+	return insn;
+}
+
+static inline struct brw_instruction *brw_ADD(struct brw_compile *p,
+					      struct brw_reg dest,
+					      struct brw_reg src0,
+					      struct brw_reg src1)
+{
+	/* 6.2.2: add */
+	if (src0.type == BRW_REGISTER_TYPE_F ||
+	    (src0.file == BRW_IMMEDIATE_VALUE &&
+	     src0.type == BRW_REGISTER_TYPE_VF)) {
+		assert(src1.type != BRW_REGISTER_TYPE_UD);
+		assert(src1.type != BRW_REGISTER_TYPE_D);
+	}
+
+	if (src1.type == BRW_REGISTER_TYPE_F ||
+	    (src1.file == BRW_IMMEDIATE_VALUE &&
+	     src1.type == BRW_REGISTER_TYPE_VF)) {
+		assert(src0.type != BRW_REGISTER_TYPE_UD);
+		assert(src0.type != BRW_REGISTER_TYPE_D);
+	}
+
+	return brw_alu2(p, BRW_OPCODE_ADD, dest, src0, src1);
+}
+
+static inline struct brw_instruction *brw_MUL(struct brw_compile *p,
+					      struct brw_reg dest,
+					      struct brw_reg src0,
+					      struct brw_reg src1)
+{
+	/* 6.32.38: mul */
+	if (src0.type == BRW_REGISTER_TYPE_D ||
+	    src0.type == BRW_REGISTER_TYPE_UD ||
+	    src1.type == BRW_REGISTER_TYPE_D ||
+	    src1.type == BRW_REGISTER_TYPE_UD) {
+		assert(dest.type != BRW_REGISTER_TYPE_F);
+	}
+
+	if (src0.type == BRW_REGISTER_TYPE_F ||
+	    (src0.file == BRW_IMMEDIATE_VALUE &&
+	     src0.type == BRW_REGISTER_TYPE_VF)) {
+		assert(src1.type != BRW_REGISTER_TYPE_UD);
+		assert(src1.type != BRW_REGISTER_TYPE_D);
+	}
+
+	if (src1.type == BRW_REGISTER_TYPE_F ||
+	    (src1.file == BRW_IMMEDIATE_VALUE &&
+	     src1.type == BRW_REGISTER_TYPE_VF)) {
+		assert(src0.type != BRW_REGISTER_TYPE_UD);
+		assert(src0.type != BRW_REGISTER_TYPE_D);
+	}
+
+	assert(src0.file != BRW_ARCHITECTURE_REGISTER_FILE ||
+	       src0.nr != BRW_ARF_ACCUMULATOR);
+	assert(src1.file != BRW_ARCHITECTURE_REGISTER_FILE ||
+	       src1.nr != BRW_ARF_ACCUMULATOR);
+
+	return brw_alu2(p, BRW_OPCODE_MUL, dest, src0, src1);
+}
+
+static inline struct brw_instruction *brw_JMPI(struct brw_compile *p,
+					       struct brw_reg dest,
+					       struct brw_reg src0,
+					       struct brw_reg src1)
+{
+	struct brw_instruction *insn = brw_alu2(p, BRW_OPCODE_JMPI, dest, src0, src1);
+
+	insn->header.execution_size = 1;
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.mask_control = BRW_MASK_DISABLE;
+
+	p->current->header.predicate_control = BRW_PREDICATE_NONE;
+
+	return insn;
+}
+
+
+ALU1(MOV);
+ALU2(SEL);
+ALU1(NOT);
+ALU2(AND);
+ALU2(OR);
+ALU2(XOR);
+ALU2(SHR);
+ALU2(SHL);
+ALU2(RSR);
+ALU2(RSL);
+ALU2(ASR);
+ALU1(FRC);
+ALU1(RNDD);
+ALU2(MAC);
+ALU2(MACH);
+ALU1(LZD);
+ALU2(DP4);
+ALU2(DPH);
+ALU2(DP3);
+ALU2(DP2);
+ALU2(LINE);
+ALU2(PLN);
+
+ROUND(RNDZ);
+ROUND(RNDE);
+
+#undef ALU1
+#undef ALU2
+#undef ROUND
+
+/* Helpers for SEND instruction */
+void brw_set_dp_read_message(struct brw_compile *p,
+			     struct brw_instruction *insn,
+			     unsigned binding_table_index,
+			     unsigned msg_control,
+			     unsigned msg_type,
+			     unsigned target_cache,
+			     unsigned msg_length,
+			     unsigned response_length);
+
+void brw_set_dp_write_message(struct brw_compile *p,
+			      struct brw_instruction *insn,
+			      unsigned binding_table_index,
+			      unsigned msg_control,
+			      unsigned msg_type,
+			      unsigned msg_length,
+			      bool header_present,
+			      bool last_render_target,
+			      unsigned response_length,
+			      bool end_of_thread,
+			      bool send_commit_msg);
+
+void brw_urb_WRITE(struct brw_compile *p,
+		   struct brw_reg dest,
+		   unsigned msg_reg_nr,
+		   struct brw_reg src0,
+		   bool allocate,
+		   bool used,
+		   unsigned msg_length,
+		   unsigned response_length,
+		   bool eot,
+		   bool writes_complete,
+		   unsigned offset,
+		   unsigned swizzle);
+
+void brw_ff_sync(struct brw_compile *p,
+		 struct brw_reg dest,
+		 unsigned msg_reg_nr,
+		 struct brw_reg src0,
+		 bool allocate,
+		 unsigned response_length,
+		 bool eot);
+
+void brw_fb_WRITE(struct brw_compile *p,
+		  int dispatch_width,
+                  unsigned msg_reg_nr,
+                  struct brw_reg src0,
+                  unsigned msg_control,
+                  unsigned binding_table_index,
+                  unsigned msg_length,
+                  unsigned response_length,
+                  bool eot,
+                  bool header_present);
+
+void brw_SAMPLE(struct brw_compile *p,
+		struct brw_reg dest,
+		unsigned msg_reg_nr,
+		struct brw_reg src0,
+		unsigned binding_table_index,
+		unsigned sampler,
+		unsigned writemask,
+		unsigned msg_type,
+		unsigned response_length,
+		unsigned msg_length,
+		bool header_present,
+		unsigned simd_mode);
+
+void brw_math_16(struct brw_compile *p,
+		 struct brw_reg dest,
+		 unsigned function,
+		 unsigned saturate,
+		 unsigned msg_reg_nr,
+		 struct brw_reg src,
+		 unsigned precision);
+
+void brw_math(struct brw_compile *p,
+	      struct brw_reg dest,
+	      unsigned function,
+	      unsigned saturate,
+	      unsigned msg_reg_nr,
+	      struct brw_reg src,
+	      unsigned data_type,
+	      unsigned precision);
+
+void brw_math2(struct brw_compile *p,
+	       struct brw_reg dest,
+	       unsigned function,
+	       struct brw_reg src0,
+	       struct brw_reg src1);
+
+void brw_oword_block_read(struct brw_compile *p,
+			  struct brw_reg dest,
+			  struct brw_reg mrf,
+			  uint32_t offset,
+			  uint32_t bind_table_index);
+
+void brw_oword_block_read_scratch(struct brw_compile *p,
+				  struct brw_reg dest,
+				  struct brw_reg mrf,
+				  int num_regs,
+				  unsigned offset);
+
+void brw_oword_block_write_scratch(struct brw_compile *p,
+				   struct brw_reg mrf,
+				   int num_regs,
+				   unsigned offset);
+
+void brw_dword_scattered_read(struct brw_compile *p,
+			      struct brw_reg dest,
+			      struct brw_reg mrf,
+			      uint32_t bind_table_index);
+
+void brw_dp_READ_4_vs(struct brw_compile *p,
+		      struct brw_reg dest,
+		      unsigned location,
+		      unsigned bind_table_index);
+
+void brw_dp_READ_4_vs_relative(struct brw_compile *p,
+			       struct brw_reg dest,
+			       struct brw_reg addrReg,
+			       unsigned offset,
+			       unsigned bind_table_index);
+
+/* If/else/endif.  Works by manipulating the execution flags on each
+ * channel.
+ */
+struct brw_instruction *brw_IF(struct brw_compile *p,
+			       unsigned execute_size);
+struct brw_instruction *gen6_IF(struct brw_compile *p, uint32_t conditional,
+				struct brw_reg src0, struct brw_reg src1);
+
+void brw_ELSE(struct brw_compile *p);
+void brw_ENDIF(struct brw_compile *p);
+
+/* DO/WHILE loops:
+*/
+struct brw_instruction *brw_DO(struct brw_compile *p,
+			       unsigned execute_size);
+
+struct brw_instruction *brw_WHILE(struct brw_compile *p,
+				  struct brw_instruction *patch_insn);
+
+struct brw_instruction *brw_BREAK(struct brw_compile *p, int pop_count);
+struct brw_instruction *brw_CONT(struct brw_compile *p, int pop_count);
+struct brw_instruction *gen6_CONT(struct brw_compile *p,
+				  struct brw_instruction *do_insn);
+/* Forward jumps:
+*/
+void brw_land_fwd_jump(struct brw_compile *p,
+		       struct brw_instruction *jmp_insn);
+
+void brw_NOP(struct brw_compile *p);
+
+void brw_WAIT(struct brw_compile *p);
+
+/* Special case: there is never a destination, execution size will be
+ * taken from src0:
+ */
+void brw_CMP(struct brw_compile *p,
+	     struct brw_reg dest,
+	     unsigned conditional,
+	     struct brw_reg src0,
+	     struct brw_reg src1);
+
+void brw_print_reg(struct brw_reg reg);
+
+static inline void brw_math_invert(struct brw_compile *p,
+				   struct brw_reg dst,
+				   struct brw_reg src)
+{
+	brw_math(p,
+		 dst,
+		 BRW_MATH_FUNCTION_INV,
+		 BRW_MATH_SATURATE_NONE,
+		 0,
+		 src,
+		 BRW_MATH_PRECISION_FULL,
+		 BRW_MATH_DATA_VECTOR);
+}
+
+void brw_set_uip_jip(struct brw_compile *p);
+
+uint32_t brw_swap_cmod(uint32_t cmod);
+
+void brw_disasm(FILE *file,
+		const struct brw_instruction *inst,
+		int gen);
+
+#endif
diff --git a/src/sna/brw/brw_eu_debug.c b/src/sna/brw/brw_eu_debug.c
new file mode 100644
index 0000000..99453af
--- /dev/null
+++ b/src/sna/brw/brw_eu_debug.c
@@ -0,0 +1,95 @@
+/*
+ Copyright (C) Intel Corp.  2006.  All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+ 
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ 
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ 
+ **********************************************************************/
+ /*
+  * Authors:
+  *   Keith Whitwell <keith at tungstengraphics.com>
+  */
+    
+
+#include "main/mtypes.h"
+#include "main/imports.h"
+#include "brw_eu.h"
+
+void brw_print_reg( struct brw_reg hwreg )
+{
+   static const char *file[] = {
+      "arf",
+      "grf",
+      "msg",
+      "imm"
+   };
+
+   static const char *type[] = {
+      "ud",
+      "d",
+      "uw",
+      "w",
+      "ub",
+      "vf",
+      "hf",
+      "f"
+   };
+
+   printf("%s%s", 
+	  hwreg.abs ? "abs/" : "",
+	  hwreg.negate ? "-" : "");
+     
+   if (hwreg.file == BRW_GENERAL_REGISTER_FILE &&
+       hwreg.nr % 2 == 0 &&
+       hwreg.subnr == 0 &&
+       hwreg.vstride == BRW_VERTICAL_STRIDE_8 &&
+       hwreg.width == BRW_WIDTH_8 &&
+       hwreg.hstride == BRW_HORIZONTAL_STRIDE_1 &&
+       hwreg.type == BRW_REGISTER_TYPE_F) {
+      /* vector register */
+      printf("vec%d", hwreg.nr);
+   }
+   else if (hwreg.file == BRW_GENERAL_REGISTER_FILE &&
+	    hwreg.vstride == BRW_VERTICAL_STRIDE_0 &&
+	    hwreg.width == BRW_WIDTH_1 &&
+	    hwreg.hstride == BRW_HORIZONTAL_STRIDE_0 &&
+	    hwreg.type == BRW_REGISTER_TYPE_F) {      
+      /* "scalar" register */
+      printf("scl%d.%d", hwreg.nr, hwreg.subnr / 4);
+   }
+   else if (hwreg.file == BRW_IMMEDIATE_VALUE) {
+      printf("imm %f", hwreg.dw1.f);
+   }
+   else {
+      printf("%s%d.%d<%d;%d,%d>:%s", 
+		   file[hwreg.file],
+		   hwreg.nr,
+		   hwreg.subnr / type_sz(hwreg.type),
+		   hwreg.vstride ? (1<<(hwreg.vstride-1)) : 0,
+		   1<<hwreg.width,
+		   hwreg.hstride ? (1<<(hwreg.hstride-1)) : 0,		
+		   type[hwreg.type]);
+   }
+}
+
+
+
diff --git a/src/sna/brw/brw_eu_emit.c b/src/sna/brw/brw_eu_emit.c
new file mode 100644
index 0000000..3f01ae7
--- /dev/null
+++ b/src/sna/brw/brw_eu_emit.c
@@ -0,0 +1,2002 @@
+/*
+   Copyright (C) Intel Corp.  2006.  All Rights Reserved.
+   Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+   develop this 3D driver.
+
+   Permission is hereby granted, free of charge, to any person obtaining
+   a copy of this software and associated documentation files (the
+   "Software"), to deal in the Software without restriction, including
+   without limitation the rights to use, copy, modify, merge, publish,
+   distribute, sublicense, and/or sell copies of the Software, and to
+   permit persons to whom the Software is furnished to do so, subject to
+   the following conditions:
+
+   The above copyright notice and this permission notice (including the
+   next paragraph) shall be included in all copies or substantial
+   portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+   EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+   MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+   IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+   LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+   OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+   WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **********************************************************************/
+/*
+ * Authors:
+ *   Keith Whitwell <keith at tungstengraphics.com>
+ */
+
+#include "brw_eu.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+/***********************************************************************
+ * Internal helper for constructing instructions
+ */
+
+static void guess_execution_size(struct brw_compile *p,
+				 struct brw_instruction *insn,
+				 struct brw_reg reg)
+{
+	if (reg.width == BRW_WIDTH_8 && p->compressed)	/* compressed mode: 8-wide regs run as SIMD16 pairs */
+		insn->header.execution_size = BRW_EXECUTE_16;
+	else
+		insn->header.execution_size = reg.width;	/* BRW_WIDTH_* encoding doubles as BRW_EXECUTE_* here */
+}
+
+
+/**
+ * Prior to Sandybridge, the SEND instruction accepted non-MRF source
+ * registers, implicitly moving the operand to a message register.
+ *
+ * On Sandybridge, this is no longer the case.  This function performs the
+ * explicit move; it should be called before emitting a SEND instruction.
+ */
+void
+gen6_resolve_implied_move(struct brw_compile *p,
+			  struct brw_reg *src,
+			  unsigned msg_reg_nr)
+{
+	if (p->gen < 60)
+		return;
+
+	if (src->file == BRW_MESSAGE_REGISTER_FILE)
+		return;
+
+	if (src->file != BRW_ARCHITECTURE_REGISTER_FILE || src->nr != BRW_ARF_NULL) {
+		brw_push_insn_state(p);
+		brw_set_mask_control(p, BRW_MASK_DISABLE);
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+		brw_MOV(p, __retype_ud(brw_message_reg(msg_reg_nr)), __retype_ud(*src));
+		brw_pop_insn_state(p);
+	}
+	*src = brw_message_reg(msg_reg_nr);
+}
+
+static void
+gen7_convert_mrf_to_grf(struct brw_compile *p, struct brw_reg *reg)
+{
+	/* From the BSpec / ISA Reference / send - [DevIVB+]:
+	 * "The send with EOT should use register space R112-R127 for <src>. This is
+	 *  to enable loading of a new thread into the same slot while the message
+	 *  with EOT for current thread is pending dispatch."
+	 *
+	 * Since we're pretending to have 16 MRFs anyway, we may as well use the
+	 * registers required for messages with EOT.
+	 */
+	if (p->gen >= 70 && reg->file == BRW_MESSAGE_REGISTER_FILE) {
+		reg->file = BRW_GENERAL_REGISTER_FILE;
+		reg->nr += 111;
+	}
+}
+
+void
+brw_set_dest(struct brw_compile *p, struct brw_instruction *insn,
+	     struct brw_reg dest)
+{
+	if (dest.file != BRW_ARCHITECTURE_REGISTER_FILE &&
+	    dest.file != BRW_MESSAGE_REGISTER_FILE)
+		assert(dest.nr < 128);
+
+	gen7_convert_mrf_to_grf(p, &dest);
+
+	insn->bits1.da1.dest_reg_file = dest.file;
+	insn->bits1.da1.dest_reg_type = dest.type;
+	insn->bits1.da1.dest_address_mode = dest.address_mode;
+
+	if (dest.address_mode == BRW_ADDRESS_DIRECT) {
+		insn->bits1.da1.dest_reg_nr = dest.nr;
+
+		if (insn->header.access_mode == BRW_ALIGN_1) {
+			insn->bits1.da1.dest_subreg_nr = dest.subnr;
+			if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
+				dest.hstride = BRW_HORIZONTAL_STRIDE_1;
+			insn->bits1.da1.dest_horiz_stride = dest.hstride;
+		} else {
+			insn->bits1.da16.dest_subreg_nr = dest.subnr / 16;
+			insn->bits1.da16.dest_writemask = dest.dw1.bits.writemask;
+			/* even ignored in da16, still need to set as '01' */
+			insn->bits1.da16.dest_horiz_stride = 1;
+		}
+	} else {
+		insn->bits1.ia1.dest_subreg_nr = dest.subnr;
+
+		/* These are different sizes in align1 vs align16:
+		*/
+		if (insn->header.access_mode == BRW_ALIGN_1) {
+			insn->bits1.ia1.dest_indirect_offset = dest.dw1.bits.indirect_offset;
+			if (dest.hstride == BRW_HORIZONTAL_STRIDE_0)
+				dest.hstride = BRW_HORIZONTAL_STRIDE_1;
+			insn->bits1.ia1.dest_horiz_stride = dest.hstride;
+		}
+		else {
+			insn->bits1.ia16.dest_indirect_offset = dest.dw1.bits.indirect_offset;
+			/* even ignored in da16, still need to set as '01' */
+			insn->bits1.ia16.dest_horiz_stride = 1;
+		}
+	}
+
+	guess_execution_size(p, insn, dest);
+}
+
+static const int reg_type_size[8] = {
+	[0] = 4,
+	[1] = 4,
+	[2] = 2,
+	[3] = 2,
+	[4] = 1,
+	[5] = 1,
+	[7] = 4
+};
+
+static void
+validate_reg(struct brw_instruction *insn, struct brw_reg reg)
+{
+	int hstride_for_reg[] = {0, 1, 2, 4};
+	int vstride_for_reg[] = {0, 1, 2, 4, 8, 16, 32, 64, 128, 256};
+	int width_for_reg[] = {1, 2, 4, 8, 16};
+	int execsize_for_reg[] = {1, 2, 4, 8, 16};
+	int width, hstride, vstride, execsize;
+
+	if (reg.file == BRW_IMMEDIATE_VALUE) {
+		/* 3.3.6: Region Parameters.  Restriction: Immediate vectors
+		 * mean the destination has to be 128-bit aligned and the
+		 * destination horiz stride has to be a word.
+		 */
+		if (reg.type == BRW_REGISTER_TYPE_V) {
+			assert(hstride_for_reg[insn->bits1.da1.dest_horiz_stride] *
+			       reg_type_size[insn->bits1.da1.dest_reg_type] == 2);
+		}
+
+		return;
+	}
+
+	if (reg.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+	    reg.nr == BRW_ARF_NULL)	/* was reg.file: only skip checks for the null reg */
+		return;
+
+	hstride = hstride_for_reg[reg.hstride];
+
+	if (reg.vstride == 0xf) {
+		vstride = -1;
+	} else {
+		vstride = vstride_for_reg[reg.vstride];
+	}
+
+	width = width_for_reg[reg.width];
+
+	execsize = execsize_for_reg[insn->header.execution_size];
+
+	/* Restrictions from 3.3.10: Register Region Restrictions. */
+	/* 3. */
+	assert(execsize >= width);
+
+	/* 4. */
+	if (execsize == width && hstride != 0) {
+		assert(vstride == -1 || vstride == width * hstride);
+	}
+
+	/* 5. */
+	if (execsize == width && hstride == 0) {
+		/* no restriction on vstride. */
+	}
+
+	/* 6. */
+	if (width == 1) {
+		assert(hstride == 0);
+	}
+
+	/* 7. */
+	if (execsize == 1 && width == 1) {
+		assert(hstride == 0);
+		assert(vstride == 0);
+	}
+
+	/* 8. */
+	if (vstride == 0 && hstride == 0) {
+		assert(width == 1);
+	}
+
+	/* 10. Check destination issues. */
+}
+
+void
+brw_set_src0(struct brw_compile *p, struct brw_instruction *insn,
+	     struct brw_reg reg)
+{
+	if (reg.file != BRW_ARCHITECTURE_REGISTER_FILE)	/* was reg.type: compare the file, as brw_set_dest does */
+		assert(reg.nr < 128);
+
+	gen7_convert_mrf_to_grf(p, &reg);
+
+	validate_reg(insn, reg);
+
+	insn->bits1.da1.src0_reg_file = reg.file;
+	insn->bits1.da1.src0_reg_type = reg.type;
+	insn->bits2.da1.src0_abs = reg.abs;
+	insn->bits2.da1.src0_negate = reg.negate;
+	insn->bits2.da1.src0_address_mode = reg.address_mode;
+
+	if (reg.file == BRW_IMMEDIATE_VALUE) {
+		insn->bits3.ud = reg.dw1.ud;
+
+		/* Required to set some fields in src1 as well:
+		*/
+		insn->bits1.da1.src1_reg_file = 0; /* arf */
+		insn->bits1.da1.src1_reg_type = reg.type;
+	} else {
+		if (reg.address_mode == BRW_ADDRESS_DIRECT) {
+			if (insn->header.access_mode == BRW_ALIGN_1) {
+				insn->bits2.da1.src0_subreg_nr = reg.subnr;
+				insn->bits2.da1.src0_reg_nr = reg.nr;
+			} else {
+				insn->bits2.da16.src0_subreg_nr = reg.subnr / 16;
+				insn->bits2.da16.src0_reg_nr = reg.nr;
+			}
+		} else {
+			insn->bits2.ia1.src0_subreg_nr = reg.subnr;
+
+			if (insn->header.access_mode == BRW_ALIGN_1) {
+				insn->bits2.ia1.src0_indirect_offset = reg.dw1.bits.indirect_offset;
+			} else {
+				insn->bits2.ia16.src0_subreg_nr = reg.dw1.bits.indirect_offset;
+			}
+		}
+
+		if (insn->header.access_mode == BRW_ALIGN_1) {
+			if (reg.width == BRW_WIDTH_1 &&
+			    insn->header.execution_size == BRW_EXECUTE_1) {
+				insn->bits2.da1.src0_horiz_stride = BRW_HORIZONTAL_STRIDE_0;
+				insn->bits2.da1.src0_width = BRW_WIDTH_1;
+				insn->bits2.da1.src0_vert_stride = BRW_VERTICAL_STRIDE_0;
+			} else {
+				insn->bits2.da1.src0_horiz_stride = reg.hstride;
+				insn->bits2.da1.src0_width = reg.width;
+				insn->bits2.da1.src0_vert_stride = reg.vstride;
+			}
+		} else {
+			insn->bits2.da16.src0_swz_x = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X);
+			insn->bits2.da16.src0_swz_y = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y);
+			insn->bits2.da16.src0_swz_z = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z);
+			insn->bits2.da16.src0_swz_w = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W);
+
+			/* This is an oddity of the fact we're using the same
+			 * descriptions for registers in align_16 as align_1:
+			 */
+			if (reg.vstride == BRW_VERTICAL_STRIDE_8)
+				insn->bits2.da16.src0_vert_stride = BRW_VERTICAL_STRIDE_4;
+			else
+				insn->bits2.da16.src0_vert_stride = reg.vstride;
+		}
+	}
+}
+
/* Encode @reg as the second source operand of @insn.
 *
 * src1's register file/type live in bits1 alongside src0; the rest of
 * the operand (or a full immediate) occupies bits3.
 */
void brw_set_src1(struct brw_compile *p,
		  struct brw_instruction *insn,
		  struct brw_reg reg)
{
	assert(reg.file != BRW_MESSAGE_REGISTER_FILE);
	assert(reg.nr < 128);

	/* Gen7 has no MRF space; message registers are remapped onto GRFs. */
	gen7_convert_mrf_to_grf(p, &reg);

	validate_reg(insn, reg);

	insn->bits1.da1.src1_reg_file = reg.file;
	insn->bits1.da1.src1_reg_type = reg.type;
	insn->bits3.da1.src1_abs = reg.abs;
	insn->bits3.da1.src1_negate = reg.negate;

	/* Only src1 can be immediate in two-argument instructions. */
	assert(insn->bits1.da1.src0_reg_file != BRW_IMMEDIATE_VALUE);

	if (reg.file == BRW_IMMEDIATE_VALUE) {
		/* An immediate src1 takes over the whole of bits3. */
		insn->bits3.ud = reg.dw1.ud;
	} else {
		/* This is a hardware restriction, which may or may not be lifted
		 * in the future:
		 */
		assert (reg.address_mode == BRW_ADDRESS_DIRECT);
		/* assert (reg.file == BRW_GENERAL_REGISTER_FILE); */

		if (insn->header.access_mode == BRW_ALIGN_1) {
			insn->bits3.da1.src1_subreg_nr = reg.subnr;
			insn->bits3.da1.src1_reg_nr = reg.nr;
		} else {
			/* Align16 subregisters are addressed in 16-byte units. */
			insn->bits3.da16.src1_subreg_nr = reg.subnr / 16;
			insn->bits3.da16.src1_reg_nr = reg.nr;
		}

		if (insn->header.access_mode == BRW_ALIGN_1) {
			/* A <.,1,.> region in a SIMD1 instruction is forced to
			 * the canonical scalar region <0,1,0>.
			 */
			if (reg.width == BRW_WIDTH_1 &&
			    insn->header.execution_size == BRW_EXECUTE_1) {
				insn->bits3.da1.src1_horiz_stride = BRW_HORIZONTAL_STRIDE_0;
				insn->bits3.da1.src1_width = BRW_WIDTH_1;
				insn->bits3.da1.src1_vert_stride = BRW_VERTICAL_STRIDE_0;
			} else {
				insn->bits3.da1.src1_horiz_stride = reg.hstride;
				insn->bits3.da1.src1_width = reg.width;
				insn->bits3.da1.src1_vert_stride = reg.vstride;
			}
		} else {
			insn->bits3.da16.src1_swz_x = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_X);
			insn->bits3.da16.src1_swz_y = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Y);
			insn->bits3.da16.src1_swz_z = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_Z);
			insn->bits3.da16.src1_swz_w = BRW_GET_SWZ(reg.dw1.bits.swizzle, BRW_CHANNEL_W);

			/* This is an oddity of the fact we're using the same
			 * descriptions for registers in align_16 as align_1:
			 */
			if (reg.vstride == BRW_VERTICAL_STRIDE_8)
				insn->bits3.da16.src1_vert_stride = BRW_VERTICAL_STRIDE_4;
			else
				insn->bits3.da16.src1_vert_stride = reg.vstride;
		}
	}
}
+
+/**
+ * Set the Message Descriptor and Extended Message Descriptor fields
+ * for SEND messages.
+ *
+ * \note This zeroes out the Function Control bits, so it must be called
+ *       \b before filling out any message-specific data.  Callers can
+ *       choose not to fill in irrelevant bits; they will be zero.
+ */
+static void
+brw_set_message_descriptor(struct brw_compile *p,
+			   struct brw_instruction *inst,
+			   enum brw_message_target sfid,
+			   unsigned msg_length,
+			   unsigned response_length,
+			   bool header_present,
+			   bool end_of_thread)
+{
+	brw_set_src1(p, inst, brw_imm_d(0));
+
+	if (p->gen >= 50) {
+		inst->bits3.generic_gen5.header_present = header_present;
+		inst->bits3.generic_gen5.response_length = response_length;
+		inst->bits3.generic_gen5.msg_length = msg_length;
+		inst->bits3.generic_gen5.end_of_thread = end_of_thread;
+
+		if (p->gen >= 60) {
+			/* On Gen6+ Message target/SFID goes in bits 27:24 of the header */
+			inst->header.destreg__conditionalmod = sfid;
+		} else {
+			/* Set Extended Message Descriptor (ex_desc) */
+			inst->bits2.send_gen5.sfid = sfid;
+			inst->bits2.send_gen5.end_of_thread = end_of_thread;
+		}
+	} else {
+		inst->bits3.generic.response_length = response_length;
+		inst->bits3.generic.msg_length = msg_length;
+		inst->bits3.generic.msg_target = sfid;
+		inst->bits3.generic.end_of_thread = end_of_thread;
+	}
+}
+
+
+static void brw_set_math_message(struct brw_compile *p,
+				 struct brw_instruction *insn,
+				 unsigned function,
+				 unsigned integer_type,
+				 bool low_precision,
+				 bool saturate,
+				 unsigned dataType)
+{
+	unsigned msg_length;
+	unsigned response_length;
+
+	/* Infer message length from the function */
+	switch (function) {
+	case BRW_MATH_FUNCTION_POW:
+	case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT:
+	case BRW_MATH_FUNCTION_INT_DIV_REMAINDER:
+	case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
+		msg_length = 2;
+		break;
+	default:
+		msg_length = 1;
+		break;
+	}
+
+	/* Infer response length from the function */
+	switch (function) {
+	case BRW_MATH_FUNCTION_SINCOS:
+	case BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER:
+		response_length = 2;
+		break;
+	default:
+		response_length = 1;
+		break;
+	}
+
+	brw_set_message_descriptor(p, insn, BRW_SFID_MATH,
+				   msg_length, response_length,
+				   false, false);
+	if (p->gen == 50) {
+		insn->bits3.math_gen5.function = function;
+		insn->bits3.math_gen5.int_type = integer_type;
+		insn->bits3.math_gen5.precision = low_precision;
+		insn->bits3.math_gen5.saturate = saturate;
+		insn->bits3.math_gen5.data_type = dataType;
+		insn->bits3.math_gen5.snapshot = 0;
+	} else {
+		insn->bits3.math.function = function;
+		insn->bits3.math.int_type = integer_type;
+		insn->bits3.math.precision = low_precision;
+		insn->bits3.math.saturate = saturate;
+		insn->bits3.math.data_type = dataType;
+	}
+}
+
+static void brw_set_ff_sync_message(struct brw_compile *p,
+				    struct brw_instruction *insn,
+				    bool allocate,
+				    unsigned response_length,
+				    bool end_of_thread)
+{
+	brw_set_message_descriptor(p, insn, BRW_SFID_URB,
+				   1, response_length,
+				   true, end_of_thread);
+	insn->bits3.urb_gen5.opcode = 1; /* FF_SYNC */
+	insn->bits3.urb_gen5.offset = 0; /* Not used by FF_SYNC */
+	insn->bits3.urb_gen5.swizzle_control = 0; /* Not used by FF_SYNC */
+	insn->bits3.urb_gen5.allocate = allocate;
+	insn->bits3.urb_gen5.used = 0; /* Not used by FF_SYNC */
+	insn->bits3.urb_gen5.complete = 0; /* Not used by FF_SYNC */
+}
+
+static void brw_set_urb_message(struct brw_compile *p,
+				struct brw_instruction *insn,
+				bool allocate,
+				bool used,
+				unsigned msg_length,
+				unsigned response_length,
+				bool end_of_thread,
+				bool complete,
+				unsigned offset,
+				unsigned swizzle_control)
+{
+	brw_set_message_descriptor(p, insn, BRW_SFID_URB,
+				   msg_length, response_length, true, end_of_thread);
+	if (p->gen >= 70) {
+		insn->bits3.urb_gen7.opcode = 0;	/* URB_WRITE_HWORD */
+		insn->bits3.urb_gen7.offset = offset;
+		assert(swizzle_control != BRW_URB_SWIZZLE_TRANSPOSE);
+		insn->bits3.urb_gen7.swizzle_control = swizzle_control;
+		/* per_slot_offset = 0 makes it ignore offsets in message header */
+		insn->bits3.urb_gen7.per_slot_offset = 0;
+		insn->bits3.urb_gen7.complete = complete;
+	} else if (p->gen >= 50) {
+		insn->bits3.urb_gen5.opcode = 0;	/* URB_WRITE */
+		insn->bits3.urb_gen5.offset = offset;
+		insn->bits3.urb_gen5.swizzle_control = swizzle_control;
+		insn->bits3.urb_gen5.allocate = allocate;
+		insn->bits3.urb_gen5.used = used;	/* ? */
+		insn->bits3.urb_gen5.complete = complete;
+	} else {
+		insn->bits3.urb.opcode = 0;	/* ? */
+		insn->bits3.urb.offset = offset;
+		insn->bits3.urb.swizzle_control = swizzle_control;
+		insn->bits3.urb.allocate = allocate;
+		insn->bits3.urb.used = used;	/* ? */
+		insn->bits3.urb.complete = complete;
+	}
+}
+
+void
+brw_set_dp_write_message(struct brw_compile *p,
+			 struct brw_instruction *insn,
+			 unsigned binding_table_index,
+			 unsigned msg_control,
+			 unsigned msg_type,
+			 unsigned msg_length,
+			 bool header_present,
+			 bool last_render_target,
+			 unsigned response_length,
+			 bool end_of_thread,
+			 bool send_commit_msg)
+{
+	unsigned sfid;
+
+	if (p->gen >= 70) {
+		/* Use the Render Cache for RT writes; otherwise use the Data Cache */
+		if (msg_type == GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE)
+			sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
+		else
+			sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
+	} else if (p->gen >= 60) {
+		/* Use the render cache for all write messages. */
+		sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
+	} else {
+		sfid = BRW_SFID_DATAPORT_WRITE;
+	}
+
+	brw_set_message_descriptor(p, insn, sfid,
+				   msg_length, response_length,
+				   header_present, end_of_thread);
+
+	if (p->gen >= 70) {
+		insn->bits3.gen7_dp.binding_table_index = binding_table_index;
+		insn->bits3.gen7_dp.msg_control = msg_control;
+		insn->bits3.gen7_dp.last_render_target = last_render_target;
+		insn->bits3.gen7_dp.msg_type = msg_type;
+	} else if (p->gen >= 60) {
+		insn->bits3.gen6_dp.binding_table_index = binding_table_index;
+		insn->bits3.gen6_dp.msg_control = msg_control;
+		insn->bits3.gen6_dp.last_render_target = last_render_target;
+		insn->bits3.gen6_dp.msg_type = msg_type;
+		insn->bits3.gen6_dp.send_commit_msg = send_commit_msg;
+	} else if (p->gen >= 50) {
+		insn->bits3.dp_write_gen5.binding_table_index = binding_table_index;
+		insn->bits3.dp_write_gen5.msg_control = msg_control;
+		insn->bits3.dp_write_gen5.last_render_target = last_render_target;
+		insn->bits3.dp_write_gen5.msg_type = msg_type;
+		insn->bits3.dp_write_gen5.send_commit_msg = send_commit_msg;
+	} else {
+		insn->bits3.dp_write.binding_table_index = binding_table_index;
+		insn->bits3.dp_write.msg_control = msg_control;
+		insn->bits3.dp_write.last_render_target = last_render_target;
+		insn->bits3.dp_write.msg_type = msg_type;
+		insn->bits3.dp_write.send_commit_msg = send_commit_msg;
+	}
+}
+
/* Build a dataport read message descriptor.
 *
 * @target_cache selects which cache services the read on pre-gen7
 * hardware; gen7 always uses the data cache.
 */
void
brw_set_dp_read_message(struct brw_compile *p,
			struct brw_instruction *insn,
			unsigned binding_table_index,
			unsigned msg_control,
			unsigned msg_type,
			unsigned target_cache,
			unsigned msg_length,
			unsigned response_length)
{
	unsigned sfid;

	if (p->gen >= 70) {
		sfid = GEN7_SFID_DATAPORT_DATA_CACHE;
	} else if (p->gen >= 60) {
		/* Gen6 splits reads between the render and sampler caches. */
		if (target_cache == BRW_DATAPORT_READ_TARGET_RENDER_CACHE)
			sfid = GEN6_SFID_DATAPORT_RENDER_CACHE;
		else
			sfid = GEN6_SFID_DATAPORT_SAMPLER_CACHE;
	} else {
		sfid = BRW_SFID_DATAPORT_READ;
	}

	/* Reads always carry a header and never terminate the thread. */
	brw_set_message_descriptor(p, insn, sfid,
				   msg_length, response_length,
				   true, false);

	/* Function-control layout differs per generation; the trailing
	 * comments give the bit positions within the descriptor.
	 */
	if (p->gen >= 70) {
		insn->bits3.gen7_dp.binding_table_index = binding_table_index;
		insn->bits3.gen7_dp.msg_control = msg_control;
		insn->bits3.gen7_dp.last_render_target = 0;
		insn->bits3.gen7_dp.msg_type = msg_type;
	} else if (p->gen >= 60) {
		insn->bits3.gen6_dp.binding_table_index = binding_table_index;
		insn->bits3.gen6_dp.msg_control = msg_control;
		insn->bits3.gen6_dp.last_render_target = 0;
		insn->bits3.gen6_dp.msg_type = msg_type;
		insn->bits3.gen6_dp.send_commit_msg = 0;
	} else if (p->gen >= 50) {
		insn->bits3.dp_read_gen5.binding_table_index = binding_table_index;
		insn->bits3.dp_read_gen5.msg_control = msg_control;
		insn->bits3.dp_read_gen5.msg_type = msg_type;
		insn->bits3.dp_read_gen5.target_cache = target_cache;
	} else if (p->gen >= 45) {
		insn->bits3.dp_read_g4x.binding_table_index = binding_table_index; /*0:7*/
		insn->bits3.dp_read_g4x.msg_control = msg_control;  /*8:10*/
		insn->bits3.dp_read_g4x.msg_type = msg_type;  /*11:13*/
		insn->bits3.dp_read_g4x.target_cache = target_cache;  /*14:15*/
	} else {
		insn->bits3.dp_read.binding_table_index = binding_table_index; /*0:7*/
		insn->bits3.dp_read.msg_control = msg_control;  /*8:11*/
		insn->bits3.dp_read.msg_type = msg_type;  /*12:13*/
		insn->bits3.dp_read.target_cache = target_cache;  /*14:15*/
	}
}
+
+static void brw_set_sampler_message(struct brw_compile *p,
+                                    struct brw_instruction *insn,
+                                    unsigned binding_table_index,
+                                    unsigned sampler,
+                                    unsigned msg_type,
+                                    unsigned response_length,
+                                    unsigned msg_length,
+                                    bool header_present,
+                                    unsigned simd_mode)
+{
+	brw_set_message_descriptor(p, insn, BRW_SFID_SAMPLER,
+				   msg_length, response_length,
+				   header_present, false);
+
+	if (p->gen >= 70) {
+		insn->bits3.sampler_gen7.binding_table_index = binding_table_index;
+		insn->bits3.sampler_gen7.sampler = sampler;
+		insn->bits3.sampler_gen7.msg_type = msg_type;
+		insn->bits3.sampler_gen7.simd_mode = simd_mode;
+	} else if (p->gen >= 50) {
+		insn->bits3.sampler_gen5.binding_table_index = binding_table_index;
+		insn->bits3.sampler_gen5.sampler = sampler;
+		insn->bits3.sampler_gen5.msg_type = msg_type;
+		insn->bits3.sampler_gen5.simd_mode = simd_mode;
+	} else if (p->gen >= 45) {
+		insn->bits3.sampler_g4x.binding_table_index = binding_table_index;
+		insn->bits3.sampler_g4x.sampler = sampler;
+		insn->bits3.sampler_g4x.msg_type = msg_type;
+	} else {
+		insn->bits3.sampler.binding_table_index = binding_table_index;
+		insn->bits3.sampler.sampler = sampler;
+		insn->bits3.sampler.msg_type = msg_type;
+		insn->bits3.sampler.return_format = BRW_SAMPLER_RETURN_FORMAT_FLOAT32;
+	}
+}
+
+
+void brw_NOP(struct brw_compile *p)
+{
+	struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_NOP);
+	brw_set_dest(p, insn, __retype_ud(brw_vec4_grf(0,0)));
+	brw_set_src0(p, insn, __retype_ud(brw_vec4_grf(0,0)));
+	brw_set_src1(p, insn, brw_imm_ud(0x0));
+}
+
+/***********************************************************************
+ * Comparisons, if/else/endif
+ */
+
+static void
+push_if_stack(struct brw_compile *p, struct brw_instruction *inst)
+{
+	p->if_stack[p->if_stack_depth] = inst;
+
+	p->if_stack_depth++;
+	if (p->if_stack_array_size <= p->if_stack_depth) {
+		p->if_stack_array_size *= 2;
+		p->if_stack = realloc(p->if_stack, sizeof(struct brw_instruction *)*p->if_stack_array_size);
+	}
+}
+
+/* EU takes the value from the flag register and pushes it onto some
+ * sort of a stack (presumably merging with any flag value already on
+ * the stack).  Within an if block, the flags at the top of the stack
+ * control execution on each channel of the unit, eg. on each of the
+ * 16 pixel values in our wm programs.
+ *
+ * When the matching 'else' instruction is reached (presumably by
+ * countdown of the instruction count patched in by our ELSE/ENDIF
+ * functions), the relevent flags are inverted.
+ *
+ * When the matching 'endif' instruction is reached, the flags are
+ * popped off.  If the stack is now empty, normal execution resumes.
+ */
/* Open a conditional block: emit an IF (jump targets are filled in later
 * by brw_ENDIF() via patch_IF_ELSE()) and push it on the if-stack.
 *
 * Returns the emitted IF instruction.
 */
struct brw_instruction *
brw_IF(struct brw_compile *p, unsigned execute_size)
{
	struct brw_instruction *insn;

	insn = brw_next_insn(p, BRW_OPCODE_IF);

	/* Override the defaults for this instruction: */
	if (p->gen < 60) {
		/* Pre-gen6: an IP-relative jump; offset patched in later. */
		brw_set_dest(p, insn, brw_ip_reg());
		brw_set_src0(p, insn, brw_ip_reg());
		brw_set_src1(p, insn, brw_imm_d(0x0));
	} else if (p->gen < 70) {
		/* Gen6: jump count lives in bits1; sources are null. */
		brw_set_dest(p, insn, brw_imm_w(0));
		insn->bits1.branch_gen6.jump_count = 0;
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, __retype_d(brw_null_reg()));
	} else {
		/* Gen7: JIP/UIP in bits3, filled in by patch_IF_ELSE(). */
		brw_set_dest(p, insn, __retype_d(brw_null_reg()));
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, brw_imm_ud(0));
		insn->bits3.break_cont.jip = 0;
		insn->bits3.break_cont.uip = 0;
	}

	insn->header.execution_size = execute_size;
	insn->header.compression_control = BRW_COMPRESSION_NONE;
	insn->header.predicate_control = BRW_PREDICATE_NORMAL;
	insn->header.mask_control = BRW_MASK_ENABLE;
	if (!p->single_program_flow)
		insn->header.thread_control = BRW_THREAD_SWITCH;

	/* The predicate is consumed by the IF itself; stop predicating
	 * subsequent instructions. */
	p->current->header.predicate_control = BRW_PREDICATE_NONE;

	push_if_stack(p, insn);
	return insn;
}
+
+/* This function is only used for gen6-style IF instructions with an
+ * embedded comparison (conditional modifier).  It is not used on gen7.
+ */
+struct brw_instruction *
+gen6_IF(struct brw_compile *p, uint32_t conditional,
+	struct brw_reg src0, struct brw_reg src1)
+{
+	struct brw_instruction *insn;
+
+	insn = brw_next_insn(p, BRW_OPCODE_IF);
+
+	brw_set_dest(p, insn, brw_imm_w(0));
+	if (p->compressed) {
+		insn->header.execution_size = BRW_EXECUTE_16;
+	} else {
+		insn->header.execution_size = BRW_EXECUTE_8;
+	}
+	insn->bits1.branch_gen6.jump_count = 0;
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, src1);
+
+	assert(insn->header.compression_control == BRW_COMPRESSION_NONE);
+	assert(insn->header.predicate_control == BRW_PREDICATE_NONE);
+	insn->header.destreg__conditionalmod = conditional;
+
+	if (!p->single_program_flow)
+		insn->header.thread_control = BRW_THREAD_SWITCH;
+
+	push_if_stack(p, insn);
+	return insn;
+}
+
+/**
+ * In single-program-flow (SPF) mode, convert IF and ELSE into ADDs.
+ */
/* In SPF mode there is no mask stack, so IF/ELSE are rewritten as
 * predicated ADDs to the instruction pointer.  Offsets are in bytes;
 * one instruction is 16 bytes (128 bits).
 */
static void
convert_IF_ELSE_to_ADD(struct brw_compile *p,
		       struct brw_instruction *if_inst,
		       struct brw_instruction *else_inst)
{
	/* The next instruction (where the ENDIF would be, if it existed) */
	struct brw_instruction *next_inst = &p->store[p->nr_insn];

	assert(p->single_program_flow);
	assert(if_inst != NULL && if_inst->header.opcode == BRW_OPCODE_IF);
	assert(else_inst == NULL || else_inst->header.opcode == BRW_OPCODE_ELSE);
	assert(if_inst->header.execution_size == BRW_EXECUTE_1);

	/* Convert IF to an ADD instruction that moves the instruction pointer
	 * to the first instruction of the ELSE block.  If there is no ELSE
	 * block, point to where ENDIF would be.  Reverse the predicate.
	 *
	 * There's no need to execute an ENDIF since we don't need to do any
	 * stack operations, and if we're currently executing, we just want to
	 * continue normally.
	 */
	if_inst->header.opcode = BRW_OPCODE_ADD;
	if_inst->header.predicate_inverse = 1;

	if (else_inst != NULL) {
		/* Convert ELSE to an ADD instruction that points where the ENDIF
		 * would be.
		 */
		else_inst->header.opcode = BRW_OPCODE_ADD;

		/* "+1": skip over the ELSE itself. */
		if_inst->bits3.ud = (else_inst - if_inst + 1) * 16;
		else_inst->bits3.ud = (next_inst - else_inst) * 16;
	} else {
		if_inst->bits3.ud = (next_inst - if_inst) * 16;
	}
}
+
+/**
+ * Patch IF and ELSE instructions with appropriate jump targets.
+ */
+static void
+patch_IF_ELSE(struct brw_compile *p,
+	      struct brw_instruction *if_inst,
+	      struct brw_instruction *else_inst,
+	      struct brw_instruction *endif_inst)
+{
+	unsigned br = 1;
+
+	assert(!p->single_program_flow);
+	assert(if_inst != NULL && if_inst->header.opcode == BRW_OPCODE_IF);
+	assert(endif_inst != NULL);
+	assert(else_inst == NULL || else_inst->header.opcode == BRW_OPCODE_ELSE);
+
+	/* Jump count is for 64bit data chunk each, so one 128bit instruction
+	 * requires 2 chunks.
+	 */
+	if (p->gen >= 50)
+		br = 2;
+
+	assert(endif_inst->header.opcode == BRW_OPCODE_ENDIF);
+	endif_inst->header.execution_size = if_inst->header.execution_size;
+
+	if (else_inst == NULL) {
+		/* Patch IF -> ENDIF */
+		if (p->gen < 60) {
+			/* Turn it into an IFF, which means no mask stack operations for
+			 * all-false and jumping past the ENDIF.
+			 */
+			if_inst->header.opcode = BRW_OPCODE_IFF;
+			if_inst->bits3.if_else.jump_count = br * (endif_inst - if_inst + 1);
+			if_inst->bits3.if_else.pop_count = 0;
+			if_inst->bits3.if_else.pad0 = 0;
+		} else if (p->gen < 70) {
+			/* As of gen6, there is no IFF and IF must point to the ENDIF. */
+			if_inst->bits1.branch_gen6.jump_count = br * (endif_inst - if_inst);
+		} else {
+			if_inst->bits3.break_cont.uip = br * (endif_inst - if_inst);
+			if_inst->bits3.break_cont.jip = br * (endif_inst - if_inst);
+		}
+	} else {
+		else_inst->header.execution_size = if_inst->header.execution_size;
+
+		/* Patch IF -> ELSE */
+		if (p->gen < 60) {
+			if_inst->bits3.if_else.jump_count = br * (else_inst - if_inst);
+			if_inst->bits3.if_else.pop_count = 0;
+			if_inst->bits3.if_else.pad0 = 0;
+		} else if (p->gen <= 70) {
+			if_inst->bits1.branch_gen6.jump_count = br * (else_inst - if_inst + 1);
+		}
+
+		/* Patch ELSE -> ENDIF */
+		if (p->gen < 60) {
+			/* BRW_OPCODE_ELSE pre-gen6 should point just past the
+			 * matching ENDIF.
+			 */
+			else_inst->bits3.if_else.jump_count = br*(endif_inst - else_inst + 1);
+			else_inst->bits3.if_else.pop_count = 1;
+			else_inst->bits3.if_else.pad0 = 0;
+		} else if (p->gen < 70) {
+			/* BRW_OPCODE_ELSE on gen6 should point to the matching ENDIF. */
+			else_inst->bits1.branch_gen6.jump_count = br*(endif_inst - else_inst);
+		} else {
+			/* The IF instruction's JIP should point just past the ELSE */
+			if_inst->bits3.break_cont.jip = br * (else_inst - if_inst + 1);
+			/* The IF instruction's UIP and ELSE's JIP should point to ENDIF */
+			if_inst->bits3.break_cont.uip = br * (endif_inst - if_inst);
+			else_inst->bits3.break_cont.jip = br * (endif_inst - else_inst);
+		}
+	}
+}
+
/* Emit an ELSE for the innermost open IF and push it on the if-stack;
 * its jump target is patched later by brw_ENDIF()/patch_IF_ELSE().
 */
void
brw_ELSE(struct brw_compile *p)
{
	struct brw_instruction *insn;

	insn = brw_next_insn(p, BRW_OPCODE_ELSE);

	if (p->gen < 60) {
		/* Pre-gen6: an IP-relative jump; offset patched in later. */
		brw_set_dest(p, insn, brw_ip_reg());
		brw_set_src0(p, insn, brw_ip_reg());
		brw_set_src1(p, insn, brw_imm_d(0x0));
	} else if (p->gen < 70) {
		/* Gen6: jump count lives in bits1; sources are null. */
		brw_set_dest(p, insn, brw_imm_w(0));
		insn->bits1.branch_gen6.jump_count = 0;
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, __retype_d(brw_null_reg()));
	} else {
		/* Gen7: JIP/UIP in bits3, patched in later. */
		brw_set_dest(p, insn, __retype_d(brw_null_reg()));
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, brw_imm_ud(0));
		insn->bits3.break_cont.jip = 0;
		insn->bits3.break_cont.uip = 0;
	}

	insn->header.compression_control = BRW_COMPRESSION_NONE;
	insn->header.mask_control = BRW_MASK_ENABLE;
	if (!p->single_program_flow)
		insn->header.thread_control = BRW_THREAD_SWITCH;

	push_if_stack(p, insn);
}
+
/* Close the innermost conditional block: pop the IF (and optional ELSE)
 * off the if-stack, emit the ENDIF, and patch all jump offsets.  In
 * single-program-flow mode the whole construct degenerates into ADDs
 * and no ENDIF is emitted.
 */
void
brw_ENDIF(struct brw_compile *p)
{
	struct brw_instruction *insn;
	struct brw_instruction *else_inst = NULL;
	struct brw_instruction *if_inst = NULL;

	/* Pop the IF and (optional) ELSE instructions from the stack */
	p->if_stack_depth--;
	if (p->if_stack[p->if_stack_depth]->header.opcode == BRW_OPCODE_ELSE) {
		else_inst = p->if_stack[p->if_stack_depth];
		p->if_stack_depth--;
	}
	if_inst = p->if_stack[p->if_stack_depth];

	if (p->single_program_flow) {
		/* ENDIF is useless; don't bother emitting it. */
		convert_IF_ELSE_to_ADD(p, if_inst, else_inst);
		return;
	}

	insn = brw_next_insn(p, BRW_OPCODE_ENDIF);

	if (p->gen < 60) {
		brw_set_dest(p, insn, __retype_ud(brw_vec4_grf(0,0)));
		brw_set_src0(p, insn, __retype_ud(brw_vec4_grf(0,0)));
		brw_set_src1(p, insn, brw_imm_d(0x0));
	} else if (p->gen < 70) {
		brw_set_dest(p, insn, brw_imm_w(0));
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, __retype_d(brw_null_reg()));
	} else {
		brw_set_dest(p, insn, __retype_d(brw_null_reg()));
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, brw_imm_ud(0));
	}

	insn->header.compression_control = BRW_COMPRESSION_NONE;
	insn->header.mask_control = BRW_MASK_ENABLE;
	insn->header.thread_control = BRW_THREAD_SWITCH;

	/* Also pop item off the stack in the endif instruction: */
	if (p->gen < 60) {
		insn->bits3.if_else.jump_count = 0;
		insn->bits3.if_else.pop_count = 1;
		insn->bits3.if_else.pad0 = 0;
	} else if (p->gen < 70) {
		/* Jump past this instruction (one insn == 2 64-bit chunks). */
		insn->bits1.branch_gen6.jump_count = 2;
	} else {
		insn->bits3.break_cont.jip = 2;
	}
	patch_IF_ELSE(p, if_inst, else_inst, insn);
}
+
+struct brw_instruction *brw_BREAK(struct brw_compile *p, int pop_count)
+{
+	struct brw_instruction *insn;
+
+	insn = brw_next_insn(p, BRW_OPCODE_BREAK);
+	if (p->gen >= 60) {
+		brw_set_dest(p, insn, __retype_d(brw_null_reg()));
+		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
+		brw_set_src1(p, insn, brw_imm_d(0x0));
+	} else {
+		brw_set_dest(p, insn, brw_ip_reg());
+		brw_set_src0(p, insn, brw_ip_reg());
+		brw_set_src1(p, insn, brw_imm_d(0x0));
+		insn->bits3.if_else.pad0 = 0;
+		insn->bits3.if_else.pop_count = pop_count;
+	}
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.execution_size = BRW_EXECUTE_8;
+
+	return insn;
+}
+
+struct brw_instruction *gen6_CONT(struct brw_compile *p,
+				  struct brw_instruction *do_insn)
+{
+	struct brw_instruction *insn;
+
+	insn = brw_next_insn(p, BRW_OPCODE_CONTINUE);
+	brw_set_dest(p, insn, __retype_d(brw_null_reg()));
+	brw_set_src0(p, insn, __retype_d(brw_null_reg()));
+	brw_set_dest(p, insn, brw_ip_reg());
+	brw_set_src0(p, insn, brw_ip_reg());
+	brw_set_src1(p, insn, brw_imm_d(0x0));
+
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.execution_size = BRW_EXECUTE_8;
+	return insn;
+}
+
+struct brw_instruction *brw_CONT(struct brw_compile *p, int pop_count)
+{
+	struct brw_instruction *insn;
+	insn = brw_next_insn(p, BRW_OPCODE_CONTINUE);
+	brw_set_dest(p, insn, brw_ip_reg());
+	brw_set_src0(p, insn, brw_ip_reg());
+	brw_set_src1(p, insn, brw_imm_d(0x0));
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.execution_size = BRW_EXECUTE_8;
+	/* insn->header.mask_control = BRW_MASK_DISABLE; */
+	insn->bits3.if_else.pad0 = 0;
+	insn->bits3.if_else.pop_count = pop_count;
+	return insn;
+}
+
+/* DO/WHILE loop:
+ *
+ * The DO/WHILE is just an unterminated loop -- break or continue are
+ * used for control within the loop.  We have a few ways they can be
+ * done.
+ *
+ * For uniform control flow, the WHILE is just a jump, so ADD ip, ip,
+ * jip and no DO instruction.
+ *
+ * For non-uniform control flow pre-gen6, there's a DO instruction to
+ * push the mask, and a WHILE to jump back, and BREAK to get out and
+ * pop the mask.
+ *
+ * For gen6, there's no more mask stack, so no need for DO.  WHILE
+ * just points back to the first instruction of the loop.
+ */
+struct brw_instruction *brw_DO(struct brw_compile *p, unsigned execute_size)
+{
+	if (p->gen >= 60 || p->single_program_flow) {
+		return &p->store[p->nr_insn];
+	} else {
+		struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_DO);
+
+		/* Override the defaults for this instruction:
+		*/
+		brw_set_dest(p, insn, brw_null_reg());
+		brw_set_src0(p, insn, brw_null_reg());
+		brw_set_src1(p, insn, brw_null_reg());
+
+		insn->header.compression_control = BRW_COMPRESSION_NONE;
+		insn->header.execution_size = execute_size;
+		insn->header.predicate_control = BRW_PREDICATE_NONE;
+		/* insn->header.mask_control = BRW_MASK_ENABLE; */
+		/* insn->header.mask_control = BRW_MASK_DISABLE; */
+
+		return insn;
+	}
+}
+
/* Close a DO/WHILE loop opened by brw_DO(), jumping back to @do_insn.
 * Returns the emitted WHILE (or, in pre-gen6 SPF mode, the ADD used in
 * its place).
 */
struct brw_instruction *brw_WHILE(struct brw_compile *p,
                                  struct brw_instruction *do_insn)
{
	struct brw_instruction *insn;
	unsigned br = 1;

	/* Jump counts are in 64-bit chunks from gen5 on, so one 128-bit
	 * instruction counts as 2. */
	if (p->gen >= 50)
		br = 2;

	if (p->gen >= 70) {
		/* Gen7: backward jump encoded in JIP. */
		insn = brw_next_insn(p, BRW_OPCODE_WHILE);

		brw_set_dest(p, insn, __retype_d(brw_null_reg()));
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, brw_imm_ud(0));
		insn->bits3.break_cont.jip = br * (do_insn - insn);

		insn->header.execution_size = BRW_EXECUTE_8;
	} else if (p->gen >= 60) {
		/* Gen6: backward jump count lives in bits1. */
		insn = brw_next_insn(p, BRW_OPCODE_WHILE);

		brw_set_dest(p, insn, brw_imm_w(0));
		insn->bits1.branch_gen6.jump_count = br * (do_insn - insn);
		brw_set_src0(p, insn, __retype_d(brw_null_reg()));
		brw_set_src1(p, insn, __retype_d(brw_null_reg()));

		insn->header.execution_size = BRW_EXECUTE_8;
	} else {
		if (p->single_program_flow) {
			/* SPF: a plain IP ADD back to the loop head; offsets
			 * are in bytes (16 per instruction). */
			insn = brw_next_insn(p, BRW_OPCODE_ADD);

			brw_set_dest(p, insn, brw_ip_reg());
			brw_set_src0(p, insn, brw_ip_reg());
			brw_set_src1(p, insn, brw_imm_d((do_insn - insn) * 16));
			insn->header.execution_size = BRW_EXECUTE_1;
		} else {
			/* Pre-gen6: WHILE must pair with an explicit DO. */
			insn = brw_next_insn(p, BRW_OPCODE_WHILE);

			assert(do_insn->header.opcode == BRW_OPCODE_DO);

			brw_set_dest(p, insn, brw_ip_reg());
			brw_set_src0(p, insn, brw_ip_reg());
			brw_set_src1(p, insn, brw_imm_d(0));

			insn->header.execution_size = do_insn->header.execution_size;
			insn->bits3.if_else.jump_count = br * (do_insn - insn + 1);
			insn->bits3.if_else.pop_count = 0;
			insn->bits3.if_else.pad0 = 0;
		}
	}
	insn->header.compression_control = BRW_COMPRESSION_NONE;
	p->current->header.predicate_control = BRW_PREDICATE_NONE;

	return insn;
}
+
+/* FORWARD JUMPS:
+ */
+void brw_land_fwd_jump(struct brw_compile *p,
+		       struct brw_instruction *jmp_insn)
+{
+	struct brw_instruction *landing = &p->store[p->nr_insn];
+	unsigned jmpi = 1;
+
+	if (p->gen >= 50)
+		jmpi = 2;
+
+	assert(jmp_insn->header.opcode == BRW_OPCODE_JMPI);
+	assert(jmp_insn->bits1.da1.src1_reg_file == BRW_IMMEDIATE_VALUE);
+
+	jmp_insn->bits3.ud = jmpi * ((landing - jmp_insn) - 1);
+}
+
+
+
+/* To integrate with the above, it makes sense that the comparison
+ * instruction should populate the flag register.  It might be simpler
+ * just to use the flag reg for most WM tasks?
+ */
+void brw_CMP(struct brw_compile *p,
+	     struct brw_reg dest,
+	     unsigned conditional,
+	     struct brw_reg src0,
+	     struct brw_reg src1)
+{
+	struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_CMP);
+
+	insn->header.destreg__conditionalmod = conditional;
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, src1);
+
+	/* Make it so that future instructions will use the computed flag
+	 * value until brw_set_predicate_control_flag_value() is called
+	 * again.  
+	 */
+	if (dest.file == BRW_ARCHITECTURE_REGISTER_FILE &&
+	    dest.nr == 0) {
+		p->current->header.predicate_control = BRW_PREDICATE_NORMAL;
+		p->flag_value = 0xff;
+	}
+}
+
+/* Issue 'wait' instruction for n1, host could program MMIO
+   to wake up thread. */
+void brw_WAIT(struct brw_compile *p)
+{
+	struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_WAIT);
+	struct brw_reg src = brw_notification_1_reg();
+
+	brw_set_dest(p, insn, src);
+	brw_set_src0(p, insn, src);
+	brw_set_src1(p, insn, brw_null_reg());
+	insn->header.execution_size = 0; /* must */
+	insn->header.predicate_control = 0;
+	insn->header.compression_control = 0;
+}
+
+/***********************************************************************
+ * Helpers for the various SEND message types:
+ */
+
+/** Extended math function, float[8].
+ */
/* Emit an extended math operation on a single source.
 *
 * On gen6+ this is a native MATH instruction; earlier generations send
 * a message to the shared math function (@msg_reg_nr selects the MRF
 * used for the payload; it is ignored on gen6+).
 */
void brw_math(struct brw_compile *p,
	      struct brw_reg dest,
	      unsigned function,
	      unsigned saturate,
	      unsigned msg_reg_nr,
	      struct brw_reg src,
	      unsigned data_type,
	      unsigned precision)
{
	if (p->gen >= 60) {
		struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_MATH);

		/* MATH operands must be unit-stride GRFs. */
		assert(dest.file == BRW_GENERAL_REGISTER_FILE);
		assert(src.file == BRW_GENERAL_REGISTER_FILE);

		assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
		assert(src.hstride == BRW_HORIZONTAL_STRIDE_1);

		/* Source modifiers are ignored for extended math instructions. */
		assert(!src.negate);
		assert(!src.abs);

		/* Everything except integer division operates on floats. */
		if (function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT &&
		    function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
			assert(src.type == BRW_REGISTER_TYPE_F);
		}

		/* Math is the same ISA format as other opcodes, except that CondModifier
		 * becomes FC[3:0] and ThreadCtrl becomes FC[5:4].
		 */
		insn->header.destreg__conditionalmod = function;
		insn->header.saturate = saturate;

		brw_set_dest(p, insn, dest);
		brw_set_src0(p, insn, src);
		brw_set_src1(p, insn, brw_null_reg());
	} else {
		struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
		/* Example code doesn't set predicate_control for send
		 * instructions.
		 */
		insn->header.predicate_control = 0;
		insn->header.destreg__conditionalmod = msg_reg_nr;

		brw_set_dest(p, insn, dest);
		brw_set_src0(p, insn, src);
		brw_set_math_message(p, insn, function,
				     src.type == BRW_REGISTER_TYPE_D,
				     precision,
				     saturate,
				     data_type);
	}
}
+
+/** Extended math function, float[8].
+ */
+void brw_math2(struct brw_compile *p,
+	       struct brw_reg dest,
+	       unsigned function,
+	       struct brw_reg src0,
+	       struct brw_reg src1)
+{
+	struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_MATH);
+
+	assert(dest.file == BRW_GENERAL_REGISTER_FILE);
+	assert(src0.file == BRW_GENERAL_REGISTER_FILE);
+	assert(src1.file == BRW_GENERAL_REGISTER_FILE);
+
+	assert(dest.hstride == BRW_HORIZONTAL_STRIDE_1);
+	assert(src0.hstride == BRW_HORIZONTAL_STRIDE_1);
+	assert(src1.hstride == BRW_HORIZONTAL_STRIDE_1);
+
+	if (function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT &&
+	    function != BRW_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER) {
+		assert(src0.type == BRW_REGISTER_TYPE_F);
+		assert(src1.type == BRW_REGISTER_TYPE_F);
+	}
+
+	/* Source modifiers are ignored for extended math instructions. */
+	assert(!src0.negate);
+	assert(!src0.abs);
+	assert(!src1.negate);
+	assert(!src1.abs);
+
+	/* Math is the same ISA format as other opcodes, except that CondModifier
+	 * becomes FC[3:0] and ThreadCtrl becomes FC[5:4].
+	 */
+	insn->header.destreg__conditionalmod = function;
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, src1);
+}
+
+/**
+ * Extended math function, float[16].
+ * Use 2 send instructions.
+ */
+void brw_math_16(struct brw_compile *p,
+		 struct brw_reg dest,
+		 unsigned function,
+		 unsigned saturate,
+		 unsigned msg_reg_nr,
+		 struct brw_reg src,
+		 unsigned precision)
+{
+	struct brw_instruction *insn;
+
+	if (p->gen >= 60) {
+		insn = brw_next_insn(p, BRW_OPCODE_MATH);
+
+		/* Math is the same ISA format as other opcodes, except that CondModifier
+		 * becomes FC[3:0] and ThreadCtrl becomes FC[5:4].
+		 */
+		insn->header.destreg__conditionalmod = function;
+		insn->header.saturate = saturate;
+
+		/* Source modifiers are ignored for extended math instructions. */
+		assert(!src.negate);
+		assert(!src.abs);
+
+		brw_set_dest(p, insn, dest);
+		brw_set_src0(p, insn, src);
+		brw_set_src1(p, insn, brw_null_reg());
+		return;
+	}
+
+	/* First instruction:
+	*/
+	brw_push_insn_state(p);
+	brw_set_predicate_control_flag_value(p, 0xff);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	insn->header.destreg__conditionalmod = msg_reg_nr;
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src);
+	brw_set_math_message(p, insn, function,
+			     BRW_MATH_INTEGER_UNSIGNED,
+			     precision,
+			     saturate,
+			     BRW_MATH_DATA_VECTOR);
+
+	/* Second instruction:
+	*/
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	insn->header.compression_control = BRW_COMPRESSION_2NDHALF;
+	insn->header.destreg__conditionalmod = msg_reg_nr+1;
+
+	brw_set_dest(p, insn, __offset(dest,1));
+	brw_set_src0(p, insn, src);
+	brw_set_math_message(p, insn, function,
+			     BRW_MATH_INTEGER_UNSIGNED,
+			     precision,
+			     saturate,
+			     BRW_MATH_DATA_VECTOR);
+
+	brw_pop_insn_state(p);
+}
+
+/**
+ * Write a block of OWORDs (half a GRF each) from the scratch buffer,
+ * using a constant offset per channel.
+ *
+ * The offset must be aligned to oword size (16 bytes).  Used for
+ * register spilling.
+ */
+void brw_oword_block_write_scratch(struct brw_compile *p,
+				   struct brw_reg mrf,
+				   int num_regs,
+				   unsigned offset)
+{
+	uint32_t msg_control, msg_type;
+	int mlen;
+
+	if (p->gen >= 60)
+		offset /= 16;
+
+	mrf = __retype_ud(mrf);
+
+	if (num_regs == 1) {
+		msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
+		mlen = 2;
+	} else {
+		msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
+		mlen = 3;
+	}
+
+	/* Set up the message header.  This is g0, with g0.2 filled with
+	 * the offset.  We don't want to leave our offset around in g0 or
+	 * it'll screw up texture samples, so set it up inside the message
+	 * reg.
+	 */
+	{
+		brw_push_insn_state(p);
+		brw_set_mask_control(p, BRW_MASK_DISABLE);
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+
+		brw_MOV(p, mrf, __retype_ud(brw_vec8_grf(0, 0)));
+
+		/* set message header global offset field (reg 0, element 2) */
+		brw_MOV(p,
+			__retype_ud(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, mrf.nr, 2)),
+			brw_imm_ud(offset));
+
+		brw_pop_insn_state(p);
+	}
+
+	{
+		struct brw_reg dest;
+		struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
+		int send_commit_msg;
+		struct brw_reg src_header = __retype_uw(brw_vec8_grf(0, 0));
+
+		if (insn->header.compression_control != BRW_COMPRESSION_NONE) {
+			insn->header.compression_control = BRW_COMPRESSION_NONE;
+			src_header = vec16(src_header);
+		}
+		assert(insn->header.predicate_control == BRW_PREDICATE_NONE);
+		insn->header.destreg__conditionalmod = mrf.nr;
+
+		/* Until gen6, writes followed by reads from the same location
+		 * are not guaranteed to be ordered unless write_commit is set.
+		 * If set, then a no-op write is issued to the destination
+		 * register to set a dependency, and a read from the destination
+		 * can be used to ensure the ordering.
+		 *
+		 * For gen6, only writes between different threads need ordering
+		 * protection.  Our use of DP writes is all about register
+		 * spilling within a thread.
+		 */
+		if (p->gen >= 60) {
+			dest = __retype_uw(vec16(brw_null_reg()));
+			send_commit_msg = 0;
+		} else {
+			dest = src_header;
+			send_commit_msg = 1;
+		}
+
+		brw_set_dest(p, insn, dest);
+		if (p->gen >= 60) {
+			brw_set_src0(p, insn, mrf);
+		} else {
+			brw_set_src0(p, insn, brw_null_reg());
+		}
+
+		if (p->gen >= 60)
+			msg_type = GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
+		else
+			msg_type = BRW_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE;
+
+		brw_set_dp_write_message(p,
+					 insn,
+					 255, /* binding table index (255=stateless) */
+					 msg_control,
+					 msg_type,
+					 mlen,
+					 true, /* header_present */
+					 0, /* pixel scoreboard */
+					 send_commit_msg, /* response_length */
+					 0, /* eot */
+					 send_commit_msg);
+	}
+}
+
+
+/**
+ * Read a block of owords (half a GRF each) from the scratch buffer
+ * using a constant index per channel.
+ *
+ * Offset must be aligned to oword size (16 bytes).  Used for register
+ * spilling.
+ */
+void
+brw_oword_block_read_scratch(struct brw_compile *p,
+			     struct brw_reg dest,
+			     struct brw_reg mrf,
+			     int num_regs,
+			     unsigned offset)
+{
+	uint32_t msg_control;
+	int rlen;
+
+	if (p->gen >= 60)
+		offset /= 16;
+
+	mrf = __retype_ud(mrf);
+	dest = __retype_uw(dest);
+
+	if (num_regs == 1) {
+		msg_control = BRW_DATAPORT_OWORD_BLOCK_2_OWORDS;
+		rlen = 1;
+	} else {
+		msg_control = BRW_DATAPORT_OWORD_BLOCK_4_OWORDS;
+		rlen = 2;
+	}
+
+	{
+		brw_push_insn_state(p);
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+		brw_set_mask_control(p, BRW_MASK_DISABLE);
+
+		brw_MOV(p, mrf, __retype_ud(brw_vec8_grf(0, 0)));
+
+		/* set message header global offset field (reg 0, element 2) */
+		brw_MOV(p,
+			__retype_ud(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, mrf.nr, 2)),
+			brw_imm_ud(offset));
+
+		brw_pop_insn_state(p);
+	}
+
+	{
+		struct brw_instruction *insn = brw_next_insn(p, BRW_OPCODE_SEND);
+
+		assert(insn->header.predicate_control == 0);
+		insn->header.compression_control = BRW_COMPRESSION_NONE;
+		insn->header.destreg__conditionalmod = mrf.nr;
+
+		brw_set_dest(p, insn, dest); /* UW? */
+		if (p->gen >= 60) {
+			brw_set_src0(p, insn, mrf);
+		} else {
+			brw_set_src0(p, insn, brw_null_reg());
+		}
+
+		brw_set_dp_read_message(p,
+					insn,
+					255, /* binding table index (255=stateless) */
+					msg_control,
+					BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
+					BRW_DATAPORT_READ_TARGET_RENDER_CACHE,
+					1, /* msg_length */
+					rlen);
+	}
+}
+
+/**
+ * Read a float[4] vector from the data port Data Cache (const buffer).
+ * Location (in buffer) should be a multiple of 16.
+ * Used for fetching shader constants.
+ */
+void brw_oword_block_read(struct brw_compile *p,
+			  struct brw_reg dest,
+			  struct brw_reg mrf,
+			  uint32_t offset,
+			  uint32_t bind_table_index)
+{
+	struct brw_instruction *insn;
+
+	/* On newer hardware, offset is in units of owords. */
+	if (p->gen >= 60)
+		offset /= 16;
+
+	mrf = __retype_ud(mrf);
+
+	brw_push_insn_state(p);
+	brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+	brw_set_mask_control(p, BRW_MASK_DISABLE);
+
+	brw_MOV(p, mrf, __retype_ud(brw_vec8_grf(0, 0)));
+
+	/* set message header global offset field (reg 0, element 2) */
+	brw_MOV(p,
+		__retype_ud(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, mrf.nr, 2)),
+		brw_imm_ud(offset));
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	insn->header.destreg__conditionalmod = mrf.nr;
+
+	/* cast dest to a uword[8] vector */
+	dest = __retype_uw(vec8(dest));
+
+	brw_set_dest(p, insn, dest);
+	if (p->gen >= 60) {
+		brw_set_src0(p, insn, mrf);
+	} else {
+		brw_set_src0(p, insn, brw_null_reg());
+	}
+
+	brw_set_dp_read_message(p,
+				insn,
+				bind_table_index,
+				BRW_DATAPORT_OWORD_BLOCK_1_OWORDLOW,
+				BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ,
+				BRW_DATAPORT_READ_TARGET_DATA_CACHE,
+				1, /* msg_length */
+				1); /* response_length (1 reg, 2 owords!) */
+
+	brw_pop_insn_state(p);
+}
+
+/**
+ * Read a set of dwords from the data port Data Cache (const buffer).
+ *
+ * Location (in buffer) appears as UD offsets in the register after
+ * the provided mrf header reg.
+ */
+void brw_dword_scattered_read(struct brw_compile *p,
+			      struct brw_reg dest,
+			      struct brw_reg mrf,
+			      uint32_t bind_table_index)
+{
+	struct brw_instruction *insn;
+
+	mrf = __retype_ud(mrf);
+
+	brw_push_insn_state(p);
+	brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+	brw_set_mask_control(p, BRW_MASK_DISABLE);
+	brw_MOV(p, mrf, __retype_ud(brw_vec8_grf(0, 0)));
+	brw_pop_insn_state(p);
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	insn->header.destreg__conditionalmod = mrf.nr;
+
+	/* cast dest to a uword[8] vector */
+	dest = __retype_uw(vec8(dest));
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, brw_null_reg());
+
+	brw_set_dp_read_message(p,
+				insn,
+				bind_table_index,
+				BRW_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS,
+				BRW_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ,
+				BRW_DATAPORT_READ_TARGET_DATA_CACHE,
+				2, /* msg_length */
+				1); /* response_length */
+}
+
+/**
+ * Read float[4] constant(s) from VS constant buffer.
+ * For relative addressing, two float[4] constants will be read into 'dest'.
+ * Otherwise, one float[4] constant will be read into the lower half of 'dest'.
+ */
+void brw_dp_READ_4_vs(struct brw_compile *p,
+                      struct brw_reg dest,
+                      unsigned location,
+                      unsigned bind_table_index)
+{
+	struct brw_instruction *insn;
+	unsigned msg_reg_nr = 1;
+
+	if (p->gen >= 60)
+		location /= 16;
+
+	/* Setup MRF[1] with location/offset into const buffer */
+	brw_push_insn_state(p);
+	brw_set_access_mode(p, BRW_ALIGN_1);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+	brw_set_mask_control(p, BRW_MASK_DISABLE);
+	brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+	brw_MOV(p, __retype_ud(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 2)),
+		brw_imm_ud(location));
+	brw_pop_insn_state(p);
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+
+	insn->header.predicate_control = BRW_PREDICATE_NONE;
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.destreg__conditionalmod = msg_reg_nr;
+	insn->header.mask_control = BRW_MASK_DISABLE;
+
+	brw_set_dest(p, insn, dest);
+	if (p->gen >= 60) {
+		brw_set_src0(p, insn, brw_message_reg(msg_reg_nr));
+	} else {
+		brw_set_src0(p, insn, brw_null_reg());
+	}
+
+	brw_set_dp_read_message(p,
+				insn,
+				bind_table_index,
+				0,
+				BRW_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ, /* msg_type */
+				BRW_DATAPORT_READ_TARGET_DATA_CACHE,
+				1, /* msg_length */
+				1); /* response_length (1 Oword) */
+}
+
+/**
+ * Read a float[4] constant per vertex from VS constant buffer, with
+ * relative addressing.
+ */
+void brw_dp_READ_4_vs_relative(struct brw_compile *p,
+			       struct brw_reg dest,
+			       struct brw_reg addr_reg,
+			       unsigned offset,
+			       unsigned bind_table_index)
+{
+	struct brw_reg src = brw_vec8_grf(0, 0);
+	struct brw_instruction *insn;
+	int msg_type;
+
+	/* Setup MRF[1] with offset into const buffer */
+	brw_push_insn_state(p);
+	brw_set_access_mode(p, BRW_ALIGN_1);
+	brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+	brw_set_mask_control(p, BRW_MASK_DISABLE);
+	brw_set_predicate_control(p, BRW_PREDICATE_NONE);
+
+	/* M1.0 is block offset 0, M1.4 is block offset 1, all other
+	 * fields ignored.
+	 */
+	brw_ADD(p, __retype_d(brw_message_reg(1)),
+		addr_reg, brw_imm_d(offset));
+	brw_pop_insn_state(p);
+
+	gen6_resolve_implied_move(p, &src, 0);
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	insn->header.predicate_control = BRW_PREDICATE_NONE;
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+	insn->header.destreg__conditionalmod = 0;
+	insn->header.mask_control = BRW_MASK_DISABLE;
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src);
+
+	if (p->gen >= 60)
+		msg_type = GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
+	else if (p->gen >= 45)
+		msg_type = G45_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
+	else
+		msg_type = BRW_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ;
+
+	brw_set_dp_read_message(p,
+				insn,
+				bind_table_index,
+				BRW_DATAPORT_OWORD_DUAL_BLOCK_1OWORD,
+				msg_type,
+				BRW_DATAPORT_READ_TARGET_DATA_CACHE,
+				2, /* msg_length */
+				1); /* response_length */
+}
+
+void brw_fb_WRITE(struct brw_compile *p,
+		  int dispatch_width,
+                  unsigned msg_reg_nr,
+                  struct brw_reg src0,
+                  unsigned msg_control,
+                  unsigned binding_table_index,
+                  unsigned msg_length,
+                  unsigned response_length,
+                  bool eot,
+                  bool header_present)
+{
+	struct brw_instruction *insn;
+	unsigned msg_type;
+	struct brw_reg dest;
+
+	if (dispatch_width == 16)
+		dest = __retype_uw(vec16(brw_null_reg()));
+	else
+		dest = __retype_uw(vec8(brw_null_reg()));
+
+	if (p->gen >= 60 && binding_table_index == 0) {
+		insn = brw_next_insn(p, BRW_OPCODE_SENDC);
+	} else {
+		insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	}
+	/* The execution mask is ignored for render target writes. */
+	insn->header.predicate_control = 0;
+	insn->header.compression_control = BRW_COMPRESSION_NONE;
+
+	if (p->gen >= 60) {
+		/* headerless version, just submit color payload */
+		src0 = brw_message_reg(msg_reg_nr);
+
+		msg_type = GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
+	} else {
+		insn->header.destreg__conditionalmod = msg_reg_nr;
+
+		msg_type = BRW_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE;
+	}
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_dp_write_message(p,
+				 insn,
+				 binding_table_index,
+				 msg_control,
+				 msg_type,
+				 msg_length,
+				 header_present,
+				 eot,
+				 response_length,
+				 eot,
+				 0 /* send_commit_msg */);
+}
+
+/**
+ * Texture sample instruction.
+ * Note: the msg_type plus msg_length values determine exactly what kind
+ * of sampling operation is performed.  See volume 4, page 161 of docs.
+ */
+void brw_SAMPLE(struct brw_compile *p,
+		struct brw_reg dest,
+		unsigned msg_reg_nr,
+		struct brw_reg src0,
+		unsigned binding_table_index,
+		unsigned sampler,
+		unsigned writemask,
+		unsigned msg_type,
+		unsigned response_length,
+		unsigned msg_length,
+		bool header_present,
+		unsigned simd_mode)
+{
+	assert(writemask);
+
+	if (p->gen < 50 || writemask != WRITEMASK_XYZW) {
+		struct brw_reg m1 = brw_message_reg(msg_reg_nr);
+
+		writemask = ~writemask & WRITEMASK_XYZW;
+
+		brw_push_insn_state(p);
+
+		brw_set_compression_control(p, BRW_COMPRESSION_NONE);
+		brw_set_mask_control(p, BRW_MASK_DISABLE);
+
+		brw_MOV(p, __retype_ud(m1), __retype_ud(brw_vec8_grf(0,0)));
+		brw_MOV(p, get_element_ud(m1, 2), brw_imm_ud(writemask << 12));
+
+		brw_pop_insn_state(p);
+
+		src0 = __retype_uw(brw_null_reg());
+	}
+
+	{
+		struct brw_instruction *insn;
+
+		gen6_resolve_implied_move(p, &src0, msg_reg_nr);
+
+		insn = brw_next_insn(p, BRW_OPCODE_SEND);
+		insn->header.predicate_control = 0; /* XXX */
+		insn->header.compression_control = BRW_COMPRESSION_NONE;
+		if (p->gen < 60)
+			insn->header.destreg__conditionalmod = msg_reg_nr;
+
+		brw_set_dest(p, insn, dest);
+		brw_set_src0(p, insn, src0);
+		brw_set_sampler_message(p, insn,
+					binding_table_index,
+					sampler,
+					msg_type,
+					response_length,
+					msg_length,
+					header_present,
+					simd_mode);
+	}
+}
+
+/* All these variables are pretty confusing - we might be better off
+ * using bitmasks and macros for this, in the old style.  Or perhaps
+ * just having the caller instantiate the fields in dword3 itself.
+ */
+void brw_urb_WRITE(struct brw_compile *p,
+		   struct brw_reg dest,
+		   unsigned msg_reg_nr,
+		   struct brw_reg src0,
+		   bool allocate,
+		   bool used,
+		   unsigned msg_length,
+		   unsigned response_length,
+		   bool eot,
+		   bool writes_complete,
+		   unsigned offset,
+		   unsigned swizzle)
+{
+	struct brw_instruction *insn;
+
+	gen6_resolve_implied_move(p, &src0, msg_reg_nr);
+
+	if (p->gen >= 70) {
+		/* Enable Channel Masks in the URB_WRITE_HWORD message header */
+		brw_push_insn_state(p);
+		brw_set_access_mode(p, BRW_ALIGN_1);
+		brw_OR(p, __retype_ud(brw_vec1_reg(BRW_MESSAGE_REGISTER_FILE, msg_reg_nr, 5)),
+		       __retype_ud(brw_vec1_grf(0, 5)),
+		       brw_imm_ud(0xff00));
+		brw_pop_insn_state(p);
+	}
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+
+	assert(msg_length < BRW_MAX_MRF);
+
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, brw_imm_d(0));
+
+	if (p->gen <= 60)
+		insn->header.destreg__conditionalmod = msg_reg_nr;
+
+	brw_set_urb_message(p,
+			    insn,
+			    allocate,
+			    used,
+			    msg_length,
+			    response_length,
+			    eot,
+			    writes_complete,
+			    offset,
+			    swizzle);
+}
+
+static int
+brw_find_next_block_end(struct brw_compile *p, int start)
+{
+	int ip;
+
+	for (ip = start + 1; ip < p->nr_insn; ip++) {
+		struct brw_instruction *insn = &p->store[ip];
+
+		switch (insn->header.opcode) {
+		case BRW_OPCODE_ENDIF:
+		case BRW_OPCODE_ELSE:
+		case BRW_OPCODE_WHILE:
+			return ip;
+		}
+	}
+	assert(!"not reached");
+	return start + 1;
+}
+
+/* There is no DO instruction on gen6, so to find the end of the loop
+ * we have to see if the loop is jumping back before our start
+ * instruction.
+ */
+static int
+brw_find_loop_end(struct brw_compile *p, int start)
+{
+	int ip;
+	int br = 2;
+
+	for (ip = start + 1; ip < p->nr_insn; ip++) {
+		struct brw_instruction *insn = &p->store[ip];
+
+		if (insn->header.opcode == BRW_OPCODE_WHILE) {
+			int jip = p->gen <= 70 ? insn->bits1.branch_gen6.jump_count
+				: insn->bits3.break_cont.jip;
+			if (ip + jip / br <= start)
+				return ip;
+		}
+	}
+	assert(!"not reached");
+	return start + 1;
+}
+
+/* After program generation, go back and update the UIP and JIP of
+ * BREAK and CONT instructions to their correct locations.
+ */
+void
+brw_set_uip_jip(struct brw_compile *p)
+{
+	int ip;
+	int br = 2;
+
+	if (p->gen <= 60)
+		return;
+
+	for (ip = 0; ip < p->nr_insn; ip++) {
+		struct brw_instruction *insn = &p->store[ip];
+
+		switch (insn->header.opcode) {
+		case BRW_OPCODE_BREAK:
+			insn->bits3.break_cont.jip = br * (brw_find_next_block_end(p, ip) - ip);
+			/* Gen7 UIP points to WHILE; Gen6 points just after it */
+			insn->bits3.break_cont.uip =
+				br * (brw_find_loop_end(p, ip) - ip + (p->gen <= 70 ? 1 : 0));
+			break;
+		case BRW_OPCODE_CONTINUE:
+			insn->bits3.break_cont.jip = br * (brw_find_next_block_end(p, ip) - ip);
+			insn->bits3.break_cont.uip = br * (brw_find_loop_end(p, ip) - ip);
+
+			assert(insn->bits3.break_cont.uip != 0);
+			assert(insn->bits3.break_cont.jip != 0);
+			break;
+		}
+	}
+}
+
+void brw_ff_sync(struct brw_compile *p,
+		   struct brw_reg dest,
+		   unsigned msg_reg_nr,
+		   struct brw_reg src0,
+		   bool allocate,
+		   unsigned response_length,
+		   bool eot)
+{
+	struct brw_instruction *insn;
+
+	gen6_resolve_implied_move(p, &src0, msg_reg_nr);
+
+	insn = brw_next_insn(p, BRW_OPCODE_SEND);
+	brw_set_dest(p, insn, dest);
+	brw_set_src0(p, insn, src0);
+	brw_set_src1(p, insn, brw_imm_d(0));
+
+	if (p->gen < 60)
+		insn->header.destreg__conditionalmod = msg_reg_nr;
+
+	brw_set_ff_sync_message(p,
+				insn,
+				allocate,
+				response_length,
+				eot);
+}
diff --git a/src/sna/brw/brw_eu_util.c b/src/sna/brw/brw_eu_util.c
new file mode 100644
index 0000000..5405cf1
--- /dev/null
+++ b/src/sna/brw/brw_eu_util.c
@@ -0,0 +1,126 @@
+/*
+ Copyright (C) Intel Corp.  2006.  All Rights Reserved.
+ Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
+ develop this 3D driver.
+ 
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ 
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial
+ portions of the Software.
+ 
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ 
+ **********************************************************************/
+ /*
+  * Authors:
+  *   Keith Whitwell <keith at tungstengraphics.com>
+  */
+      
+
+#include "brw_context.h"
+#include "brw_defines.h"
+#include "brw_eu.h"
+
+
+void brw_math_invert( struct brw_compile *p, 
+			     struct brw_reg dst,
+			     struct brw_reg src)
+{
+   brw_math( p, 
+	     dst,
+	     BRW_MATH_FUNCTION_INV, 
+	     BRW_MATH_SATURATE_NONE,
+	     0,
+	     src,
+	     BRW_MATH_PRECISION_FULL, 
+	     BRW_MATH_DATA_VECTOR );
+}
+
+
+
+void brw_copy4(struct brw_compile *p,
+	       struct brw_reg dst,
+	       struct brw_reg src,
+	       GLuint count)
+{
+   GLuint i;
+
+   dst = vec4(dst);
+   src = vec4(src);
+
+   for (i = 0; i < count; i++)
+   {
+      GLuint delta = i*32;
+      brw_MOV(p, byte_offset(dst, delta),    byte_offset(src, delta));
+      brw_MOV(p, byte_offset(dst, delta+16), byte_offset(src, delta+16));
+   }
+}
+
+
+void brw_copy8(struct brw_compile *p,
+	       struct brw_reg dst,
+	       struct brw_reg src,
+	       GLuint count)
+{
+   GLuint i;
+
+   dst = vec8(dst);
+   src = vec8(src);
+
+   for (i = 0; i < count; i++)
+   {
+      GLuint delta = i*32;
+      brw_MOV(p, byte_offset(dst, delta),    byte_offset(src, delta));
+   }
+}
+
+
+void brw_copy_indirect_to_indirect(struct brw_compile *p,
+				   struct brw_indirect dst_ptr,
+				   struct brw_indirect src_ptr,
+				   GLuint count)
+{
+   GLuint i;
+
+   for (i = 0; i < count; i++)
+   {
+      GLuint delta = i*32;
+      brw_MOV(p, deref_4f(dst_ptr, delta),    deref_4f(src_ptr, delta));
+      brw_MOV(p, deref_4f(dst_ptr, delta+16), deref_4f(src_ptr, delta+16));
+   }
+}
+
+
+void brw_copy_from_indirect(struct brw_compile *p,
+			    struct brw_reg dst,
+			    struct brw_indirect ptr,
+			    GLuint count)
+{
+   GLuint i;
+
+   dst = vec4(dst);
+
+   for (i = 0; i < count; i++)
+   {
+      GLuint delta = i*32;
+      brw_MOV(p, byte_offset(dst, delta),    deref_4f(ptr, delta));
+      brw_MOV(p, byte_offset(dst, delta+16), deref_4f(ptr, delta+16));
+   }
+}
+
+
+
+
commit ca9d9c02a260bf7930e04bf64e93cc051893c04e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 28 23:08:07 2012 +0100

    sna: Prefer not to create a GPU bo without RENDER acceleration
    
    Unless that bo happens to be used on a render chain to the scanout.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 0db6861..b8099c9 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -558,6 +558,11 @@ static inline bool wedged(struct sna *sna)
 	return unlikely(sna->kgem.wedged);
 }
 
+static inline bool can_render(struct sna *sna)
+{
+	return likely(!sna->kgem.wedged && sna->have_render);
+}
+
 static inline uint32_t pixmap_size(PixmapPtr pixmap)
 {
 	return (pixmap->drawable.height - 1) * pixmap->devKind +
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b8c7c6..f5cfd29 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -931,7 +931,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		goto fallback;
 	}
 
-	if (wedged(sna))
+	if (!can_render(sna))
 		flags = 0;
 
 	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
@@ -957,7 +957,6 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	if (usage == CREATE_PIXMAP_USAGE_BACKING_PIXMAP)
 		usage = 0;
 
-force_create:
 	pad = PixmapBytePad(width, depth);
 	if (pad * height <= 4096) {
 		DBG(("%s: small buffer [%d], attaching to shadow pixmap\n",
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 918b51c..ccf6497 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -188,7 +188,7 @@ bool sna_glyphs_create(struct sna *sna)
 	if (sna->render.white_image == NULL)
 		goto bail;
 
-	if (sna->kgem.wedged || !sna->have_render)
+	if (!can_render(sna))
 		return true;
 
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
@@ -1675,10 +1675,10 @@ sna_glyphs(CARD8 op,
 	if (REGION_NUM_RECTS(dst->pCompositeClip) == 0)
 		return;
 
-	if (FALLBACK || !sna->have_render)
+	if (FALLBACK)
 		goto fallback;
 
-	if (wedged(sna)) {
+	if (!can_render(sna)) {
 		DBG(("%s: wedged\n", __FUNCTION__));
 		goto fallback;
 	}
commit eefbe5b6038424566faf7333bb09764b050dd6b4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 30 10:56:29 2012 +0100

    sna: Debug option to test migration of inactive pixmaps
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d1337cc..1b8c7c6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,6 +60,7 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
+#define USE_INACTIVE 0
 
 #define MIGRATE_ALL 0
 #define DBG_NO_CPU_UPLOAD 0
@@ -2145,7 +2146,8 @@ static inline struct sna_pixmap *
 sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 {
 	assert(priv->gpu_bo);
-	if (!priv->pinned && priv->gpu_bo->proxy == NULL &&
+	if (USE_INACTIVE &&
+	    !priv->pinned && priv->gpu_bo->proxy == NULL &&
 	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->cpu = false;
@@ -2524,7 +2526,8 @@ use_gpu_bo:
 	assert(priv->gpu_bo->proxy == NULL);
 	priv->clear = false;
 	priv->cpu = false;
-	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
+	if (USE_INACTIVE &&
+	    !priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive,
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
@@ -13450,6 +13453,9 @@ static bool sna_accel_do_expire(struct sna *sna)
 
 static bool sna_accel_do_inactive(struct sna *sna)
 {
+	if (!USE_INACTIVE)
+		return false;
+
 	if (sna->timer_active & (1<<(INACTIVE_TIMER))) {
 		int32_t delta = sna->timer_expire[INACTIVE_TIMER] - TIME;
 		if (delta <= 3) {
commit d3499cacb59f19b5a3439a630ffbc3e105a27b75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 23:00:15 2012 +0100

    sna: Assert that we never attempt to submit a batch whilst wedged
    
    We should be asserting at the point that we insert the invalid operation
    into the batch, but asserting upon submitting the batch is a useful
    failsafe.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 515b1ae..635dd24 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2069,6 +2069,7 @@ void _kgem_submit(struct kgem *kgem)
 	int size;
 
 	assert(!DBG_NO_HW);
+	assert(!kgem->wedged);
 
 	assert(kgem->nbatch);
 	assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem));
commit cb4d789f98e6e05ec29e5242887018c7450ddf10
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 22:55:44 2012 +0100

    sna: Disable the warning for a hung GPU if we manually set wedged
    
    Only warn about a hung GPU if we encounter an EIO during operation, but
    don't warn if we set wedged during initialisation based on unsupported
    hw or user request.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 204b40f..515b1ae 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2238,19 +2238,17 @@ void _kgem_submit(struct kgem *kgem)
 
 void kgem_throttle(struct kgem *kgem)
 {
-	static int warned;
+	kgem->need_throttle = 0;
+	if (kgem->wedged)
+		return;
 
-	kgem->wedged |= __kgem_throttle(kgem);
-	DBG(("%s: wedged=%d\n", __FUNCTION__, kgem->wedged));
-	if (kgem->wedged && !warned) {
+	kgem->wedged = __kgem_throttle(kgem);
+	if (kgem->wedged) {
 		xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
 			   "Detected a hung GPU, disabling acceleration.\n");
 		xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
 			   "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
-		warned = 1;
 	}
-
-	kgem->need_throttle = 0;
 }
 
 void kgem_purge_cache(struct kgem *kgem)
commit 0fd680ff52f7ce0a101c617dfb8997c4e228e3ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 29 22:51:26 2012 +0100

    Don't disable acceleration on 830/845g by default
    
    Run the risk of a GPU hang (it shouldn't endanger the entire machine
    normally) and let the user elect to disable it through
    
      Option "NoAccel" "true"
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 8cb098a..0e27c48 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -390,18 +390,9 @@ static Bool can_accelerate_blt(struct intel_screen_private *intel)
 	if (INTEL_INFO(intel)->gen == -1)
 		return FALSE;
 
-	if (0 && (IS_I830(intel) || IS_845G(intel))) {
-		/* These pair of i8xx chipsets have a crippling erratum
-		 * that prevents the use of a PTE entry by the BLT
-		 * engine immediately following updating that
-		 * entry in the GATT.
-		 *
-		 * As the BLT is fundamental to our 2D acceleration,
-		 * and the workaround is lost in the midst of time,
-		 * fallback.
-		 *
-		 * XXX disabled for release as causes regressions in GL.
-		 */
+	if (xf86ReturnOptValBool(intel->Options, OPTION_ACCEL_DISABLE, FALSE)) {
+		xf86DrvMsg(intel->scrn->scrnIndex, X_CONFIG,
+			   "Disabling hardware acceleration.\n");
 		return FALSE;
 	}
 
diff --git a/src/intel_options.c b/src/intel_options.c
index 77832aa..7dbbc7e 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -5,6 +5,7 @@
 #include "intel_options.h"
 
 const OptionInfoRec intel_options[] = {
+	{OPTION_ACCEL_DISABLE,	"NoAccel",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_ACCEL_METHOD,	"AccelMethod",	OPTV_STRING,	{0},	0},
 	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	1},
 	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	0},
diff --git a/src/intel_options.h b/src/intel_options.h
index 233908c..6c16a07 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -11,6 +11,7 @@
  */
 
 enum intel_options {
+	OPTION_ACCEL_DISABLE,
 	OPTION_ACCEL_METHOD,
 	OPTION_DRI,
 	OPTION_VIDEO_KEY,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 19ebdef..204b40f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -652,8 +652,10 @@ static bool is_hw_supported(struct kgem *kgem,
 	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
 		return kgem->has_blt;
 
-	if (kgem->gen <= 20) /* dynamic GTT is fubar */
-		return false;
+	/* Although pre-855gm the GMCH is fubar, it works mostly. So
+	 * let the user decide through "NoAccel" whether or not to risk
+	 * hw acceleration.
+	 */
 
 	if (kgem->gen == 60 && dev->revision < 8) {
 		/* pre-production SNB with dysfunctional BLT */
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 3f2ff2e..bd31996 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -439,7 +439,14 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
+
 	kgem_init(&sna->kgem, fd, sna->PciInfo, sna->info->gen);
+	if (xf86ReturnOptValBool(sna->Options, OPTION_ACCEL_DISABLE, FALSE)) {
+		xf86DrvMsg(sna->scrn->scrnIndex, X_CONFIG,
+			   "Disabling hardware acceleration.\n");
+		sna->kgem.wedged = true;
+	}
+
 	if (!xf86ReturnOptValBool(sna->Options,
 				  OPTION_RELAXED_FENCING,
 				  sna->kgem.has_relaxed_fencing)) {
commit 3d45f0affe263985f440e144203ed7cbb3803696
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 28 18:21:08 2012 +0100

    sna: Honour the Option "DRI"
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52624
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 3bb2315..3f2ff2e 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -508,7 +508,9 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	/* Set display resolution */
 	xf86SetDpi(scrn, 0, 0);
 
-	sna->dri_available = !!xf86LoadSubModule(scrn, "dri2");
+	sna->dri_available = false;
+	if (xf86ReturnOptValBool(sna->Options, OPTION_DRI, TRUE))
+		sna->dri_available = !!xf86LoadSubModule(scrn, "dri2");
 
 	return TRUE;
 }
commit fb385745a2347f8966765567e78229d67ddc8d60
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 27 13:24:04 2012 +0100

    sna/gen4: Move the common vertex_offset==0 check into the flush()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 1a860bd..58d4422 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -338,7 +338,8 @@ static void gen4_magic_ca_pass(struct sna *sna,
 
 static void gen4_vertex_flush(struct sna *sna)
 {
-	assert(sna->render_state.gen4.vertex_offset);
+	if (sna->render_state.gen4.vertex_offset == 0)
+		return;
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen4.vertex_offset,
@@ -359,8 +360,7 @@ static int gen4_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (sna->render_state.gen4.vertex_offset)
-			gen4_vertex_flush(sna);
+		gen4_vertex_flush(sna);
 
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
@@ -1783,8 +1783,7 @@ gen4_render_video(struct sna *sna,
 	}
 	priv->clear = false;
 
-	if (sna->render_state.gen4.vertex_offset)
-		gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna);
 	return true;
 }
 
@@ -2693,8 +2692,7 @@ fastcall static void
 gen4_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
-	if (sna->render_state.gen4.vertex_offset)
-		gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna);
 
 	DBG(("%s()\n", __FUNCTION__));
 
@@ -3072,8 +3070,7 @@ gen4_render_copy_blt(struct sna *sna,
 static void
 gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	if (sna->render_state.gen4.vertex_offset)
-		gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna);
 }
 
 static bool
@@ -3366,8 +3363,7 @@ gen4_render_fill_op_boxes(struct sna *sna,
 static void
 gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	if (sna->render_state.gen4.vertex_offset)
-		gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
commit 73ddd8b0decee444a57f10a11f05deebba686649
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 27 12:43:00 2012 +0100

    sna/gen4: Further refinement to the GT allocation
    
    Still hunting for why gen4 fails utterly.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 25229e1..1a860bd 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -79,8 +79,8 @@
 #define URB_CS_ENTRY_SIZE     1
 #define URB_CS_ENTRIES	      0
 
-#define URB_VS_ENTRY_SIZE     1	// each 512-bit row
-#define URB_VS_ENTRIES	      32	// we needs at least 8 entries
+#define URB_VS_ENTRY_SIZE     1
+#define URB_VS_ENTRIES	      32
 
 #define URB_GS_ENTRY_SIZE     0
 #define URB_GS_ENTRIES	      0
@@ -89,25 +89,24 @@
 #define URB_CLIP_ENTRIES      0
 
 #define URB_SF_ENTRY_SIZE     2
-#define URB_SF_ENTRIES	      8
+#define URB_SF_ENTRIES	      64
 
 /*
  * this program computes dA/dx and dA/dy for the texture coordinates along
  * with the base texture coordinate. It was extracted from the Mesa driver
  */
 
-#define SF_KERNEL_NUM_GRF  16
-
-#define PS_KERNEL_NUM_GRF   32
+#define SF_KERNEL_NUM_GRF 16
+#define PS_KERNEL_NUM_GRF 32
 
 static const struct gt_info {
 	uint32_t max_sf_threads;
 	uint32_t max_wm_threads;
 	uint32_t urb_size;
 } gen4_gt_info = {
-	16, 32, 256,
+	24, 32, 256,
 }, g4x_gt_info = {
-	32, 50, 384,
+	24, 50, 384,
 };
 
 static const uint32_t sf_kernel[][4] = {
@@ -1455,10 +1454,10 @@ gen4_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
 		uint16_t wm_binding_table)
 {
+	gen4_emit_drawing_rectangle(sna, op);
 	gen4_emit_binding_table(sna, wm_binding_table);
 	gen4_emit_pipelined_pointers(sna, op, op->op, op->u.gen4.wm_kernel);
 	gen4_emit_vertex_elements(sna, op);
-	gen4_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
commit 717823a9f77cfedc50e03c5c31c1da44a396b561
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Fri Jul 27 18:12:26 2012 +0800

    uxa/dri (glamor): Use exchange buffer in glamor fixup.
    
    The previous implementation is to create a new textured
    pixmap based on the newly created pixmap's buffer object.
    
    This is not efficient, as we already created it when we
    call CreatePixmap. We can just exchange the underlying
    texture/image buffers by calling intel_glamor_exchange_buffers().
    
    And this commit also seems to fix a weird rendering problem
    when working with compiz/mutter.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index d027a64..fa1660c 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -110,6 +110,7 @@ static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 {
 	ScreenPtr screen = drawable->pScreen;
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
 	PixmapPtr old = get_drawable_pixmap(drawable);
 	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);
 	GCPtr gc;
@@ -139,28 +140,20 @@ static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 	}
 
 	intel_set_pixmap_private(pixmap, NULL);
-	screen->DestroyPixmap(pixmap);
 
+	/* Exchange the underlying texture/image. */
+	intel_glamor_exchange_buffers(intel, old, pixmap);
 	/* And redirect the pixmap to the new bo (for 3D). */
 	intel_set_pixmap_private(old, priv);
 	old->refcnt++;
 
-	/* This creating should not fail, as we already created its
-	 * successfully. But if it happens, we put a warning indicator
-	 * here, and the old pixmap will still be a glamor pixmap, and
-	 * latter the pixmap_flink will get a 0 name, then the X server
-	 * will pass a BadAlloc to the client.*/
-	if (!intel_glamor_create_textured_pixmap(old))
-		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
-			   "Failed to get DRI drawable for glamor pixmap.\n");
-
 	screen->ModifyPixmapHeader(old,
 				   drawable->width,
 				   drawable->height,
 				   0, 0,
 				   priv->stride,
 				   NULL);
-
+	screen->DestroyPixmap(pixmap);
 	intel_get_screen_private(xf86ScreenToScrn(screen))->needs_flush = TRUE;
 	return old;
 }
@@ -194,10 +187,9 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 		if (attachments[i] == DRI2BufferFrontLeft) {
 			pixmap = get_front_buffer(drawable);
 
-			if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
+			if (pixmap == NULL) {
+				drawable = &(get_drawable_pixmap(drawable)->drawable);
 				is_glamor_pixmap = TRUE;
-				drawable = &pixmap->drawable;
-				pixmap = NULL;
 			}
 		} else if (attachments[i] == DRI2BufferStencil && pDepthPixmap) {
 			pixmap = pDepthPixmap;
@@ -237,7 +229,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 				goto unwind;
 			}
 
-			if (attachment == DRI2BufferFrontLeft)
+			if (is_glamor_pixmap)
 				pixmap = fixup_glamor(drawable, pixmap);
 		}
 
@@ -314,10 +306,9 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 	if (attachment == DRI2BufferFrontLeft) {
 		pixmap = get_front_buffer(drawable);
 
-		if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
+		if (pixmap == NULL) {
+			drawable = &(get_drawable_pixmap(drawable)->drawable);
 			is_glamor_pixmap = TRUE;
-			drawable = &pixmap->drawable;
-			pixmap = NULL;
 		}
 	}
 
@@ -391,7 +382,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 			free(buffer);
 			return NULL;
 		}
-		if (attachment == DRI2BufferFrontLeft)
+		if (is_glamor_pixmap)
 			pixmap = fixup_glamor(drawable, pixmap);
 	}
 
commit a8ee1406244d8b8399bf933d6b61bfd14374b5f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 27 09:07:16 2012 +0100

    2.20.2 release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 08340f8..7e267a6 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,23 @@
+Release 2.20.2 (2012-07-27)
+===========================
+For the last 9 months, since 2.16.901, we have been shipping a driver that
+does not work on IvyBridge GT1 systems (HD2500 graphics); we were telling
+the GPU to use an invalid number of threads for the pixel shader and this
+in turn caused the GPU to hang.
+
+Also fixed since the last release just a few days ago:
+
+* Support for the gmux backlight controller on Apple laptops
+  https://bugs.freedesktop.org/show_bug.cgi?id=52423
+
+* Fix X -configure not to list this driver as matching any Intel device,
+  just the VGA class devices will do!
+
+* A crash in SNA when repeatedly switching xrandr rotations
+
+* Corruption in SNA observed in kwin on IvyBridge
+  https://bugs.freedesktop.org/show_bug.cgi?id=52473
+
 Release 2.20.1 (2012-07-22)
 ===========================
 A week in, grab the brown paper bags, for it is time to reveal a couple
diff --git a/configure.ac b/configure.ac
index 3cdacdd..9945d5b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.1],
+        [2.20.2],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit bef73cd9279be3438e467981db39c67bc13104f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 21:54:33 2012 +0100

    sna/dri: Select the engine before emitting the wait
    
    So that if we have a flexible WAIT_FOR_EVENT that can go on either
    pipeline, we can choose our preferred pipeline for DRI.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 1daf1c4..8d6c305 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -546,6 +546,14 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		}
 	}
 
+	if (!wedged(sna)) {
+		if (sync)
+			sync = sna_pixmap_is_scanout(sna, pixmap);
+
+		sna_dri_select_mode(sna, src_bo, sync);
+	} else
+		sync = false;
+
 	dx = dy = 0;
 	if (draw->type != DRAWABLE_PIXMAP) {
 		WindowPtr win = (WindowPtr)draw;
@@ -569,7 +577,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 			region = &clip;
 		}
 
-		if (sync && sna_pixmap_is_scanout(sna, pixmap)) {
+		if (sync) {
 			crtc = sna_covering_crtc(sna->scrn, &clip.extents, NULL);
 			if (crtc)
 				flush = sna_wait_for_scanline(sna, pixmap, crtc,
@@ -595,8 +603,6 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 				      dst_bo, dx, dy,
 				      boxes, n);
 	} else {
-		sna_dri_select_mode(sna, src_bo, flush);
-
 		sna->render.copy_boxes(sna, GXcopy,
 				       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
 				       pixmap, dst_bo, dx, dy,
commit 1ced4f1ddcf30b518e1760c7aa4a5ed4f934b9f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 10:50:31 2012 +0100

    Reduce maximum thread count for IVB GT1 to avoid spontaneous combustion
    
    Somewhere along the way it seems that IVB GT1 was reduced to only allow
    a maximum of 48 threads, as revealed in the latest bspecs.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52473
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index 9d45944..2182df8 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2703,7 +2703,7 @@ gen7_composite_wm_state(intel_screen_private *intel,
 	OUT_BATCH((1 << GEN7_PS_SAMPLER_COUNT_SHIFT) |
 		  (num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT));
 	OUT_BATCH(0); /* scratch space base offset */
-	OUT_BATCH(((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
+	OUT_BATCH(((48 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
 		  GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH((6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0));
diff --git a/src/i965_video.c b/src/i965_video.c
index d9350ce..bcd6063 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -1658,7 +1658,7 @@ gen7_upload_wm_state(ScrnInfoPtr scrn, Bool is_packed)
 
 	OUT_BATCH(0); /* scratch space base offset */
 	OUT_BATCH(
-		((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
+		((48 - 1) << GEN7_PS_MAX_THREADS_SHIFT) |
 		GEN7_PS_ATTRIBUTE_ENABLE |
 		GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH(
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 167a5e6..d06b791 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -77,7 +77,7 @@ struct gt_info {
 static const struct gt_info gt1_info = {
 	.max_vs_threads = 36,
 	.max_gs_threads = 36,
-	.max_wm_threads = (86-1) << GEN7_PS_MAX_THREADS_SHIFT,
+	.max_wm_threads = (48-1) << GEN7_PS_MAX_THREADS_SHIFT,
 	.urb = { 128, 512, 192 },
 };
 
commit 8f8f8759111f791ee99adfd87296443fb0e6acad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 17:39:34 2012 +0100

    sna/gen4: Tweak heuristics for render/blt usage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d62d744..25229e1 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1125,7 +1125,7 @@ static bool gen4_rectangle_begin(struct sna *sna,
 static int gen4_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	if (!kgem_check_batch(&sna->kgem, 25))
+	if (!kgem_check_batch(&sna->kgem, (FLUSH_EVERY_VERTEX || op->need_magic_ca_pass) ? 25 : 6))
 		return 0;
 	if (!kgem_check_reloc_and_exec(&sna->kgem, 1))
 		return 0;
@@ -1145,9 +1145,9 @@ inline static int gen4_get_rectangles(struct sna *sna,
 
 start:
 	rem = vertex_space(sna);
-	if (rem < 3*op->floats_per_vertex) {
+	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
-		     __FUNCTION__, rem, 3*op->floats_per_vertex));
+		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen4_get_rectangles__flush(sna, op);
 		if (unlikely(rem == 0))
 			goto flush;
@@ -1157,8 +1157,8 @@ start:
 		     !gen4_rectangle_begin(sna, op)))
 		goto flush;
 
-	if (want > 1 && want * op->floats_per_vertex*3 > rem)
-		want = rem / (3*op->floats_per_vertex);
+	if (want > 1 && want * op->floats_per_rect > rem)
+		want = rem / op->floats_per_rect;
 
 	sna->render.vertex_index += 3*want;
 	return want;
@@ -1173,8 +1173,9 @@ flush:
 	goto start;
 }
 
-static uint32_t *gen4_composite_get_binding_table(struct sna *sna,
-						  uint16_t *offset)
+static uint32_t *
+gen4_composite_get_binding_table(struct sna *sna,
+				 uint16_t *offset)
 {
 	sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
@@ -1288,7 +1289,7 @@ static void
 gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
-		if (sna->render.vertex_size - sna->render.vertex_used < 6*op->floats_per_vertex)
+		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
 			gen4_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
@@ -1335,8 +1336,8 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 	     kernel, blend, op->has_component_alpha, (int)op->dst.format));
 
 	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
-			      op->mask.filter, op->mask.repeat,
-			      kernel);
+			    op->mask.filter, op->mask.repeat,
+			    kernel);
 	bp = gen4_get_blend(blend, op->has_component_alpha, op->dst.format);
 
 	key = op->mask.bo != NULL;
@@ -1371,7 +1372,7 @@ gen4_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
 	sna->render_state.gen4.drawrect_limit = limit;
 
 	OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
-	OUT_BATCH(0x00000000);
+	OUT_BATCH(0);
 	OUT_BATCH(limit);
 	OUT_BATCH(offset);
 }
@@ -1713,11 +1714,14 @@ gen4_render_video(struct sna *sna,
 
 	tmp.src.filter = SAMPLER_FILTER_BILINEAR;
 	tmp.src.repeat = SAMPLER_EXTEND_PAD;
+	tmp.src.bo = frame->bo;
+	tmp.mask.bo = NULL;
 	tmp.u.gen4.wm_kernel =
 		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
+	tmp.u.gen4.ve_id = 1;
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
-	tmp.u.gen4.ve_id = 1;
+	tmp.floats_per_rect = 9;
 	tmp.priv = frame;
 
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
@@ -2078,13 +2082,7 @@ picture_is_cpu(PicturePtr picture)
 	if (!picture->pDrawable)
 		return false;
 
-	/* If it is a solid, try to use the render paths */
-	if (picture->pDrawable->width == 1 &&
-	    picture->pDrawable->height == 1 &&
-	    picture->repeat)
-		return false;
-
-	return is_cpu(picture->pDrawable);
+	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
 static inline bool prefer_blt(struct sna *sna)
@@ -2099,7 +2097,7 @@ static inline bool prefer_blt(struct sna *sna)
 
 static bool
 try_blt(struct sna *sna,
-	PicturePtr source,
+	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (prefer_blt(sna)) {
@@ -2113,8 +2111,15 @@ try_blt(struct sna *sna,
 		return true;
 	}
 
+	if (too_large(dst->pDrawable->width, dst->pDrawable->height))
+		return true;
+
+	/* The blitter is much faster for solids */
+	if (sna_picture_is_solid(src, NULL))
+		return true;
+
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
-	return picture_is_cpu(source);
+	return picture_is_cpu(src);
 }
 
 static bool
@@ -2144,7 +2149,7 @@ untransformed(PicturePtr p)
 static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
+	return p->pDrawable && untransformed(p) && is_cpu(p->pDrawable);
 }
 
 static bool
@@ -2234,12 +2239,12 @@ gen4_composite_fallback(struct sna *sna,
 		return false;
 	}
 
-	if (!src_fallback) {
+	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return false;
 	}
-	if (mask && !mask_fallback) {
+	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return false;
@@ -2255,7 +2260,7 @@ gen4_composite_fallback(struct sna *sna,
 		return true;
 	}
 
-	if (mask && mask_fallback) {
+	if (mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return true;
@@ -2358,7 +2363,7 @@ gen4_render_composite(struct sna *sna,
 #endif
 
 	if (mask == NULL &&
-	    try_blt(sna, src, width, height) &&
+	    try_blt(sna, dst, src, width, height) &&
 	    sna_blt_composite(sna, op,
 			      src, dst,
 			      src_x, src_y,
@@ -2464,11 +2469,9 @@ gen4_render_composite(struct sna *sna,
 		else if (tmp->src.is_affine)
 			tmp->prim_emit = gen4_emit_composite_primitive_affine_source;
 
-		tmp->mask.filter = SAMPLER_FILTER_NEAREST;
-		tmp->mask.repeat = SAMPLER_EXTEND_NONE;
-
 		tmp->floats_per_vertex = 3 + !tmp->is_affine;
 	}
+	tmp->floats_per_rect = 3*tmp->floats_per_vertex;
 
 	tmp->u.gen4.wm_kernel =
 		gen4_choose_composite_kernel(tmp->op,
@@ -2705,7 +2708,7 @@ gen4_render_composite_spans_done(struct sna *sna,
 static bool
 gen4_check_composite_spans(struct sna *sna,
 			   uint8_t op, PicturePtr src, PicturePtr dst,
-			   int16_t width,  int16_t height,
+			   int16_t width, int16_t height,
 			   unsigned flags)
 {
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
@@ -2717,7 +2720,7 @@ gen4_check_composite_spans(struct sna *sna,
 	if (gen4_composite_fallback(sna, src, NULL, dst))
 		return false;
 
-	if (!is_gpu(dst->pDrawable))
+	if (need_tiling(sna, width, height) && !is_gpu(dst->pDrawable))
 		return false;
 
 	return true;
@@ -2999,9 +3002,8 @@ fallback_blt:
 					       extents.x1 + src_dx,
 					       extents.y1 + src_dy,
 					       extents.x2 - extents.x1,
-					       extents.y2 - extents.y1)) {
+					       extents.y2 - extents.y1))
 			goto fallback_tiled_dst;
-		}
 	} else {
 		tmp.src.bo = kgem_bo_reference(src_bo);
 		tmp.src.width  = src->drawable.width;
@@ -3011,10 +3013,9 @@ fallback_blt:
 		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
-	tmp.mask.bo = NULL;
-
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
 
@@ -3041,6 +3042,8 @@ fallback_blt:
 				     box->x1 + dst_dx, box->y1 + dst_dy);
 		box++;
 	} while (--n);
+
+	gen4_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return true;
@@ -3115,6 +3118,7 @@ fallback:
 				    dst->drawable.bitsPerPixel,
 				    op);
 	}
+
 	if (dst->drawable.depth == src->drawable.depth) {
 		op->base.dst.format = sna_render_format_for_depth(dst->drawable.depth);
 		op->base.src.pict_format = op->base.dst.format;
@@ -3142,10 +3146,9 @@ fallback:
 	op->base.src.filter = SAMPLER_FILTER_NEAREST;
 	op->base.src.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.mask.bo = NULL;
-
 	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
+	op->base.floats_per_rect = 9;
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
 	op->base.u.gen4.ve_id = 1;
 
@@ -3155,6 +3158,15 @@ fallback:
 			goto fallback;
 	}
 
+	if (kgem_bo_is_dirty(src_bo)) {
+		if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+		    sna_blt_copy(sna, alu,
+				 src_bo, dst_bo,
+				 dst->drawable.bitsPerPixel,
+				 op))
+			return true;
+	}
+
 	gen4_copy_bind_surfaces(sna, &op->base);
 	gen4_align_vertex(sna, &op->base);
 
@@ -3296,6 +3308,7 @@ gen4_render_fill_boxes(struct sna *sna,
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
 
@@ -3315,6 +3328,7 @@ gen4_render_fill_boxes(struct sna *sna,
 		box++;
 	} while (--n);
 
+	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return true;
 }
@@ -3397,6 +3411,9 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.dst.bo = dst_bo;
 	op->base.dst.x = op->base.dst.y = 0;
 
+	op->base.need_magic_ca_pass = 0;
+	op->base.has_component_alpha = 0;
+
 	op->base.src.bo =
 		sna_render_get_solid(sna,
 				     sna_rgba_for_color(color,
@@ -3410,8 +3427,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 
 	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
-	op->base.need_magic_ca_pass = 0;
-	op->base.has_component_alpha = 0;
+	op->base.floats_per_rect = 9;
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
 	op->base.u.gen4.ve_id = 1;
 
@@ -3498,6 +3514,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
+	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
 	tmp.need_magic_ca_pass = false;
 
@@ -3514,8 +3531,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen4_render_fill_rectangle(sna, &tmp, x1, y1, x2 - x1, y2 - y1);
 
-	if (sna->render_state.gen4.vertex_offset)
-		gen4_vertex_flush(sna);
+	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
 	return true;
@@ -3538,6 +3554,31 @@ discard_vbo(struct sna *sna)
 	sna->render.vertex_index = 0;
 }
 
+static void
+gen4_render_retire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
+static void
+gen4_render_expire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (sna->render.vbo && !sna->render.vertex_used) {
+		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		discard_vbo(sna);
+	}
+}
+
 static void gen4_render_reset(struct sna *sna)
 {
 	sna->render_state.gen4.needs_invariant = true;
@@ -3807,6 +3848,9 @@ bool gen4_render_init(struct sna *sna)
 	if (!gen4_render_setup(sna))
 		return false;
 
+	sna->kgem.retire = gen4_render_retire;
+	sna->kgem.expire = gen4_render_expire;
+
 	sna->render.composite = gen4_render_composite;
 #if !NO_COMPOSITE_SPANS
 	sna->render.check_composite_spans = gen4_check_composite_spans;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index db7eb7b..3af9097 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2096,8 +2096,6 @@ picture_is_cpu(PicturePtr picture)
 	if (!picture->pDrawable)
 		return false;
 
-	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
-		return true;
 
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
@@ -2731,7 +2729,8 @@ gen5_render_composite_spans_done(struct sna *sna,
 static bool
 gen5_check_composite_spans(struct sna *sna,
 			   uint8_t op, PicturePtr src, PicturePtr dst,
-			   int16_t width, int16_t height, unsigned flags)
+			   int16_t width, int16_t height,
+			   unsigned flags)
 {
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
 		return false;
commit c9dd1401615f0ed9492a0c0f547fb37150e013d1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 16:31:16 2012 +0100

    sna/gen4: Bump thread counts
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index bc37615..d62d744 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -80,7 +80,7 @@
 #define URB_CS_ENTRIES	      0
 
 #define URB_VS_ENTRY_SIZE     1	// each 512-bit row
-#define URB_VS_ENTRIES	      8	// we needs at least 8 entries
+#define URB_VS_ENTRIES	      32	// we needs at least 8 entries
 
 #define URB_GS_ENTRY_SIZE     0
 #define URB_GS_ENTRIES	      0
@@ -89,7 +89,7 @@
 #define URB_CLIP_ENTRIES      0
 
 #define URB_SF_ENTRY_SIZE     2
-#define URB_SF_ENTRIES	      1
+#define URB_SF_ENTRIES	      8
 
 /*
  * this program computes dA/dx and dA/dy for the texture coordinates along
@@ -97,10 +97,18 @@
  */
 
 #define SF_KERNEL_NUM_GRF  16
-#define SF_MAX_THREADS	   2
 
 #define PS_KERNEL_NUM_GRF   32
-#define PS_MAX_THREADS	    48
+
+static const struct gt_info {
+	uint32_t max_sf_threads;
+	uint32_t max_wm_threads;
+	uint32_t urb_size;
+} gen4_gt_info = {
+	16, 32, 256,
+}, g4x_gt_info = {
+	32, 50, 384,
+};
 
 static const uint32_t sf_kernel[][4] = {
 #include "exa_sf.g4b"
@@ -3569,34 +3577,35 @@ static uint32_t gen4_create_vs_unit_state(struct sna_static_stream *stream)
 }
 
 static uint32_t gen4_create_sf_state(struct sna_static_stream *stream,
+				     const struct gt_info *info,
 				     uint32_t kernel)
 {
-	struct gen4_sf_unit_state *sf_state;
+	struct gen4_sf_unit_state *sf;
 
-	sf_state = sna_static_stream_map(stream, sizeof(*sf_state), 32);
+	sf = sna_static_stream_map(stream, sizeof(*sf), 32);
 
-	sf_state->thread0.grf_reg_count = GEN4_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
-	sf_state->thread0.kernel_start_pointer = kernel >> 6;
-	sf_state->sf1.single_program_flow = 1;
+	sf->thread0.grf_reg_count = GEN4_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
+	sf->thread0.kernel_start_pointer = kernel >> 6;
+	sf->sf1.single_program_flow = 1;
 	/* scratch space is not used in our kernel */
-	sf_state->thread2.scratch_space_base_pointer = 0;
-	sf_state->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
-	sf_state->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
-	sf_state->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
+	sf->thread2.scratch_space_base_pointer = 0;
+	sf->thread3.const_urb_entry_read_length = 0;	/* no const URBs */
+	sf->thread3.const_urb_entry_read_offset = 0;	/* no const URBs */
+	sf->thread3.urb_entry_read_length = 1;	/* 1 URB per vertex */
 	/* don't smash vertex header, read start from dw8 */
-	sf_state->thread3.urb_entry_read_offset = 1;
-	sf_state->thread3.dispatch_grf_start_reg = 3;
-	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
-	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
-	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->sf5.viewport_transform = false;	/* skip viewport */
-	sf_state->sf6.cull_mode = GEN4_CULLMODE_NONE;
-	sf_state->sf6.scissor = 0;
-	sf_state->sf7.trifan_pv = 2;
-	sf_state->sf6.dest_org_vbias = 0x8;
-	sf_state->sf6.dest_org_hbias = 0x8;
+	sf->thread3.urb_entry_read_offset = 1;
+	sf->thread3.dispatch_grf_start_reg = 3;
+	sf->thread4.max_threads = info->max_sf_threads - 1;
+	sf->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
+	sf->thread4.nr_urb_entries = URB_SF_ENTRIES;
+	sf->sf5.viewport_transform = false;	/* skip viewport */
+	sf->sf6.cull_mode = GEN4_CULLMODE_NONE;
+	sf->sf6.scissor = 0;
+	sf->sf7.trifan_pv = 2;
+	sf->sf6.dest_org_vbias = 0x8;
+	sf->sf6.dest_org_hbias = 0x8;
 
-	return sna_static_stream_offsetof(stream, sf_state);
+	return sna_static_stream_offsetof(stream, sf);
 }
 
 static uint32_t gen4_create_sampler_state(struct sna_static_stream *stream,
@@ -3616,47 +3625,48 @@ static uint32_t gen4_create_sampler_state(struct sna_static_stream *stream,
 	return sna_static_stream_offsetof(stream, sampler_state);
 }
 
-static void gen4_init_wm_state(struct gen4_wm_unit_state *state,
+static void gen4_init_wm_state(struct gen4_wm_unit_state *wm,
+			       const struct gt_info *info,
 			       bool has_mask,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
-	state->thread0.grf_reg_count = GEN4_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
-	state->thread0.kernel_start_pointer = kernel >> 6;
+	wm->thread0.grf_reg_count = GEN4_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
+	wm->thread0.kernel_start_pointer = kernel >> 6;
 
-	state->thread1.single_program_flow = 0;
+	wm->thread1.single_program_flow = 0;
 
 	/* scratch space is not used in our kernel */
-	state->thread2.scratch_space_base_pointer = 0;
-	state->thread2.per_thread_scratch_space = 0;
+	wm->thread2.scratch_space_base_pointer = 0;
+	wm->thread2.per_thread_scratch_space = 0;
 
-	state->thread3.const_urb_entry_read_length = 0;
-	state->thread3.const_urb_entry_read_offset = 0;
+	wm->thread3.const_urb_entry_read_length = 0;
+	wm->thread3.const_urb_entry_read_offset = 0;
 
-	state->thread3.urb_entry_read_offset = 0;
+	wm->thread3.urb_entry_read_offset = 0;
 	/* wm kernel use urb from 3, see wm_program in compiler module */
-	state->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */
+	wm->thread3.dispatch_grf_start_reg = 3;	/* must match kernel */
 
-	state->wm4.sampler_count = 1;	/* 1-4 samplers */
+	wm->wm4.sampler_count = 1;	/* 1-4 samplers */
 
-	state->wm4.sampler_state_pointer = sampler >> 5;
-	state->wm5.max_threads = PS_MAX_THREADS - 1;
-	state->wm5.transposed_urb_read = 0;
-	state->wm5.thread_dispatch_enable = 1;
+	wm->wm4.sampler_state_pointer = sampler >> 5;
+	wm->wm5.max_threads = info->max_wm_threads - 1;
+	wm->wm5.transposed_urb_read = 0;
+	wm->wm5.thread_dispatch_enable = 1;
 	/* just use 16-pixel dispatch (4 subspans), don't need to change kernel
 	 * start point
 	 */
-	state->wm5.enable_16_pix = 1;
-	state->wm5.enable_8_pix = 0;
-	state->wm5.early_depth_test = 1;
+	wm->wm5.enable_16_pix = 1;
+	wm->wm5.enable_8_pix = 0;
+	wm->wm5.early_depth_test = 1;
 
 	/* Each pair of attributes (src/mask coords) is two URB entries */
 	if (has_mask) {
-		state->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
-		state->thread3.urb_entry_read_length = 4;
+		wm->thread1.binding_table_entry_count = 3;	/* 2 tex and fb */
+		wm->thread3.urb_entry_read_length = 4;
 	} else {
-		state->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
-		state->thread3.urb_entry_read_length = 2;
+		wm->thread1.binding_table_entry_count = 2;	/* 1 tex and fb */
+		wm->thread3.urb_entry_read_length = 2;
 	}
 }
 
@@ -3716,9 +3726,15 @@ static bool gen4_render_setup(struct sna *sna)
 	struct gen4_render_state *state = &sna->render_state.gen4;
 	struct sna_static_stream general;
 	struct gen4_wm_unit_state_padded *wm_state;
+	const struct gt_info *info;
 	uint32_t sf[2], wm[KERNEL_COUNT];
 	int i, j, k, l, m;
 
+	if (sna->kgem.gen == 45)
+		info = &g4x_gt_info;
+	else
+		info = &gen4_gt_info;
+
 	sna_static_stream_init(&general);
 
 	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
@@ -3744,8 +3760,8 @@ static bool gen4_render_setup(struct sna *sna)
 
 	state->vs = gen4_create_vs_unit_state(&general);
 
-	state->sf[0] = gen4_create_sf_state(&general, sf[0]);
-	state->sf[1] = gen4_create_sf_state(&general, sf[1]);
+	state->sf[0] = gen4_create_sf_state(&general, info, sf[0]);
+	state->sf[1] = gen4_create_sf_state(&general, info, sf[1]);
 
 
 	/* Set up the WM states: each filter/extend type for source and mask, per
@@ -3769,7 +3785,7 @@ static bool gen4_render_setup(struct sna *sna)
 									  k, l);
 
 					for (m = 0; m < KERNEL_COUNT; m++) {
-						gen4_init_wm_state(&wm_state->state,
+						gen4_init_wm_state(&wm_state->state, info,
 								   wm_kernels[m].has_mask,
 								   wm[m],
 								   sampler_state);
commit 7f3fdef98c1ab2fa27439c3be9810b7a934017ce
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 15:39:05 2012 +0100

    sna/gen7: IVB requires a complete pipeline stall when changing blend modes
    
    Similar to how SandyBridge behaves, I had hoped that with IvyBridge they
    would have made the pipelined operation actually pipelined, but alas.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52473
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index cf56e42..167a5e6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1031,7 +1031,7 @@ gen7_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
 		uint16_t wm_binding_table)
 {
-	bool need_stall = false;
+	bool need_stall;
 
 	if (sna->render_state.gen7.emit_flush)
 		gen7_emit_pipe_flush(sna);
@@ -1042,7 +1042,10 @@ gen7_emit_state(struct sna *sna,
 	gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
 	gen7_emit_vertex_elements(sna, op);
 
-	need_stall |= gen7_emit_binding_table(sna, wm_binding_table);
+	need_stall = false;
+	if (wm_binding_table & 1)
+		need_stall = GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
+	need_stall |= gen7_emit_binding_table(sna, wm_binding_table & ~1);
 	need_stall &= gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
@@ -1787,8 +1790,10 @@ static void gen7_emit_composite_state(struct sna *sna,
 {
 	uint32_t *binding_table;
 	uint16_t offset;
+	bool dirty;
 
 	gen7_get_batch(sna);
+	dirty = kgem_bo_is_dirty(op->dst.bo);
 
 	binding_table = gen7_composite_get_binding_table(sna, &offset);
 
@@ -1820,7 +1825,7 @@ static void gen7_emit_composite_state(struct sna *sna,
 		offset = sna->render_state.gen7.surface_table;
 	}
 
-	gen7_emit_state(sna, op, offset);
+	gen7_emit_state(sna, op, offset | dirty);
 }
 
 static void
@@ -3329,6 +3334,7 @@ gen7_emit_copy_state(struct sna *sna,
 		offset = sna->render_state.gen7.surface_table;
 	}
 
+	assert(GEN7_BLEND(op->u.gen7.flags) == NO_BLEND);
 	gen7_emit_state(sna, op, offset);
 }
 
@@ -3705,6 +3711,7 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 {
 	uint32_t *binding_table;
 	uint16_t offset;
+	bool dirty;
 
 	/* XXX Render Target Fast Clear
 	 * Set RTFC Enable in PS and render a rectangle.
@@ -3713,6 +3720,7 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 	 */
 
 	gen7_get_batch(sna);
+	dirty = kgem_bo_is_dirty(op->dst.bo);
 
 	binding_table = gen7_composite_get_binding_table(sna, &offset);
 
@@ -3734,7 +3742,7 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		offset = sna->render_state.gen7.surface_table;
 	}
 
-	gen7_emit_state(sna, op, offset);
+	gen7_emit_state(sna, op, offset | dirty);
 }
 
 static inline bool prefer_blt_fill(struct sna *sna,
commit 0938b3df8c25178c8ea0012e1ead1061d03a4e7c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 15:21:59 2012 +0100

    sna/dri: Add an explanatory assertion
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index d647c02..1daf1c4 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -397,6 +397,7 @@ static void damage(PixmapPtr pixmap, RegionPtr region)
 	struct sna_pixmap *priv;
 
 	priv = sna_pixmap(pixmap);
+	assert(priv != NULL);
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		return;
 
commit c621183466aa55a5938027b702069e792df2272d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 15:18:56 2012 +0100

    sna/dri: Tidy fallback/normal CopyRegion
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6a4a454..d647c02 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -578,8 +578,6 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
 	}
 
-	sna_dri_select_mode(sna, src_bo, flush);
-
 	damage(pixmap, region);
 	if (region) {
 		boxes = REGION_RECTS(region);
@@ -596,16 +594,18 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 				      dst_bo, dx, dy,
 				      boxes, n);
 	} else {
+		sna_dri_select_mode(sna, src_bo, flush);
+
 		sna->render.copy_boxes(sna, GXcopy,
 				       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
 				       pixmap, dst_bo, dx, dy,
 				       boxes, n, COPY_LAST);
-	}
 
-	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
-	if (flush) { /* STAT! */
-		kgem_submit(&sna->kgem);
-		bo = kgem_get_last_request(&sna->kgem);
+		DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
+		if (flush) { /* STAT! */
+			kgem_submit(&sna->kgem);
+			bo = kgem_get_last_request(&sna->kgem);
+		}
 	}
 
 	pixman_region_translate(region, dx, dy);
@@ -670,8 +670,6 @@ sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
 	}
 
-	sna_dri_select_mode(sna, src_bo, false);
-
 	if (region) {
 		boxes = REGION_RECTS(region);
 		n = REGION_NUM_RECTS(region);
@@ -688,6 +686,7 @@ sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 				      dst_bo, -draw->x, -draw->y,
 				      boxes, n);
 	} else {
+		sna_dri_select_mode(sna, src_bo, false);
 		sna->render.copy_boxes(sna, GXcopy,
 				       pixmap, src_bo, dx, dy,
 				       (PixmapPtr)draw, dst_bo, -draw->x, -draw->y,
@@ -730,14 +729,13 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		n = 1;
 	}
 
-	sna_dri_select_mode(sna, src_bo, false);
-
 	if (wedged(sna)) {
 		sna_dri_copy_fallback(sna, draw->bitsPerPixel,
 				      src_bo, 0, 0,
 				      dst_bo, 0, 0,
 				      boxes, n);
 	} else {
+		sna_dri_select_mode(sna, src_bo, false);
 		sna->render.copy_boxes(sna, GXcopy,
 				       (PixmapPtr)draw, src_bo, 0, 0,
 				       (PixmapPtr)draw, dst_bo, 0, 0,
commit 52d2491a1bafb979d79bb970027c55788f199acb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 15:19:14 2012 +0100

    sna/video: Protect against attempting to use TexturedVideo whilst wedged
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 37a5e9f..a908596 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2419,7 +2419,7 @@ sna_covering_crtc(ScrnInfoPtr scrn,
 	int best_coverage, c;
 
 	/* If we do not own the VT, we do not own the CRTC either */
-	if (!scrn->vtSema || wedged(to_sna(scrn)))
+	if (!scrn->vtSema)
 		return NULL;
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 2332ce2..27fc09f 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -235,7 +235,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		return BadAlloc;
 	}
 
-	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE)) {
+	if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: attempting to render to a non-GPU pixmap\n",
 		     __FUNCTION__));
 		return BadAlloc;
commit 8c0e3bbb0c301d0fa4652aa38edd84a9fd6b555e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 14:58:04 2012 +0100

    sna: Force the damage on the DRI pixmap to be flushed to the GPU
    
    Otherwise nothing will happen if we consider ourselves wedged.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a517cc6..d1337cc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2659,8 +2659,11 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	BoxPtr box;
 	int n;
 
-	DBG(("%s(pixmap=%ld, usage=%d)\n",
-	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
+	DBG(("%s(pixmap=%ld, usage=%d), flags=%x\n",
+	     __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     pixmap->usage_hint,
+	     flags));
 
 	if ((flags & __MOVE_FORCE) == 0 && wedged(sna))
 		return NULL;
@@ -13278,7 +13281,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		} else {
 			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
 			     priv->pixmap->drawable.serialNumber));
-			ret = sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ);
+			ret = sna_pixmap_move_to_gpu(priv->pixmap,
+						     MOVE_READ | __MOVE_FORCE);
 		}
 		(void)ret;
 	}
commit f50d7b03b5d0b81b24f0acb4ae555545bbdaa179
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 14:47:44 2012 +0100

    sna: Only recommend not to create bo for a pixmap whilst wedged
    
    This allows us to continue to map a GPU bo and operate inplace if we are
    forced to create a GPU bo for a compositor.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 16c0eed..a517cc6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -923,14 +923,6 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	}
 	assert(width && height);
 
-	if (wedged(sna)) {
-		if (usage == SNA_CREATE_FB) {
-			flags = KGEM_CAN_CREATE_GPU;
-			goto force_create;
-		}
-		goto fallback;
-	}
-
 	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
 	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
@@ -938,6 +930,9 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		goto fallback;
 	}
 
+	if (wedged(sna))
+		flags = 0;
+
 	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
 		if (flags & KGEM_CAN_CREATE_GPU)
 			return sna_pixmap_create_scratch(screen,
commit 49403ddd1bd9a23e1b32a10e7d0757ae2897a579
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 14:44:14 2012 +0100

    sna/dri: Mark a flush required for any new DRI already on exec/dirty lists
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 062ccfe..16c0eed 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1153,8 +1153,10 @@ static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
 
 static inline void add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
 {
+	DBG(("%s: marking pixmap=%ld for flushing\n",
+	     __FUNCTION__, priv->pixmap->drawable.serialNumber));
 	list_move(&priv->list, &sna->flush_pixmaps);
-	sna->kgem.flush |= 1;
+	sna->kgem.flush = true;
 }
 
 bool
@@ -13261,6 +13263,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	 * by checking for outgoing damage events or sync replies. Tricky,
 	 * and doesn't appear to mitigate the performance loss.
 	 */
+	DBG(("%s: flush?=%d\n", __FUNCTION__, sna->kgem.flush));
 	if (!sna->kgem.flush)
 		return;
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 1812267..6a4a454 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -189,6 +189,13 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
 
+	if (priv->gpu_bo->exec || priv->cpu_damage) {
+		DBG(("%s: marking pixmap=%ld for flushing\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
+		list_move(&priv->list, &sna->flush_pixmaps);
+		sna->kgem.flush = true;
+	}
+
 	return priv->gpu_bo;
 }
 
commit efe3c8ff48738f2a274e1d4514d32499fc4aadbc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 14:43:33 2012 +0100

    sna: Allow DRI to force allocation of a GPU bo and backing pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 51a8206..0db6861 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -425,6 +425,7 @@ void sna_pixmap_destroy(PixmapPtr pixmap);
 #define MOVE_SOURCE_HINT 0x10
 #define MOVE_WHOLE_HINT 0x20
 #define __MOVE_FORCE 0x40
+#define __MOVE_DRI 0x80
 
 struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags);
 static inline struct sna_pixmap *
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4d30c5c..062ccfe 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2671,7 +2671,18 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
 		DBG(("%s: not attached\n", __FUNCTION__));
-		return NULL;
+		if ((flags & __MOVE_DRI) == 0)
+			return NULL;
+
+		DBG(("%s: forcing the creation on the GPU\n", __FUNCTION__));
+
+		priv = sna_pixmap_attach(pixmap);
+		if (priv == NULL)
+			return NULL;
+
+		sna_damage_all(&priv->cpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
 	}
 
 	if (sna_damage_is_all(&priv->gpu_damage,
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index bdd1f22..1812267 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -154,13 +154,21 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	struct sna_pixmap *priv;
 	int tiling;
 
+	DBG(("%s: attaching DRI client to pixmap=%ld\n",
+	     __FUNCTION__, pixmap->drawable.serialNumber));
+
 	priv = sna_pixmap(pixmap);
-	if (priv == NULL || priv->shm)
+	if (priv != NULL && priv->shm) {
+		DBG(("%s: SHM Pixmap, BadAlloc\n", __FUNCTION__));
 		return NULL;
+	}
 
-	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
-	if (priv == NULL)
+	priv = sna_pixmap_move_to_gpu(pixmap,
+				      MOVE_READ | MOVE_WRITE | __MOVE_FORCE | __MOVE_DRI);
+	if (priv == NULL) {
+		DBG(("%s: failed to move to GPU, BadAlloc\n", __FUNCTION__));
 		return NULL;
+	}
 
 	if (priv->flush++)
 		return priv->gpu_bo;
commit 2069384f9f06d3ef9dbb0c3f2c64cac4b24e10fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 14:15:45 2012 +0100

    sna/dri: Implement fallback handling for CopyRegion whilst wedged
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a908596..37a5e9f 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2419,7 +2419,7 @@ sna_covering_crtc(ScrnInfoPtr scrn,
 	int best_coverage, c;
 
 	/* If we do not own the VT, we do not own the CRTC either */
-	if (!scrn->vtSema)
+	if (!scrn->vtSema || wedged(to_sna(scrn)))
 		return NULL;
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 9698247..bdd1f22 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -477,6 +477,28 @@ static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
 	_kgem_set_mode(&sna->kgem, mode);
 }
 
+static void
+sna_dri_copy_fallback(struct sna *sna, int bpp,
+		      struct kgem_bo *src_bo, int sx, int sy,
+		      struct kgem_bo *dst_bo, int dx, int dy,
+		      const BoxRec *box, int n)
+{
+	void *dst = kgem_bo_map__gtt(&sna->kgem, dst_bo);
+	void *src = kgem_bo_map__gtt(&sna->kgem, src_bo);
+
+	DBG(("%s: src(%d, %d), dst(%d, %d) x %d\n",
+	     __FUNCTION__, sx, sy, dx, dy, n));
+
+	do {
+		memcpy_blt(src, dst, bpp,
+			   src_bo->pitch, dst_bo->pitch,
+			   box->x1 + sx, box->y1 + sy,
+			   box->x1 + dx, box->y1 + dy,
+			   box->x2 - box->x1, box->y2 - box->y1);
+		box++;
+	} while (--n);
+}
+
 static struct kgem_bo *
 sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		      struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
@@ -553,10 +575,17 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		boxes = &clip.extents;
 		n = 1;
 	}
-	sna->render.copy_boxes(sna, GXcopy,
-			       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
-			       pixmap, dst_bo, dx, dy,
-			       boxes, n, COPY_LAST);
+	if (wedged(sna)) {
+		sna_dri_copy_fallback(sna, draw->bitsPerPixel,
+				      src_bo, -draw->x, -draw->y,
+				      dst_bo, dx, dy,
+				      boxes, n);
+	} else {
+		sna->render.copy_boxes(sna, GXcopy,
+				       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
+				       pixmap, dst_bo, dx, dy,
+				       boxes, n, COPY_LAST);
+	}
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
@@ -638,10 +667,17 @@ sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		boxes = &box;
 		n = 1;
 	}
-	sna->render.copy_boxes(sna, GXcopy,
-			       pixmap, src_bo, dx, dy,
-			       (PixmapPtr)draw, dst_bo, -draw->x, -draw->y,
-			       boxes, n, COPY_LAST);
+	if (wedged(sna)) {
+		sna_dri_copy_fallback(sna, draw->bitsPerPixel,
+				      src_bo, dx, dy,
+				      dst_bo, -draw->x, -draw->y,
+				      boxes, n);
+	} else {
+		sna->render.copy_boxes(sna, GXcopy,
+				       pixmap, src_bo, dx, dy,
+				       (PixmapPtr)draw, dst_bo, -draw->x, -draw->y,
+				       boxes, n, COPY_LAST);
+	}
 
 	if (region == &clip)
 		pixman_region_fini(&clip);
@@ -681,10 +717,17 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	sna_dri_select_mode(sna, src_bo, false);
 
-	sna->render.copy_boxes(sna, GXcopy,
-			       (PixmapPtr)draw, src_bo, 0, 0,
-			       (PixmapPtr)draw, dst_bo, 0, 0,
-			       boxes, n, COPY_LAST);
+	if (wedged(sna)) {
+		sna_dri_copy_fallback(sna, draw->bitsPerPixel,
+				      src_bo, 0, 0,
+				      dst_bo, 0, 0,
+				      boxes, n);
+	} else {
+		sna->render.copy_boxes(sna, GXcopy,
+				       (PixmapPtr)draw, src_bo, 0, 0,
+				       (PixmapPtr)draw, dst_bo, 0, 0,
+				       boxes, n, COPY_LAST);
+	}
 
 	if (region == &clip)
 		pixman_region_fini(&clip);
commit b18143de47e060b67a46d9c68590a2d35df9fca6
Author: Paul Menzel <paulepanter at users.sourceforge.net>
Date:   Thu Jul 26 12:51:57 2012 +0200

    NEWS: Correct release version: s/2.12.0/2.20.0/
    
    Signed-off-by: Paul Menzel <paulepanter at users.sourceforge.net>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index a6819d4..08340f8 100644
--- a/NEWS
+++ b/NEWS
@@ -38,7 +38,7 @@ Among the other tweaks this week:
 * Shadow support was dropped from UXA as it was neither complete nor
   correct, use SNA instead.
 
-Release 2.12.0 (2012-07-15)
+Release 2.20.0 (2012-07-15)
 ===========================
 First the big news, a new acceleration method that aims to be faster and
 consume far less CPU than UXA is now available for selection at runtime.
commit c262d02fb5defe9100df54cf83cc00e11e335745
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 01:12:11 2012 +0100

    Limit PCI matching to VGA devices
    
    Fixes X -configure
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index 5e7a330..e1755ff 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -154,7 +154,7 @@ static const SymTabRec _intel_chipsets[] = {
 SymTabRec *intel_chipsets = (SymTabRec *) _intel_chipsets;
 
 #define INTEL_DEVICE_MATCH(d,i) \
-    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0, 0, (intptr_t)(i) }
+    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
 
 static const struct pci_id_match intel_device_match[] = {
 #if !KMS_ONLY
commit ad6355311b8b80777bc0fec95f6bf6cd1b4969d9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 01:02:53 2012 +0100

    sna: Compile against xorg-1.10
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbpict.c b/src/sna/fb/fbpict.c
index f6bcb64..a203851 100644
--- a/src/sna/fb/fbpict.c
+++ b/src/sna/fb/fbpict.c
@@ -31,6 +31,30 @@
 #include <mipict.h>
 #include "fbpict.h"
 
+static void
+SourceValidateOnePicture(PicturePtr picture)
+{
+	DrawablePtr drawable = picture->pDrawable;
+	ScreenPtr screen;
+
+	if (!drawable)
+		return;
+
+	screen = drawable->pScreen;
+	if (screen->SourceValidate)
+		screen->SourceValidate(drawable,
+				       0, 0, drawable->width, drawable->height,
+				       picture->subWindowMode);
+}
+
+static void
+fbCompositeSourceValidate(PicturePtr picture)
+{
+	SourceValidateOnePicture(picture);
+	if (picture->alphaMap)
+		SourceValidateOnePicture(picture->alphaMap);
+}
+
 void
 fbComposite(CARD8 op,
             PicturePtr pSrc,
@@ -46,9 +70,9 @@ fbComposite(CARD8 op,
 	int msk_xoff, msk_yoff;
 	int dst_xoff, dst_yoff;
 
-	miCompositeSourceValidate(pSrc);
+	fbCompositeSourceValidate(pSrc);
 	if (pMask)
-		miCompositeSourceValidate(pMask);
+		fbCompositeSourceValidate(pMask);
 
 	src = image_from_pict(pSrc, FALSE, &src_xoff, &src_yoff);
 	mask = image_from_pict(pMask, FALSE, &msk_xoff, &msk_yoff);
commit b6c7c490d76c683b7b5c89d20f902603b85bd3bc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 26 00:48:59 2012 +0100

    sna: Check for a NULL scanout after DPMS off with shadow enabled
    
    We may mark the scanout as detached when all outputs are turned off (for
    example during rotation) and so in the subsequent block handler we need
    to be careful in case we are handling a detached shadow.
    
    Reported-by: chr.ohm at gmx.net
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52514
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 30e5e9f..4d30c5c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13375,7 +13375,8 @@ static bool sna_accel_do_flush(struct sna *sna)
 	} else {
 		if (!start_flush(sna, priv)) {
 			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
-			kgem_bo_flush(&sna->kgem, priv->gpu_bo);
+			if (priv)
+				kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
commit f4acc01bb09f68edbad4c6cc7e04c271c92661c1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 22:43:32 2012 +0100

    sna: Restore inplace upload for DRI compositors
    
    With a DRI compositor we have to flush the output after every request,
    which leads to major inefficiencies. This can be mitigated slightly if
    we know we will have to upload shortly, which we track using the pixmap
    flush flag.
    
    In particular PutImage is meant to upload inplace to an active DRI
    buffer, however this was accidentally dropped in
    
    commit a253c95ec63b2b075e66ae7380fed6a73469eba5
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Sun Jul 15 13:32:35 2012 +0100
    
        sna: Prefer uploads to be staged in snoopable bo
    
    Performance of putimage500 on ivb i7-3720qm:
      bare:        4610
      gnome-shell: 3000
      patched:     3390
    
    Reported-by: Michael Larabel <Michael at phoronix.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 66d3e93..30e5e9f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3120,7 +3120,7 @@ static bool upload_inplace(struct sna *sna,
 		return false;
 	}
 
-	if (sna->kgem.has_llc) {
+	if (sna->kgem.has_llc && !priv->flush) {
 		if (priv->cpu_bo) {
 			if (priv->cpu_damage &&
 			    kgem_bo_is_busy(priv->cpu_bo) &&
@@ -3158,7 +3158,6 @@ static bool upload_inplace(struct sna *sna,
 			DBG(("%s? yes, will replace busy GPU\n", __FUNCTION__));
 			return true;
 		}
-
 	}
 
 	DBG(("%s? no\n", __FUNCTION__));
commit 5c969a05ef815b261e157fe8d1172aebfd7f5841
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 20:28:41 2012 +0100

    sna/gen7: Move the PS thread count definition into the constant struct
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index a8d40b6..cf56e42 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -64,9 +64,9 @@
 #define is_aligned(x, y) (((x) & ((y) - 1)) == 0)
 
 struct gt_info {
-	int max_vs_threads;
-	int max_gs_threads;
-	int max_wm_threads;
+	uint32_t max_vs_threads;
+	uint32_t max_gs_threads;
+	uint32_t max_wm_threads;
 	struct {
 		int size;
 		int max_vs_entries;
@@ -77,14 +77,14 @@ struct gt_info {
 static const struct gt_info gt1_info = {
 	.max_vs_threads = 36,
 	.max_gs_threads = 36,
-	.max_wm_threads = 86,
+	.max_wm_threads = (86-1) << GEN7_PS_MAX_THREADS_SHIFT,
 	.urb = { 128, 512, 192 },
 };
 
 static const struct gt_info gt2_info = {
 	.max_vs_threads = 128,
 	.max_gs_threads = 128,
-	.max_wm_threads = 172,
+	.max_wm_threads = (172-1) << GEN7_PS_MAX_THREADS_SHIFT,
 	.urb = { 256, 704, 320 },
 };
 
@@ -833,7 +833,7 @@ gen7_emit_wm(struct sna *sna, int kernel)
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
 		  wm_kernels[kernel].num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
-	OUT_BATCH((sna->render_state.gen7.info->max_wm_threads - 1) << GEN7_PS_MAX_THREADS_SHIFT |
+	OUT_BATCH(sna->render_state.gen7.info->max_wm_threads |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
 		  GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH(6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
commit 901bb618215d65747eb0a8d481c77c1201f69362
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 20:13:15 2012 +0100

    sna/gen7: Remove duplicated constants
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ae0aa9d..a8d40b6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4277,7 +4277,7 @@ static bool gen7_render_setup(struct sna *sna)
 	 */
 	null_create(&general);
 
-	for (m = 0; m < GEN7_KERNEL_COUNT; m++)
+	for (m = 0; m < GEN7_WM_KERNEL_COUNT; m++)
 		state->wm_kernel[m] =
 			sna_static_stream_add(&general,
 					       wm_kernels[m].data,
diff --git a/src/sna/gen7_render.h b/src/sna/gen7_render.h
index e2ca1f2..8de52a4 100644
--- a/src/sna/gen7_render.h
+++ b/src/sna/gen7_render.h
@@ -81,24 +81,46 @@
 # define GEN7_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT		25
 
 #define GEN7_3DSTATE_WM				GEN7_3D(3, 0, 0x14)
+/* DW1 */
+# define GEN7_WM_STATISTICS_ENABLE                              (1 << 31)
+# define GEN7_WM_DEPTH_CLEAR                                    (1 << 30)
+# define GEN7_WM_DISPATCH_ENABLE                                (1 << 29)
+# define GEN7_WM_DEPTH_RESOLVE                                  (1 << 28)
+# define GEN7_WM_HIERARCHICAL_DEPTH_RESOLVE                     (1 << 27)
+# define GEN7_WM_KILL_ENABLE                                    (1 << 25)
+# define GEN7_WM_PSCDEPTH_OFF                                   (0 << 23)
+# define GEN7_WM_PSCDEPTH_ON                                    (1 << 23)
+# define GEN7_WM_PSCDEPTH_ON_GE                                 (2 << 23)
+# define GEN7_WM_PSCDEPTH_ON_LE                                 (3 << 23)
+# define GEN7_WM_USES_SOURCE_DEPTH                              (1 << 20)
+# define GEN7_WM_USES_SOURCE_W                                  (1 << 19)
+# define GEN7_WM_POSITION_ZW_PIXEL                              (0 << 17)
+# define GEN7_WM_POSITION_ZW_CENTROID                           (2 << 17)
+# define GEN7_WM_POSITION_ZW_SAMPLE                             (3 << 17)
+# define GEN7_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC              (1 << 16)
+# define GEN7_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC            (1 << 15)
+# define GEN7_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC               (1 << 14)
+# define GEN7_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC                 (1 << 13)
+# define GEN7_WM_PERSPECTIVE_CENTROID_BARYCENTRIC               (1 << 12)
+# define GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC                  (1 << 11)
+# define GEN7_WM_USES_INPUT_COVERAGE_MASK                       (1 << 10)
+# define GEN7_WM_LINE_END_CAP_AA_WIDTH_0_5                      (0 << 8)
+# define GEN7_WM_LINE_END_CAP_AA_WIDTH_1_0                      (1 << 8)
+# define GEN7_WM_LINE_END_CAP_AA_WIDTH_2_0                      (2 << 8)
+# define GEN7_WM_LINE_END_CAP_AA_WIDTH_4_0                      (3 << 8)
+# define GEN7_WM_LINE_AA_WIDTH_0_5                              (0 << 6)
+# define GEN7_WM_LINE_AA_WIDTH_1_0                              (1 << 6)
+# define GEN7_WM_LINE_AA_WIDTH_2_0                              (2 << 6)
+# define GEN7_WM_LINE_AA_WIDTH_4_0                              (3 << 6)
+# define GEN7_WM_POLYGON_STIPPLE_ENABLE                         (1 << 4)
+# define GEN7_WM_LINE_STIPPLE_ENABLE                            (1 << 3)
+# define GEN7_WM_POINT_RASTRULE_UPPER_RIGHT                     (1 << 2)
+# define GEN7_WM_MSRAST_OFF_PIXEL                               (0 << 0)
+# define GEN7_WM_MSRAST_OFF_PATTERN                             (1 << 0)
+# define GEN7_WM_MSRAST_ON_PIXEL                                (2 << 0)
+# define GEN7_WM_MSRAST_ON_PATTERN                              (3 << 0)
 /* DW2 */
-# define GEN7_3DSTATE_WM_SAMPLER_COUNT_SHIFT			27
-# define GEN7_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT	18
-/* DW4 */
-# define GEN7_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT		16
-/* DW5 */
-# define GEN7_3DSTATE_WM_MAX_THREADS_SHIFT			25
-# define GEN7_3DSTATE_WM_DISPATCH_ENABLE			(1 << 19)
-# define GEN7_3DSTATE_WM_16_DISPATCH_ENABLE			(1 << 1)
-# define GEN7_3DSTATE_WM_8_DISPATCH_ENABLE			(1 << 0)
-/* DW6 */
-# define GEN7_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT			20
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC	(1 << 15)
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC	(1 << 14)
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC	(1 << 13)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC		(1 << 12)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_CENTROID_BARYCENTRIC	(1 << 11)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC		(1 << 10)
+# define GEN7_WM_MSDISPMODE_PERPIXEL                            (1 << 31)
 
 
 #define GEN7_3DSTATE_CONSTANT_VS		GEN7_3D(3, 0, 0x15)
@@ -177,7 +199,7 @@
 #define GEN7_VF_CTL_SKIP_INITIAL_PRIMITIVES	   (1 << 3)
 #define GEN7_VF_CTL_MAX_PRIMITIVES_LIMIT_ENABLE	   (1 << 2)
 #define GEN7_VF_CTL_VERTEX_RANGE_LIMIT_ENABLE	   (1 << 1)
-#define GEN7_VF_CTL_SNAPSHOT_ENABLE	     	   (1 << 0)
+#define GEN7_VF_CTL_SNAPSHOT_ENABLE		   (1 << 0)
 
 #define GEN7_VF_STRG_VAL		       0x7504
 #define GEN7_VF_STR_VL_OVR	       0x7508
@@ -240,7 +262,7 @@
 #define GEN7_TS_CTL_SNAPSHOT_MESSAGE_ERROR	   (0 << 8)
 #define GEN7_TS_CTL_SNAPSHOT_INTERFACE_DESCRIPTOR   (3 << 8)
 #define GEN7_TS_CTL_SNAPSHOT_ALL_CHILD_THREADS	   (1 << 2)
-#define GEN7_TS_CTL_SNAPSHOT_ALL_ROOT_THREADS  	   (1 << 1)
+#define GEN7_TS_CTL_SNAPSHOT_ALL_ROOT_THREADS	   (1 << 1)
 #define GEN7_TS_CTL_SNAPSHOT_ENABLE		   (1 << 0)
 
 #define GEN7_TS_STRG_VAL		       0x7e04
@@ -288,119 +310,6 @@
 #define GEN7_EU_ATT_CLR_1	       0x8834
 #define GEN7_EU_RDATA		       0x8840
 
-#define GEN7_3DPRIMITIVE				GEN7_3D(3, 3, 0)
-
-/* for GEN7+ */
-#define GEN7_3DSTATE_SAMPLER_STATE_POINTERS	GEN7_3D(3, 0, 0x02)
-# define GEN7_3DSTATE_SAMPLER_STATE_MODIFY_PS	(1 << 12)
-# define GEN7_3DSTATE_SAMPLER_STATE_MODIFY_GS	(1 << 9)
-# define GEN7_3DSTATE_SAMPLER_STATE_MODIFY_VS	(1 << 8)
-
-#define GEN7_3DSTATE_URB			GEN7_3D(3, 0, 0x05)
-/* DW1 */
-# define GEN7_3DSTATE_URB_VS_SIZE_SHIFT		16
-# define GEN7_3DSTATE_URB_VS_ENTRIES_SHIFT	0
-/* DW2 */
-# define GEN7_3DSTATE_URB_GS_ENTRIES_SHIFT	8
-# define GEN7_3DSTATE_URB_GS_SIZE_SHIFT		0
-
-#define GEN7_3DSTATE_VIEWPORT_STATE_POINTERS	GEN7_3D(3, 0, 0x0d)
-# define GEN7_3DSTATE_VIEWPORT_STATE_MODIFY_CC		(1 << 12)
-# define GEN7_3DSTATE_VIEWPORT_STATE_MODIFY_SF		(1 << 11)
-# define GEN7_3DSTATE_VIEWPORT_STATE_MODIFY_CLIP	(1 << 10)
-
-#define GEN7_3DSTATE_CC_STATE_POINTERS		GEN7_3D(3, 0, 0x0e)
-
-#define GEN7_3DSTATE_VS				GEN7_3D(3, 0, 0x10)
-
-#define GEN7_3DSTATE_GS				GEN7_3D(3, 0, 0x11)
-/* DW4 */
-# define GEN7_3DSTATE_GS_DISPATCH_START_GRF_SHIFT	0
-
-#define GEN7_3DSTATE_CLIP			GEN7_3D(3, 0, 0x12)
-
-#define GEN7_3DSTATE_SF				GEN7_3D(3, 0, 0x13)
-/* DW1 */
-# define GEN7_3DSTATE_SF_NUM_OUTPUTS_SHIFT		22
-# define GEN7_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT	11
-# define GEN7_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT	4
-/* DW2 */
-/* DW3 */
-# define GEN7_3DSTATE_SF_CULL_BOTH			(0 << 29)
-# define GEN7_3DSTATE_SF_CULL_NONE			(1 << 29)
-# define GEN7_3DSTATE_SF_CULL_FRONT			(2 << 29)
-# define GEN7_3DSTATE_SF_CULL_BACK			(3 << 29)
-/* DW4 */
-# define GEN7_3DSTATE_SF_TRI_PROVOKE_SHIFT		29
-# define GEN7_3DSTATE_SF_LINE_PROVOKE_SHIFT		27
-# define GEN7_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT		25
-
-
-#define GEN7_3DSTATE_WM				GEN7_3D(3, 0, 0x14)
-/* DW2 */
-# define GEN7_3DSTATE_WM_SAMPLER_COUNT_SHITF			27
-# define GEN7_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT	18
-/* DW4 */
-# define GEN7_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT		16
-/* DW5 */
-# define GEN7_3DSTATE_WM_MAX_THREADS_SHIFT			25
-# define GEN7_3DSTATE_WM_DISPATCH_ENABLE			(1 << 19)
-# define GEN7_3DSTATE_WM_16_DISPATCH_ENABLE			(1 << 1)
-# define GEN7_3DSTATE_WM_8_DISPATCH_ENABLE			(1 << 0)
-/* DW6 */
-# define GEN7_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT			20
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC	(1 << 15)
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC	(1 << 14)
-# define GEN7_3DSTATE_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC	(1 << 13)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC		(1 << 12)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_CENTROID_BARYCENTRIC	(1 << 11)
-# define GEN7_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC		(1 << 10)
-
-
-#define GEN7_3DSTATE_CONSTANT_VS		GEN7_3D(3, 0, 0x15)
-#define GEN7_3DSTATE_CONSTANT_GS          	GEN7_3D(3, 0, 0x16)
-#define GEN7_3DSTATE_CONSTANT_PS          	GEN7_3D(3, 0, 0x17)
-
-#define GEN7_3DSTATE_SAMPLE_MASK		GEN7_3D(3, 0, 0x18)
-
-#define GEN7_3DSTATE_MULTISAMPLE		GEN7_3D(3, 1, 0x0d)
-/* DW1 */
-# define GEN7_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER		(0 << 4)
-# define GEN7_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_UPPER_LEFT	(1 << 4)
-# define GEN7_3DSTATE_MULTISAMPLE_NUMSAMPLES_1			(0 << 1)
-# define GEN7_3DSTATE_MULTISAMPLE_NUMSAMPLES_4			(2 << 1)
-# define GEN7_3DSTATE_MULTISAMPLE_NUMSAMPLES_8			(3 << 1)
-
-#define PIPELINE_SELECT_3D		0
-#define PIPELINE_SELECT_MEDIA		1
-
-#define UF0_CS_REALLOC			(1 << 13)
-#define UF0_VFE_REALLOC			(1 << 12)
-#define UF0_SF_REALLOC			(1 << 11)
-#define UF0_CLIP_REALLOC		(1 << 10)
-#define UF0_GS_REALLOC			(1 << 9)
-#define UF0_VS_REALLOC			(1 << 8)
-#define UF1_CLIP_FENCE_SHIFT		20
-#define UF1_GS_FENCE_SHIFT		10
-#define UF1_VS_FENCE_SHIFT		0
-#define UF2_CS_FENCE_SHIFT		20
-#define UF2_VFE_FENCE_SHIFT		10
-#define UF2_SF_FENCE_SHIFT		0
-
-/* for GEN7_STATE_BASE_ADDRESS */
-#define BASE_ADDRESS_MODIFY		(1 << 0)
-
-/* for GEN7_3DSTATE_PIPELINED_POINTERS */
-#define GEN7_GS_DISABLE		       0
-#define GEN7_GS_ENABLE		       1
-#define GEN7_CLIP_DISABLE	       0
-#define GEN7_CLIP_ENABLE		       1
-
-#define GEN7_SVG_RDATA		       0x7404
-#define GEN7_SVG_WORK_CTL	       0x7408
-
-#define GEN7_VF_CTL		       0x7500
-
 #define _3DPRIM_POINTLIST         0x01
 #define _3DPRIM_LINELIST          0x02
 #define _3DPRIM_LINESTRIP         0x03
@@ -1337,48 +1246,6 @@ struct gen7_sampler_state {
 # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL     (0 << 8)
 # define GEN7_3DPRIM_VERTEXBUFFER_ACCESS_RANDOM         (1 << 8)
 
-/* 3DSTATE_WM on GEN7 */
-/* DW1 */
-# define GEN7_WM_STATISTICS_ENABLE                              (1 << 31)
-# define GEN7_WM_DEPTH_CLEAR                                    (1 << 30)
-# define GEN7_WM_DISPATCH_ENABLE                                (1 << 29)
-# define GEN6_WM_DEPTH_RESOLVE                                  (1 << 28)
-# define GEN7_WM_HIERARCHICAL_DEPTH_RESOLVE                     (1 << 27)
-# define GEN7_WM_KILL_ENABLE                                    (1 << 25)
-# define GEN7_WM_PSCDEPTH_OFF                                   (0 << 23)
-# define GEN7_WM_PSCDEPTH_ON                                    (1 << 23)
-# define GEN7_WM_PSCDEPTH_ON_GE                                 (2 << 23)
-# define GEN7_WM_PSCDEPTH_ON_LE                                 (3 << 23)
-# define GEN7_WM_USES_SOURCE_DEPTH                              (1 << 20)
-# define GEN7_WM_USES_SOURCE_W                                  (1 << 19)
-# define GEN7_WM_POSITION_ZW_PIXEL                              (0 << 17)
-# define GEN7_WM_POSITION_ZW_CENTROID                           (2 << 17)
-# define GEN7_WM_POSITION_ZW_SAMPLE                             (3 << 17)
-# define GEN7_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC              (1 << 16)
-# define GEN7_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC            (1 << 15)
-# define GEN7_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC               (1 << 14)
-# define GEN7_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC                 (1 << 13)
-# define GEN7_WM_PERSPECTIVE_CENTROID_BARYCENTRIC               (1 << 12)
-# define GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC                  (1 << 11)
-# define GEN7_WM_USES_INPUT_COVERAGE_MASK                       (1 << 10)
-# define GEN7_WM_LINE_END_CAP_AA_WIDTH_0_5                      (0 << 8)
-# define GEN7_WM_LINE_END_CAP_AA_WIDTH_1_0                      (1 << 8)
-# define GEN7_WM_LINE_END_CAP_AA_WIDTH_2_0                      (2 << 8)
-# define GEN7_WM_LINE_END_CAP_AA_WIDTH_4_0                      (3 << 8)
-# define GEN7_WM_LINE_AA_WIDTH_0_5                              (0 << 6)
-# define GEN7_WM_LINE_AA_WIDTH_1_0                              (1 << 6)
-# define GEN7_WM_LINE_AA_WIDTH_2_0                              (2 << 6)
-# define GEN7_WM_LINE_AA_WIDTH_4_0                              (3 << 6)
-# define GEN7_WM_POLYGON_STIPPLE_ENABLE                         (1 << 4)
-# define GEN7_WM_LINE_STIPPLE_ENABLE                            (1 << 3)
-# define GEN7_WM_POINT_RASTRULE_UPPER_RIGHT                     (1 << 2)
-# define GEN7_WM_MSRAST_OFF_PIXEL                               (0 << 0)
-# define GEN7_WM_MSRAST_OFF_PATTERN                             (1 << 0)
-# define GEN7_WM_MSRAST_ON_PIXEL                                (2 << 0)
-# define GEN7_WM_MSRAST_ON_PATTERN                              (3 << 0)
-/* DW2 */
-# define GEN7_WM_MSDISPMODE_PERPIXEL                            (1 << 31)
-
 #define GEN7_3DSTATE_CLEAR_PARAMS               GEN7_3D(3, 0, 0x04)
 #define GEN7_3DSTATE_DEPTH_BUFFER               GEN7_3D(3, 0, 0x05)
 # define GEN7_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT	29
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 5662a79..e676b6a 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -433,7 +433,7 @@ enum {
 
 	GEN7_WM_KERNEL_VIDEO_PLANAR,
 	GEN7_WM_KERNEL_VIDEO_PACKED,
-	GEN7_KERNEL_COUNT
+	GEN7_WM_KERNEL_COUNT
 };
 
 struct gen7_render_state {
@@ -444,7 +444,7 @@ struct gen7_render_state {
 	uint32_t sf_state;
 	uint32_t sf_mask_state;
 	uint32_t wm_state;
-	uint32_t wm_kernel[GEN7_KERNEL_COUNT];
+	uint32_t wm_kernel[GEN7_WM_KERNEL_COUNT];
 
 	uint32_t cc_vp;
 	uint32_t cc_blend;
commit 8c5077e4ed055a97bf9deda59c0e9a45e42317ca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 16:59:11 2012 +0100

    Assume all unknown chipsets are future gen
    
    I think the likelihood of a new product being launched based on an 8xx
    design is remote enough not to worry about.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index cab7b4e..8cb098a 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -387,7 +387,7 @@ static Bool has_relaxed_fencing(struct intel_screen_private *intel)
 
 static Bool can_accelerate_blt(struct intel_screen_private *intel)
 {
-	if (INTEL_INFO(intel)->gen == 0)
+	if (INTEL_INFO(intel)->gen == -1)
 		return FALSE;
 
 	if (0 && (IS_I830(intel) || IS_845G(intel))) {
diff --git a/src/intel_module.c b/src/intel_module.c
index 845b40e..5e7a330 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -47,7 +47,7 @@
 static struct intel_device_info *chipset_info;
 
 static const struct intel_device_info intel_generic_info = {
-	.gen = 0,
+	.gen = -1,
 };
 
 static const struct intel_device_info intel_i81x_info = {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1aa8af9..19ebdef 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -556,12 +556,12 @@ static struct list *active(struct kgem *kgem, int num_pages, int tiling)
 }
 
 static size_t
-agp_aperture_size(struct pci_device *dev, int gen)
+agp_aperture_size(struct pci_device *dev, unsigned gen)
 {
 	/* XXX assume that only future chipsets are unknown and follow
 	 * the post gen2 PCI layout.
 	 */
-	return dev->regions[(gen && gen < 30) ? 0 : 2].size;
+	return dev->regions[gen < 30 ? 0 : 2].size;
 }
 
 static size_t
@@ -649,7 +649,7 @@ static bool is_hw_supported(struct kgem *kgem,
 	if (DBG_NO_HW)
 		return false;
 
-	if (kgem->gen == 0) /* unknown chipset, assume future gen */
+	if (kgem->gen == (unsigned)-1) /* unknown chipset, assume future gen */
 		return kgem->has_blt;
 
 	if (kgem->gen <= 20) /* dynamic GTT is fubar */
@@ -791,7 +791,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
 	     kgem->has_semaphores));
 
-	kgem->can_blt_cpu = gen == 0 || gen >= 30;
+	kgem->can_blt_cpu = gen >= 30;
 	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
 	     kgem->can_blt_cpu));
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index d7c3812..8227538 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -108,7 +108,7 @@ enum {
 struct kgem {
 	int fd;
 	int wedged;
-	int gen;
+	unsigned gen;
 
 	uint32_t unique_id;
 
commit 954d9c6aca792098cb16a46b4ec8d5fc104b15a9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 15:41:29 2012 +0100

    sna: Initialise single-shot tile offsets before use
    
    As noted by the compiler amidst all the noise.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 94dcabf..66d3e93 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9466,6 +9466,13 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 
 		sna_damage_add_rectangles(damage, r, n, dx, dy);
 		if (n == 1) {
+			tx = (r->x - origin->x) % 8;
+			if (tx < 8)
+				tx = 8 - tx;
+			ty = (r->y - origin->y) % 8;
+			if (ty < 8)
+				ty = 8 - ty;
+
 			b = sna->kgem.batch + sna->kgem.nbatch;
 			b[0] = XY_PAT_BLT | tx << 12 | ty << 8 | 3 << 20 | (br00 & BLT_DST_TILED);
 			b[1] = br13;
commit 484b072072c8297a87940c9e32097923f0a77c8f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 12:22:47 2012 +0100

    sna: Add 'gmux_backlight' to list of known devices for backward compatibility
    
    Reported-by: Austin Lund <austin.lund at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52423
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ebecd21..a908596 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -292,6 +292,7 @@ static void
 sna_output_backlight_init(xf86OutputPtr output)
 {
 	static const char *known_interfaces[] = {
+		"gmux_backlight",
 		"asus-laptop",
 		"asus-nb-wmi",
 		"eeepc",
commit 0ab6d7a50d37cf4454577cb8c333d4b8683aa054
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 12:20:36 2012 +0100

    sna: Prefer platform backlight devices over firmware
    
    This is in contrast to libbacklight but closer to our original code as
    we prefer a known custom backlight controller over the ACPI interfaces.
    As only the ACPI interfaces are marked as firmware, and the custom
    backlight controllers as platform, we therefore need to prefer platform
    backlight devices.
    
    Reported-by: Austin Lund <austin.lund at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52423
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b31f08d..ebecd21 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -282,8 +282,8 @@ sna_output_backlight_get_max(xf86OutputPtr output)
 }
 
 enum {
-	FIRMWARE,
 	PLATFORM,
+	FIRMWARE,
 	RAW,
 	NAMED,
 };
commit b6d82ab07661aba98963f239f9501b50c3910962
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 10:40:07 2012 +0100

    sna: Reduce ping-pong for Composite with render disabled
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 17ac814..9d4b926 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -84,6 +84,10 @@ no_render_composite(struct sna *sna,
 {
 	DBG(("%s (op=%d, mask? %d)\n", __FUNCTION__, op, mask != NULL));
 
+	if (!is_gpu(dst->pDrawable) &&
+	    (src->pDrawable == NULL || !is_gpu(src->pDrawable)))
+		return false;
+
 	if (mask == NULL &&
 	    sna_blt_composite(sna,
 			      op, src, dst,
commit de707b7dc853a3b315ead9789d5ca541412c99bc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 25 09:05:46 2012 +0100

    uxa: Add Apple's gmux to the list of known preferred backlights
    
    Reported-by: Austin Lund <austin.lund at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52423
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index bfe5918..6580c8c 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -127,6 +127,7 @@ intel_output_dpms_backlight(xf86OutputPtr output, int oldmode, int mode);
  * List of available kernel interfaces in priority order
  */
 static const char *backlight_interfaces[] = {
+	"gmux_backlight",
 	"asus-laptop",
 	"asus-nb-wmi",
 	"eeepc",
commit 3d254e4010d0753f433cfe62c6a7546b02482847
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 23:45:33 2012 +0100

    sna: Use SETUP_BLT to reduce overheads for tiled BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8b1e09a..94dcabf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9442,48 +9442,80 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 	DBG(("%s x %d [(%d, %d)+(%d, %d)...], clipped=%x\n",
 	     __FUNCTION__, n, r->x, r->y, r->width, r->height, clipped));
 
-	br00 = XY_PAT_BLT | 3 << 20;
+	kgem_set_mode(&sna->kgem, KGEM_BLT);
+	if (!kgem_check_batch(&sna->kgem, 8+2*3) ||
+	    !kgem_check_reloc(&sna->kgem, 2) ||
+	    !kgem_check_bo_fenced(&sna->kgem, bo)) {
+		_kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
+	}
+
+	br00 = XY_SCANLINE_BLT;
 	br13 = bo->pitch;
-	if (bo->tiling && sna->kgem.gen >= 40) {
-		br13 >>= 2;
+	if (sna->kgem.gen >= 40 && bo->tiling) {
 		br00 |= BLT_DST_TILED;
+		br13 >>= 2;
 	}
 	br13 |= blt_depth(drawable->depth) << 24;
 	br13 |= fill_ROP[gc->alu] << 16;
 
-	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 	if (!clipped) {
 		dx += drawable->x;
 		dy += drawable->y;
 
-		if (!kgem_check_batch(&sna->kgem, 6) ||
-		    !kgem_check_reloc(&sna->kgem, 2) ||
-		    !kgem_check_bo_fenced(&sna->kgem, bo)) {
-			_kgem_submit(&sna->kgem);
-			_kgem_set_mode(&sna->kgem, KGEM_BLT);
-		}
-
 		sna_damage_add_rectangles(damage, r, n, dx, dy);
-		do {
+		if (n == 1) {
+			b = sna->kgem.batch + sna->kgem.nbatch;
+			b[0] = XY_PAT_BLT | tx << 12 | ty << 8 | 3 << 20 | (br00 & BLT_DST_TILED);
+			b[1] = br13;
+			b[2] = (r->y + dy) << 16 | (r->x + dx);
+			b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
+			b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      I915_GEM_DOMAIN_RENDER |
+					      KGEM_RELOC_FENCED,
+					      0);
+			b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, tile_bo,
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      KGEM_RELOC_FENCED,
+					      0);
+			sna->kgem.nbatch += 6;
+		} else do {
 			int n_this_time;
 
+			b = sna->kgem.batch + sna->kgem.nbatch;
+			b[0] = XY_SETUP_BLT | 3 << 20;
+			b[1] = br13;
+			b[2] = 0;
+			b[3] = 0;
+			b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      I915_GEM_DOMAIN_RENDER |
+					      KGEM_RELOC_FENCED,
+					      0);
+			b[5] = gc->bgPixel;
+			b[6] = gc->fgPixel;
+			b[7] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 7, tile_bo,
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      KGEM_RELOC_FENCED,
+					      0);
+			sna->kgem.nbatch += 8;
+
 			n_this_time = n;
-			if (6*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
-				n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 8;
-			if (2*n_this_time > KGEM_RELOC_SIZE(&sna->kgem) - sna->kgem.nreloc)
-				n_this_time = (KGEM_RELOC_SIZE(&sna->kgem) - sna->kgem.nreloc)/2;
+			if (3*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
+				n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 3;
 			assert(n_this_time);
 			n -= n_this_time;
 
-			assert(r->x + dx >= 0);
-			assert(r->y + dy >= 0);
-			assert(r->x + dx + r->width  <= pixmap->drawable.width);
-			assert(r->y + dy + r->height <= pixmap->drawable.height);
-
 			b = sna->kgem.batch + sna->kgem.nbatch;
-			sna->kgem.nbatch += 6*n_this_time;
+			sna->kgem.nbatch += 3*n_this_time;
 			do {
+				assert(r->x + dx >= 0);
+				assert(r->y + dy >= 0);
+				assert(r->x + dx + r->width  <= pixmap->drawable.width);
+				assert(r->y + dy + r->height <= pixmap->drawable.height);
+
 				tx = (r->x - origin->x) % 8;
 				if (tx < 8)
 					tx = 8 - tx;
@@ -9492,20 +9524,9 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 					ty = 8 - ty;
 
 				b[0] = br00 | tx << 12 | ty << 8;
-				b[1] = br13;
-				b[2] = (r->y + dy) << 16 | (r->x + dx);
-				b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
-				b[4] = kgem_add_reloc(&sna->kgem, b - sna->kgem.batch + 4, bo,
-						      I915_GEM_DOMAIN_RENDER << 16 |
-						      I915_GEM_DOMAIN_RENDER |
-						      KGEM_RELOC_FENCED,
-						      0);
-				b[5] = kgem_add_reloc(&sna->kgem, b - sna->kgem.batch + 5, tile_bo,
-						      I915_GEM_DOMAIN_RENDER << 16 |
-						      KGEM_RELOC_FENCED,
-						      0);
-				b += 6;
-				r++;
+				b[1] = (r->y + dy) << 16 | (r->x + dx);
+				b[2] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
+				b += 3; r++;
 			} while (--n_this_time);
 
 			if (!n)
@@ -9522,6 +9543,24 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 		if (!RegionNotEmpty(&clip))
 			goto done;
 
+		b = sna->kgem.batch + sna->kgem.nbatch;
+		b[0] = XY_SETUP_BLT | 3 << 20;
+		b[1] = br13;
+		b[2] = 0;
+		b[3] = 0;
+		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+				      I915_GEM_DOMAIN_RENDER << 16 |
+				      I915_GEM_DOMAIN_RENDER |
+				      KGEM_RELOC_FENCED,
+				      0);
+		b[5] = gc->bgPixel;
+		b[6] = gc->fgPixel;
+		b[7] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 7, tile_bo,
+				      I915_GEM_DOMAIN_RENDER << 16 |
+				      KGEM_RELOC_FENCED,
+				      0);
+		sna->kgem.nbatch += 8;
+
 		if (clip.data == NULL) {
 			const BoxRec *c = &clip.extents;
 			while (n--) {
@@ -9534,6 +9573,28 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 				r++;
 
 				if (box_intersect(&box, c)) {
+					if (!kgem_check_batch(&sna->kgem, 3)) {
+						_kgem_submit(&sna->kgem);
+						_kgem_set_mode(&sna->kgem, KGEM_BLT);
+						b = sna->kgem.batch + sna->kgem.nbatch;
+						b[0] = XY_SETUP_BLT | 3 << 20;
+						b[1] = br13;
+						b[2] = 0;
+						b[3] = 0;
+						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+								      I915_GEM_DOMAIN_RENDER << 16 |
+								      I915_GEM_DOMAIN_RENDER |
+								      KGEM_RELOC_FENCED,
+								      0);
+						b[5] = gc->bgPixel;
+						b[6] = gc->fgPixel;
+						b[7] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 7, tile_bo,
+								      I915_GEM_DOMAIN_RENDER << 16 |
+								      KGEM_RELOC_FENCED,
+								      0);
+						sna->kgem.nbatch += 8;
+					}
+
 					ty = (box.y1 - drawable->y - origin->y) % 8;
 					if (ty < 0)
 						ty = 8 - ty;
@@ -9542,28 +9603,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 					if (tx < 0)
 						tx = 8 - tx;
 
-					if (!kgem_check_batch(&sna->kgem, 6) ||
-					    !kgem_check_reloc(&sna->kgem, 2) ||
-					    !kgem_check_bo_fenced(&sna->kgem, bo)) {
-						_kgem_submit(&sna->kgem);
-						_kgem_set_mode(&sna->kgem, KGEM_BLT);
-					}
-
 					b = sna->kgem.batch + sna->kgem.nbatch;
 					b[0] = br00 | tx << 12 | ty << 8;
-					b[1] = br13;
-					b[2] = box.y1 << 16 | box.x1;
-					b[3] = box.y2 << 16 | box.x2;
-					b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
-							      I915_GEM_DOMAIN_RENDER << 16 |
-							      I915_GEM_DOMAIN_RENDER |
-							      KGEM_RELOC_FENCED,
-							      0);
-					b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, tile_bo,
-							      I915_GEM_DOMAIN_RENDER << 16 |
-							      KGEM_RELOC_FENCED,
-							      0);
-					sna->kgem.nbatch += 6;
+					b[1] = box.y1 << 16 | box.x1;
+					b[2] = box.y2 << 16 | box.x2;
+					sna->kgem.nbatch += 3;
 				}
 			}
 		} else {
@@ -9591,6 +9635,28 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 
 					bb = box;
 					if (box_intersect(&bb, c++)) {
+						if (!kgem_check_batch(&sna->kgem, 3)) {
+							_kgem_submit(&sna->kgem);
+							_kgem_set_mode(&sna->kgem, KGEM_BLT);
+							b = sna->kgem.batch + sna->kgem.nbatch;
+							b[0] = XY_SETUP_BLT | 3 << 20;
+							b[1] = br13;
+							b[2] = 0;
+							b[3] = 0;
+							b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+									      I915_GEM_DOMAIN_RENDER << 16 |
+									      I915_GEM_DOMAIN_RENDER |
+									      KGEM_RELOC_FENCED,
+									      0);
+							b[5] = gc->bgPixel;
+							b[6] = gc->fgPixel;
+							b[7] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 7, tile_bo,
+									      I915_GEM_DOMAIN_RENDER << 16 |
+									      KGEM_RELOC_FENCED,
+									      0);
+							sna->kgem.nbatch += 8;
+						}
+
 						ty = (bb.y1 - drawable->y - origin->y) % 8;
 						if (ty < 0)
 							ty = 8 - ty;
@@ -9599,28 +9665,11 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
 						if (tx < 0)
 							tx = 8 - tx;
 
-						if (!kgem_check_batch(&sna->kgem, 6) ||
-						    !kgem_check_reloc(&sna->kgem, 2) ||
-						    !kgem_check_bo_fenced(&sna->kgem, bo)) {
-							_kgem_submit(&sna->kgem);
-							_kgem_set_mode(&sna->kgem, KGEM_BLT);
-						}
-
 						b = sna->kgem.batch + sna->kgem.nbatch;
 						b[0] = br00 | tx << 12 | ty << 8;
-						b[1] = br13;
-						b[2] = (bb.y1+dy) << 16 | (bb.x1+dx);
-						b[3] = (bb.y2+dy) << 16 | (bb.x2+dx);
-						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
-								      I915_GEM_DOMAIN_RENDER << 16 |
-								      I915_GEM_DOMAIN_RENDER |
-								      KGEM_RELOC_FENCED,
-								      0);
-						b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, tile_bo,
-								      I915_GEM_DOMAIN_RENDER << 16 |
-								      KGEM_RELOC_FENCED,
-								      0);
-						sna->kgem.nbatch += 6;
+						b[1] = bb.y1 << 16 | bb.x1;
+						b[2] = bb.y2 << 16 | bb.x2;
+						sna->kgem.nbatch += 3;
 					}
 				}
 			} while (--n);
commit 2ed44149eaa27b9632ec83a776f3ee67b0eec7b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 23:45:33 2012 +0100

    sna: Use SETUP_BLT to reduce overheads for stippled BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a963d30..8b1e09a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9961,6 +9961,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	uint32_t pat[2] = {0, 0}, br00, br13;
 	int16_t dx, dy;
+	uint32_t *b;
 
 	DBG(("%s: alu=%d, upload (%d, %d), (%d, %d), origin (%d, %d)\n",
 	     __FUNCTION__, gc->alu,
@@ -9973,8 +9974,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 		unsigned px = (0 - gc->patOrg.x - dx) & 7;
 		unsigned py = (0 - gc->patOrg.y - dy) & 7;
 		DBG(("%s: pat offset (%d, %d)\n", __FUNCTION__ ,px, py));
-		br00 = XY_MONO_PAT | px << 12 | py << 8 | 3 << 20;
-
+		br00 = XY_SCANLINE_BLT | px << 12 | py << 8 | 3 << 20;
 		br13 = bo->pitch;
 		if (sna->kgem.gen >= 40 && bo->tiling) {
 			br00 |= BLT_DST_TILED;
@@ -9997,26 +9997,24 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 	}
 
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
+	if (!kgem_check_batch(&sna->kgem, 9 + 2*3) ||
+	    !kgem_check_bo_fenced(&sna->kgem, bo) ||
+	    !kgem_check_reloc(&sna->kgem, 1)) {
+		_kgem_submit(&sna->kgem);
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
+	}
+
 	if (!clipped) {
 		dx += drawable->x;
 		dy += drawable->y;
 
 		sna_damage_add_rectangles(damage, r, n, dx, dy);
-		do {
-			uint32_t *b;
-
-			DBG(("%s: rect (%d, %d)x(%d, %d)\n",
+		if (n == 1) {
+			DBG(("%s: single unclipped rect (%d, %d)x(%d, %d)\n",
 			     __FUNCTION__, r->x + dx, r->y + dy, r->width, r->height));
 
-			if (!kgem_check_batch(&sna->kgem, 9) ||
-			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-			    !kgem_check_reloc(&sna->kgem, 1)) {
-				_kgem_submit(&sna->kgem);
-				_kgem_set_mode(&sna->kgem, KGEM_BLT);
-			}
-
 			b = sna->kgem.batch + sna->kgem.nbatch;
-			b[0] = br00;
+			b[0] = XY_MONO_PAT | (br00 & (BLT_DST_TILED | 0x7<<12 | 0x7<<8)) | 3<<20;
 			b[1] = br13;
 			b[2] = (r->y + dy) << 16 | (r->x + dx);
 			b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
@@ -10030,9 +10028,54 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 			b[7] = pat[0];
 			b[8] = pat[1];
 			sna->kgem.nbatch += 9;
+		} else do {
+			int n_this_time;
 
-			r++;
-		} while (--n);
+			b = sna->kgem.batch + sna->kgem.nbatch;
+			b[0] = XY_SETUP_MONO_PATTERN_SL_BLT | 3 << 20;
+			b[1] = br13;
+			b[2] = 0;
+			b[3] = 0;
+			b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+					      I915_GEM_DOMAIN_RENDER << 16 |
+					      I915_GEM_DOMAIN_RENDER |
+					      KGEM_RELOC_FENCED,
+					      0);
+			b[5] = gc->bgPixel;
+			b[6] = gc->fgPixel;
+			b[7] = pat[0];
+			b[8] = pat[1];
+			sna->kgem.nbatch += 9;
+
+			n_this_time = n;
+			if (3*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
+				n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 3;
+			assert(n_this_time);
+			n -= n_this_time;
+
+			b = sna->kgem.batch + sna->kgem.nbatch;
+			sna->kgem.nbatch += 3 * n_this_time;
+			do {
+				DBG(("%s: rect (%d, %d)x(%d, %d)\n",
+				     __FUNCTION__, r->x + dx, r->y + dy, r->width, r->height));
+				assert(r->x + dx >= 0);
+				assert(r->y + dy >= 0);
+				assert(r->x + dx + r->width  <= pixmap->drawable.width);
+				assert(r->y + dy + r->height <= pixmap->drawable.height);
+
+				b[0] = br00;
+				b[1] = (r->y + dy) << 16 | (r->x + dx);
+				b[2] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
+
+				b += 3; r++;
+			} while(--n_this_time);
+
+			if (!n)
+				break;
+
+			_kgem_submit(&sna->kgem);
+			_kgem_set_mode(&sna->kgem, KGEM_BLT);
+		} while (1);
 	} else {
 		RegionRec clip;
 
@@ -10041,7 +10084,21 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 		if (!RegionNotEmpty(&clip))
 			return true;
 
-		/* XXX XY_SETUP_BLT + XY_SCANLINE_BLT */
+		b = sna->kgem.batch + sna->kgem.nbatch;
+		b[0] = XY_SETUP_MONO_PATTERN_SL_BLT | 3 << 20;
+		b[1] = br13;
+		b[2] = 0;
+		b[3] = 0;
+		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+				      I915_GEM_DOMAIN_RENDER << 16 |
+				      I915_GEM_DOMAIN_RENDER |
+				      KGEM_RELOC_FENCED,
+				      0);
+		b[5] = gc->bgPixel;
+		b[6] = gc->fgPixel;
+		b[7] = pat[0];
+		b[8] = pat[1];
+		sna->kgem.nbatch += 9;
 
 		if (clip.data == NULL) {
 			do {
@@ -10054,30 +10111,32 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 				r++;
 
 				if (box_intersect(&box, &clip.extents)) {
-					uint32_t *b;
-
-					if (!kgem_check_batch(&sna->kgem, 9) ||
-					    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-					    !kgem_check_reloc(&sna->kgem, 1)) {
+					if (!kgem_check_batch(&sna->kgem, 3)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
+
+						b = sna->kgem.batch + sna->kgem.nbatch;
+						b[0] = XY_SETUP_MONO_PATTERN_SL_BLT | 3 << 20;
+						b[1] = br13;
+						b[2] = 0;
+						b[3] = 0;
+						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+								      I915_GEM_DOMAIN_RENDER << 16 |
+								      I915_GEM_DOMAIN_RENDER |
+								      KGEM_RELOC_FENCED,
+								      0);
+						b[5] = gc->bgPixel;
+						b[6] = gc->fgPixel;
+						b[7] = pat[0];
+						b[8] = pat[1];
+						sna->kgem.nbatch += 9;
 					}
 
 					b = sna->kgem.batch + sna->kgem.nbatch;
+					sna->kgem.nbatch += 3;
 					b[0] = br00;
-					b[1] = br13;
-					b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
-					b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
-					b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
-							      I915_GEM_DOMAIN_RENDER << 16 |
-							      I915_GEM_DOMAIN_RENDER |
-							      KGEM_RELOC_FENCED,
-							      0);
-					b[5] = gc->bgPixel;
-					b[6] = gc->fgPixel;
-					b[7] = pat[0];
-					b[8] = pat[1];
-					sna->kgem.nbatch += 9;
+					b[1] = (box.y1 + dy) << 16 | (box.x1 + dx);
+					b[2] = (box.y2 + dy) << 16 | (box.x2 + dx);
 				}
 			} while (--n);
 		} else {
@@ -10104,32 +10163,33 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 
 					bb = box;
 					if (box_intersect(&bb, c++)) {
-						uint32_t *b;
-
-						if (!kgem_check_batch(&sna->kgem, 9) ||
-						    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-						    !kgem_check_reloc(&sna->kgem, 1)) {
+						if (!kgem_check_batch(&sna->kgem, 3)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
+
+							b = sna->kgem.batch + sna->kgem.nbatch;
+							b[0] = XY_SETUP_MONO_PATTERN_SL_BLT | 3 << 20;
+							b[1] = br13;
+							b[2] = 0;
+							b[3] = 0;
+							b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+									      I915_GEM_DOMAIN_RENDER << 16 |
+									      I915_GEM_DOMAIN_RENDER |
+									      KGEM_RELOC_FENCED,
+									      0);
+							b[5] = gc->bgPixel;
+							b[6] = gc->fgPixel;
+							b[7] = pat[0];
+							b[8] = pat[1];
+							sna->kgem.nbatch += 9;
 						}
 
 						b = sna->kgem.batch + sna->kgem.nbatch;
+						sna->kgem.nbatch += 3;
 						b[0] = br00;
-						b[1] = br13;
-						b[2] = (bb.y1 + dy) << 16 | (bb.x1 + dx);
-						b[3] = (bb.y2 + dy) << 16 | (bb.x2 + dx);
-						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
-								      I915_GEM_DOMAIN_RENDER << 16 |
-								      I915_GEM_DOMAIN_RENDER |
-								      KGEM_RELOC_FENCED,
-								      0);
-						b[5] = gc->bgPixel;
-						b[6] = gc->fgPixel;
-						b[7] = pat[0];
-						b[8] = pat[1];
-						sna->kgem.nbatch += 9;
+						b[1] = (bb.y1 + dy) << 16 | (bb.x1 + dx);
+						b[2] = (bb.y2 + dy) << 16 | (bb.x2 + dx);
 					}
-
 				}
 			} while (--n);
 		}
commit c1785aaca2c9347114d28f114ee59ef8206d829b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 23:00:25 2012 +0100

    sna: Remove a surplus function
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 3f7234f..3bb2315 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -226,21 +226,6 @@ static void PreInitCleanup(ScrnInfoPtr scrn)
 	scrn->driverPrivate = NULL;
 }
 
-static void sna_check_chipset_option(ScrnInfoPtr scrn)
-{
-	struct sna *sna = to_sna(scrn);
-
-	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
-}
-
-static Bool sna_get_early_options(ScrnInfoPtr scrn)
-{
-	struct sna *sna = to_sna(scrn);
-
-	sna->Options = intel_options_get(scrn);
-	return sna->Options != NULL;
-}
-
 struct sna_device {
 	int fd;
 	int open_count;
@@ -449,7 +434,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (!xf86SetDefaultVisual(scrn, -1))
 		return FALSE;
 
-	if (!sna_get_early_options(scrn))
+	sna->Options = intel_options_get(scrn);
+	if (sna->Options == NULL)
 		return FALSE;
 
 	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
commit 554fce8a65b2518cae032f1eadf58830559299c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 22:59:16 2012 +0100

    uxa: Pass the correct parameters to intel_detect_chipset()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index e76f19e..cab7b4e 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -186,8 +186,7 @@ static void PreInitCleanup(ScrnInfoPtr scrn)
 static void intel_check_chipset_option(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	intel->info =
-		intel_detect_chipset(scrn, intel->pEnt->device, intel->PciInfo);
+	intel->info = intel_detect_chipset(scrn, intel->pEnt, intel->PciInfo);
 }
 
 static Bool I830GetEarlyOptions(ScrnInfoPtr scrn)
commit 40d90dfd8674c255a45b46bbdc09d497af5b3f50
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 21:55:46 2012 +0100

    intel: Refactor the common chipset detection/override
    
    Reduce the duplicate messages for which type of chip we have by
    amalgamating the common code.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index fe27011..0b57aaf 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -212,7 +212,7 @@ typedef struct intel_screen_private {
 	int Chipset;
 	EntityInfoPtr pEnt;
 	struct pci_device *PciInfo;
-	struct intel_chipset chipset;
+	const struct intel_device_info *info;
 
 	unsigned int BR[20];
 
diff --git a/src/intel_driver.c b/src/intel_driver.c
index da920ef..e76f19e 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -186,37 +186,8 @@ static void PreInitCleanup(ScrnInfoPtr scrn)
 static void intel_check_chipset_option(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	MessageType from = X_PROBED;
-
-	intel_detect_chipset(scrn,
-			     intel->PciInfo,
-			     &intel->chipset);
-
-	/* Set the Chipset and ChipRev, allowing config file entries to override. */
-	if (intel->pEnt->device->chipset && *intel->pEnt->device->chipset) {
-		scrn->chipset = intel->pEnt->device->chipset;
-		from = X_CONFIG;
-	} else if (intel->pEnt->device->chipID >= 0) {
-		scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-							   intel->pEnt->device->chipID);
-		from = X_CONFIG;
-		xf86DrvMsg(scrn->scrnIndex, X_CONFIG,
-			   "ChipID override: 0x%04X\n",
-			   intel->pEnt->device->chipID);
-		DEVICE_ID(intel->PciInfo) = intel->pEnt->device->chipID;
-	} else {
-		from = X_PROBED;
-		scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-							   DEVICE_ID(intel->PciInfo));
-	}
-
-	if (intel->pEnt->device->chipRev >= 0) {
-		xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipRev override: %d\n",
-			   intel->pEnt->device->chipRev);
-	}
-
-	xf86DrvMsg(scrn->scrnIndex, from, "Chipset: \"%s\"\n",
-		   (scrn->chipset != NULL) ? scrn->chipset : "Unknown i8xx");
+	intel->info =
+		intel_detect_chipset(scrn, intel->pEnt->device, intel->PciInfo);
 }
 
 static Bool I830GetEarlyOptions(ScrnInfoPtr scrn)
diff --git a/src/intel_driver.h b/src/intel_driver.h
index f33d135..31c11f6 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -209,7 +209,7 @@
 #define SUBSYS_ID(p)      (p)->subdevice_id
 #define CHIP_REVISION(p)  (p)->revision
 
-#define INTEL_INFO(intel) ((intel)->chipset.info)
+#define INTEL_INFO(intel) ((intel)->info)
 #define IS_GENx(intel, X) (INTEL_INFO(intel)->gen >= 10*(X) && INTEL_INFO(intel)->gen < 10*((X)+1))
 #define IS_GEN1(intel) IS_GENx(intel, 1)
 #define IS_GEN2(intel) IS_GENx(intel, 2)
@@ -234,18 +234,13 @@
 #define HAS_BLT(pI810) (INTEL_INFO(intel)->gen >= 60)
 
 extern SymTabRec *intel_chipsets;
-
-struct intel_chipset {
-    const char *name;
-    int variant;
-    const struct intel_device_info {
-	    int gen;
-    } *info;
+struct intel_device_info {
+	int gen;
 };
 
-void intel_detect_chipset(ScrnInfoPtr scrn,
-			  struct pci_device *pci,
-			  struct intel_chipset *chipset);
+const struct intel_device_info *
+intel_detect_chipset(ScrnInfoPtr scrn,
+		     EntityInfoPtr ent, struct pci_device *pci);
 
 
 #endif /* INTEL_DRIVER_H */
diff --git a/src/intel_module.c b/src/intel_module.c
index a39affb..845b40e 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -223,29 +223,38 @@ static const struct pci_id_match intel_device_match[] = {
 	{ 0, 0, 0 },
 };
 
-void intel_detect_chipset(ScrnInfoPtr scrn,
-			  struct pci_device *pci,
-			  struct intel_chipset *chipset)
+const struct intel_device_info *
+intel_detect_chipset(ScrnInfoPtr scrn,
+		     EntityInfoPtr ent, struct pci_device *pci)
 {
+	MessageType from = X_PROBED;
+	const char *name = NULL;
 	int i;
 
-	chipset->info = chipset_info;
-	chipset->name = NULL;
+	if (ent->device->chipID >= 0) {
+		xf86DrvMsg(scrn->scrnIndex, from = X_CONFIG,
+			   "ChipID override: 0x%04X\n",
+			   ent->device->chipID);
+		DEVICE_ID(pci) = ent->device->chipID;
+	}
 
 	for (i = 0; intel_chipsets[i].name != NULL; i++) {
 		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
-			chipset->name = intel_chipsets[i].name;
+			name = intel_chipsets[i].name;
 			break;
 		}
 	}
-	if (chipset->name == NULL) {
+	if (name == NULL) {
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
-		chipset->name = "unknown";
+		name = "unknown";
 	} else {
-		xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		xf86DrvMsg(scrn->scrnIndex, from,
 			   "Integrated Graphics Chipset: Intel(R) %s\n",
-			   chipset->name);
+			   name);
 	}
+
+	scrn->chipset = name;
+	return chipset_info;
 }
 
 /*
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index b7212cf..949fd27 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -240,7 +240,6 @@ I810PreInit(ScrnInfoPtr scrn, int flags)
    rgb defaultWeight = { 0, 0, 0 };
    int mem;
    Bool enable;
-   struct intel_chipset chipset;
 
    if (scrn->numEntities != 1)
       return FALSE;
@@ -365,40 +364,14 @@ I810PreInit(ScrnInfoPtr scrn, int flags)
     */
    I810DoDDC(scrn, pI810->pEnt->index);
 
-   intel_detect_chipset(scrn, pI810->PciInfo, &chipset);
-
-   /*
-    * Set the Chipset and ChipRev, allowing config file entries to
-    * override.
-    */
-   if (pI810->pEnt->device->chipset && *pI810->pEnt->device->chipset) {
-      scrn->chipset = pI810->pEnt->device->chipset;
-      from = X_CONFIG;
-   } else if (pI810->pEnt->device->chipID >= 0) {
-      scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-						 pI810->pEnt->device->chipID);
-      from = X_CONFIG;
-      xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipID override: 0x%04X\n",
-		 pI810->pEnt->device->chipID);
-   } else {
-      from = X_PROBED;
-      scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-						 DEVICE_ID(pI810->PciInfo));
-   }
-   if (pI810->pEnt->device->chipRev >= 0) {
-      xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipRev override: %d\n",
-		 pI810->pEnt->device->chipRev);
-   }
-
-   xf86DrvMsg(scrn->scrnIndex, from, "Chipset: \"%s\"\n",
-	      (scrn->chipset != NULL) ? scrn->chipset : "Unknown i810");
+   intel_detect_chipset(scrn, pI810->pEnt, pI810->PciInfo);
 
    pI810->LinearAddr = pI810->PciInfo->regions[0].base_addr;
-   xf86DrvMsg(scrn->scrnIndex, from, "Linear framebuffer at 0x%lX\n",
+   xf86DrvMsg(scrn->scrnIndex, X_PROBED, "Linear framebuffer at 0x%lX\n",
 	      (unsigned long)pI810->LinearAddr);
 
    pI810->MMIOAddr = pI810->PciInfo->regions[1].base_addr;
-   xf86DrvMsg(scrn->scrnIndex, from, "IO registers at addr 0x%lX\n",
+   xf86DrvMsg(scrn->scrnIndex, X_PROBED, "IO registers at addr 0x%lX\n",
 	      (unsigned long)pI810->MMIOAddr);
 
    /* AGP GART support is required.  Don't proceed any further if it isn't
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 91db995..51a8206 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -231,7 +231,7 @@ struct sna {
 
 	EntityInfoPtr pEnt;
 	struct pci_device *PciInfo;
-	struct intel_chipset chipset;
+	const struct intel_device_info *info;
 
 	ScreenBlockHandlerProcPtr BlockHandler;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f51f732..a963d30 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -13706,23 +13706,23 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	no_render_init(sna);
 
 #if !DEBUG_NO_RENDER
-	if (sna->chipset.info->gen >= 80) {
-	} else if (sna->chipset.info->gen >= 70) {
+	if (sna->info->gen >= 80) {
+	} else if (sna->info->gen >= 70) {
 		if ((sna->have_render = gen7_render_init(sna)))
 			backend = "IvyBridge";
-	} else if (sna->chipset.info->gen >= 60) {
+	} else if (sna->info->gen >= 60) {
 		if ((sna->have_render = gen6_render_init(sna)))
 			backend = "SandyBridge";
-	} else if (sna->chipset.info->gen >= 50) {
+	} else if (sna->info->gen >= 50) {
 		if ((sna->have_render = gen5_render_init(sna)))
 			backend = "Ironlake";
-	} else if (sna->chipset.info->gen >= 40) {
+	} else if (sna->info->gen >= 40) {
 		if ((sna->have_render = gen4_render_init(sna)))
 			backend = "Broadwater";
-	} else if (sna->chipset.info->gen >= 30) {
+	} else if (sna->info->gen >= 30) {
 		if ((sna->have_render = gen3_render_init(sna)))
 			backend = "gen3";
-	} else if (sna->chipset.info->gen >= 20) {
+	} else if (sna->info->gen >= 20) {
 		if ((sna->have_render = gen2_render_init(sna)))
 			backend = "gen2";
 	}
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 21e967a..3f7234f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -229,35 +229,8 @@ static void PreInitCleanup(ScrnInfoPtr scrn)
 static void sna_check_chipset_option(ScrnInfoPtr scrn)
 {
 	struct sna *sna = to_sna(scrn);
-	MessageType from = X_PROBED;
-
-	intel_detect_chipset(scrn, sna->PciInfo, &sna->chipset);
-
-	/* Set the Chipset and ChipRev, allowing config file entries to override. */
-	if (sna->pEnt->device->chipset && *sna->pEnt->device->chipset) {
-		scrn->chipset = sna->pEnt->device->chipset;
-		from = X_CONFIG;
-	} else if (sna->pEnt->device->chipID >= 0) {
-		scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-							  sna->pEnt->device->chipID);
-		from = X_CONFIG;
-		xf86DrvMsg(scrn->scrnIndex, X_CONFIG,
-			   "ChipID override: 0x%04X\n",
-			   sna->pEnt->device->chipID);
-		DEVICE_ID(sna->PciInfo) = sna->pEnt->device->chipID;
-	} else {
-		from = X_PROBED;
-		scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
-							   DEVICE_ID(sna->PciInfo));
-	}
-
-	if (sna->pEnt->device->chipRev >= 0) {
-		xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipRev override: %d\n",
-			   sna->pEnt->device->chipRev);
-	}
 
-	xf86DrvMsg(scrn->scrnIndex, from, "Chipset: \"%s\"\n",
-		   (scrn->chipset != NULL) ? scrn->chipset : "Unknown i8xx");
+	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
 }
 
 static Bool sna_get_early_options(ScrnInfoPtr scrn)
@@ -479,8 +452,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (!sna_get_early_options(scrn))
 		return FALSE;
 
-	sna_check_chipset_option(scrn);
-	kgem_init(&sna->kgem, fd, sna->PciInfo, sna->chipset.info->gen);
+	sna->info = intel_detect_chipset(scrn, sna->pEnt, sna->PciInfo);
+	kgem_init(&sna->kgem, fd, sna->PciInfo, sna->info->gen);
 	if (!xf86ReturnOptValBool(sna->Options,
 				  OPTION_RELAXED_FENCING,
 				  sna->kgem.has_relaxed_fencing)) {
diff --git a/src/sna/sna_video_hwmc.c b/src/sna/sna_video_hwmc.c
index 2baa939..b0e8d25 100644
--- a/src/sna/sna_video_hwmc.c
+++ b/src/sna/sna_video_hwmc.c
@@ -78,7 +78,7 @@ static int create_context(ScrnInfoPtr scrn, XvMCContextPtr pContext,
 			contextRec->type = XVMC_I965_MPEG2_MC;
 		contextRec->i965.is_g4x = sna->kgem.gen == 45;
 		contextRec->i965.is_965_q = IS_965_Q(sna);
-		contextRec->i965.is_igdng = IS_GEN5(sna);
+		contextRec->i965.is_igdng = sna->kgem.gen == 50;
 	} else {
 		contextRec->type = XVMC_I915_MPEG2_MC;
 		contextRec->i915.use_phys_addr = 0;
@@ -192,7 +192,7 @@ Bool sna_video_xvmc_setup(struct sna *sna,
 			  XF86VideoAdaptorPtr target)
 {
 	XF86MCAdaptorRec *pAdapt;
-	char *name;
+	const char *name;
 	char buf[64];
 
 	/* Needs KMS support. */
commit e3f6c48d18c316899c71b6fc34971039c6f9e5f8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 14:43:23 2012 +0100

    sna: Refactor PutImage to avoid calling drawable_gc_flags() too early
    
    drawable_gc_flags() asserts that the gc has been moved to the CPU prior
    to its calls so that it can read the reduced raster operation.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 98a3e68..f51f732 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3698,7 +3698,6 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	RegionRec region;
 	int16_t dx, dy;
-	unsigned hint;
 
 	DBG(("%s((%d, %d)x(%d, %d), depth=%d, format=%d)\n",
 	     __FUNCTION__, x, y, w, h, depth, format));
@@ -3727,47 +3726,39 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (priv == NULL) {
 		DBG(("%s: fallback -- unattached(%d, %d, %d, %d)\n",
 		     __FUNCTION__, x, y, w, h));
-hint_and_fallback:
-		hint = (format == XYPixmap ?
-			MOVE_READ | MOVE_WRITE :
-			drawable_gc_flags(drawable, gc, false));
 		goto fallback;
 	}
 
 	RegionTranslate(&region, dx, dy);
 
 	if (FORCE_FALLBACK)
-		goto hint_and_fallback;
+		goto fallback;
 
 	if (wedged(sna))
-		goto hint_and_fallback;
+		goto fallback;
 
 	if (!ACCEL_PUT_IMAGE)
-		goto hint_and_fallback;
+		goto fallback;
 
 	switch (format) {
 	case ZPixmap:
 		if (!PM_IS_SOLID(drawable, gc->planemask))
-			goto hint_and_fallback;
+			goto fallback;
 
 		if (sna_put_zpixmap_blt(drawable, gc, &region,
 					x, y, w, h,
 					bits, PixmapBytePad(w, depth)))
 			return;
-
-		hint = drawable_gc_flags(drawable, gc, false);
 		break;
 
 	case XYBitmap:
 		if (!PM_IS_SOLID(drawable, gc->planemask))
-			goto hint_and_fallback;
+			goto fallback;
 
 		if (sna_put_xybitmap_blt(drawable, gc, &region,
 					 x, y, w, h,
 					 bits))
 			return;
-
-		hint = drawable_gc_flags(drawable, gc, false);
 		break;
 
 	case XYPixmap:
@@ -3775,8 +3766,6 @@ hint_and_fallback:
 					 x, y, w, h, left,
 					 bits))
 			return;
-
-		hint = MOVE_READ | MOVE_WRITE;
 		break;
 
 	default:
@@ -3789,7 +3778,10 @@ fallback:
 
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
-	if (!sna_drawable_move_region_to_cpu(drawable, &region, hint))
+	if (!sna_drawable_move_region_to_cpu(drawable, &region,
+					      format == XYPixmap ?
+					      MOVE_READ | MOVE_WRITE :
+					      drawable_gc_flags(drawable, gc, false)))
 		goto out_gc;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
commit 1af26ea4228a9d7768b475b4f9164d2c7620d4fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 14:39:44 2012 +0100

    sna: Flesh out tiled operations using the BLT
    
    Before enabling the RENDER pipeline for this operation, let's just see
    what is required to fully use the BLT pipeline as well.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9ae8775..98a3e68 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9423,6 +9423,277 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap)
 	return kgem_bo_reference(priv->gpu_bo);
 }
 
+/*
+static bool
+tile(DrawablePtr drawable,
+	struct kgem_bo *bo, struct sna_damage **damage,
+	PixmapPtr tile, const DDXPointRec * const origin, int alu,
+	int n, xRectangle *rect,
+	const BoxRec *extents, unsigned clipped)
+	*/
+
+static bool
+sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
+				 struct kgem_bo *bo, struct sna_damage **damage,
+				 struct kgem_bo *tile_bo, GCPtr gc,
+				 int n, xRectangle *r,
+				 const BoxRec *extents, unsigned clipped)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(drawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	const DDXPointRec * const origin = &gc->patOrg;
+	uint32_t br00, br13;
+	int tx, ty;
+	int16_t dx, dy;
+	uint32_t *b;
+
+	DBG(("%s x %d [(%d, %d)+(%d, %d)...], clipped=%x\n",
+	     __FUNCTION__, n, r->x, r->y, r->width, r->height, clipped));
+
+	br00 = XY_PAT_BLT | 3 << 20;
+	br13 = bo->pitch;
+	if (bo->tiling && sna->kgem.gen >= 40) {
+		br13 >>= 2;
+		br00 |= BLT_DST_TILED;
+	}
+	br13 |= blt_depth(drawable->depth) << 24;
+	br13 |= fill_ROP[gc->alu] << 16;
+
+	kgem_set_mode(&sna->kgem, KGEM_BLT);
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	if (!clipped) {
+		dx += drawable->x;
+		dy += drawable->y;
+
+		if (!kgem_check_batch(&sna->kgem, 6) ||
+		    !kgem_check_reloc(&sna->kgem, 2) ||
+		    !kgem_check_bo_fenced(&sna->kgem, bo)) {
+			_kgem_submit(&sna->kgem);
+			_kgem_set_mode(&sna->kgem, KGEM_BLT);
+		}
+
+		sna_damage_add_rectangles(damage, r, n, dx, dy);
+		do {
+			int n_this_time;
+
+			n_this_time = n;
+			if (6*n_this_time > sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED)
+				n_this_time = (sna->kgem.surface - sna->kgem.nbatch - KGEM_BATCH_RESERVED) / 8;
+			if (2*n_this_time > KGEM_RELOC_SIZE(&sna->kgem) - sna->kgem.nreloc)
+				n_this_time = (KGEM_RELOC_SIZE(&sna->kgem) - sna->kgem.nreloc)/2;
+			assert(n_this_time);
+			n -= n_this_time;
+
+			assert(r->x + dx >= 0);
+			assert(r->y + dy >= 0);
+			assert(r->x + dx + r->width  <= pixmap->drawable.width);
+			assert(r->y + dy + r->height <= pixmap->drawable.height);
+
+			b = sna->kgem.batch + sna->kgem.nbatch;
+			sna->kgem.nbatch += 6*n_this_time;
+			do {
+				tx = (r->x - origin->x) % 8;
+				if (tx < 8)
+					tx = 8 - tx;
+				ty = (r->y - origin->y) % 8;
+				if (ty < 8)
+					ty = 8 - ty;
+
+				b[0] = br00 | tx << 12 | ty << 8;
+				b[1] = br13;
+				b[2] = (r->y + dy) << 16 | (r->x + dx);
+				b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
+				b[4] = kgem_add_reloc(&sna->kgem, b - sna->kgem.batch + 4, bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      I915_GEM_DOMAIN_RENDER |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[5] = kgem_add_reloc(&sna->kgem, b - sna->kgem.batch + 5, tile_bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b += 6;
+				r++;
+			} while (--n_this_time);
+
+			if (!n)
+				break;
+
+			_kgem_submit(&sna->kgem);
+			_kgem_set_mode(&sna->kgem, KGEM_BLT);
+		} while (1);
+	} else {
+		RegionRec clip;
+
+		region_set(&clip, extents);
+		region_maybe_clip(&clip, gc->pCompositeClip);
+		if (!RegionNotEmpty(&clip))
+			goto done;
+
+		if (clip.data == NULL) {
+			const BoxRec *c = &clip.extents;
+			while (n--) {
+				BoxRec box;
+
+				box.x1 = r->x + drawable->x;
+				box.y1 = r->y + drawable->y;
+				box.x2 = bound(box.x1, r->width);
+				box.y2 = bound(box.y1, r->height);
+				r++;
+
+				if (box_intersect(&box, c)) {
+					ty = (box.y1 - drawable->y - origin->y) % 8;
+					if (ty < 0)
+						ty = 8 - ty;
+
+					tx = (box.x1 - drawable->x - origin->x) % 8;
+					if (tx < 0)
+						tx = 8 - tx;
+
+					if (!kgem_check_batch(&sna->kgem, 6) ||
+					    !kgem_check_reloc(&sna->kgem, 2) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo)) {
+						_kgem_submit(&sna->kgem);
+						_kgem_set_mode(&sna->kgem, KGEM_BLT);
+					}
+
+					b = sna->kgem.batch + sna->kgem.nbatch;
+					b[0] = br00 | tx << 12 | ty << 8;
+					b[1] = br13;
+					b[2] = box.y1 << 16 | box.x1;
+					b[3] = box.y2 << 16 | box.x2;
+					b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+							      I915_GEM_DOMAIN_RENDER << 16 |
+							      I915_GEM_DOMAIN_RENDER |
+							      KGEM_RELOC_FENCED,
+							      0);
+					b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, tile_bo,
+							      I915_GEM_DOMAIN_RENDER << 16 |
+							      KGEM_RELOC_FENCED,
+							      0);
+					sna->kgem.nbatch += 6;
+				}
+			}
+		} else {
+			const BoxRec * const clip_start = RegionBoxptr(&clip);
+			const BoxRec * const clip_end = clip_start + clip.data->numRects;
+			const BoxRec *c;
+
+			do {
+				BoxRec box;
+
+				box.x1 = r->x + drawable->x;
+				box.y1 = r->y + drawable->y;
+				box.x2 = bound(box.x1, r->width);
+				box.y2 = bound(box.y1, r->height);
+				r++;
+
+				c = find_clip_box_for_y(clip_start,
+							clip_end,
+							box.y1);
+				while (c != clip_end) {
+					BoxRec bb;
+
+					if (box.y2 <= c->y1)
+						break;
+
+					bb = box;
+					if (box_intersect(&bb, c++)) {
+						ty = (bb.y1 - drawable->y - origin->y) % 8;
+						if (ty < 0)
+							ty = 8 - ty;
+
+						tx = (bb.x1 - drawable->x - origin->x) % 8;
+						if (tx < 0)
+							tx = 8 - tx;
+
+						if (!kgem_check_batch(&sna->kgem, 6) ||
+						    !kgem_check_reloc(&sna->kgem, 2) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo)) {
+							_kgem_submit(&sna->kgem);
+							_kgem_set_mode(&sna->kgem, KGEM_BLT);
+						}
+
+						b = sna->kgem.batch + sna->kgem.nbatch;
+						b[0] = br00 | tx << 12 | ty << 8;
+						b[1] = br13;
+						b[2] = (bb.y1+dy) << 16 | (bb.x1+dx);
+						b[3] = (bb.y2+dy) << 16 | (bb.x2+dx);
+						b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
+								      I915_GEM_DOMAIN_RENDER << 16 |
+								      I915_GEM_DOMAIN_RENDER |
+								      KGEM_RELOC_FENCED,
+								      0);
+						b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5, tile_bo,
+								      I915_GEM_DOMAIN_RENDER << 16 |
+								      KGEM_RELOC_FENCED,
+								      0);
+						sna->kgem.nbatch += 6;
+					}
+				}
+			} while (--n);
+		}
+	}
+done:
+	assert_pixmap_damage(pixmap);
+	return true;
+}
+
+static bool
+sna_poly_fill_rect_tiled_nxm_blt(DrawablePtr drawable,
+				 struct kgem_bo *bo,
+				 struct sna_damage **damage,
+				 GCPtr gc, int n, xRectangle *rect,
+				 const BoxRec *extents, unsigned clipped)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(drawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	PixmapPtr tile = gc->tile.pixmap;
+	struct kgem_bo *upload;
+	int w, h, cpp;
+	void *ptr;
+	bool ret;
+
+	DBG(("%s: %dx%d\n", __FUNCTION__,
+	     tile->drawable.width, tile->drawable.height));
+
+	if (!sna_pixmap_move_to_cpu(tile, MOVE_READ))
+		return false;
+
+	upload = kgem_create_buffer(&sna->kgem, 8*tile->drawable.bitsPerPixel,
+				    KGEM_BUFFER_WRITE_INPLACE,
+				    &ptr);
+	if (upload == NULL)
+		return false;
+
+	assert(tile->drawable.height && tile->drawable.height <= 8);
+	assert(tile->drawable.width && tile->drawable.width <= 8);
+
+	cpp = tile->drawable.bitsPerPixel/8;
+	for (h = 0; h < tile->drawable.height; h++) {
+		uint8_t *src = (uint8_t *)tile->devPrivate.ptr + tile->devKind*h;
+		uint8_t *dst = (uint8_t *)ptr + 8*cpp*h;
+
+		w = tile->drawable.width*cpp;
+		memcpy(dst, src, w);
+		while (w < 8*cpp) {
+			memcpy(dst+w, dst, w);
+			w *= 2;
+		}
+	}
+	while (h < 8) {
+		memcpy((uint8_t*)ptr + h*w, ptr, h*w);
+		h *= 2;
+	}
+
+	ret = sna_poly_fill_rect_tiled_8x8_blt(drawable, bo, damage,
+					       upload, gc, n, rect,
+					       extents, clipped);
+
+	kgem_bo_destroy(&sna->kgem, upload);
+	return ret;
+}
+
 static bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
@@ -9459,6 +9730,25 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	 * RENDER.
 	 */
 
+	if ((tile->drawable.width | tile->drawable.height) == 8) {
+		bool ret;
+
+		tile_bo = sna_pixmap_get_source_bo(tile);
+		ret = sna_poly_fill_rect_tiled_8x8_blt(drawable, bo, damage,
+						       tile_bo, gc, n, rect,
+						       extents, clipped);
+		kgem_bo_destroy(&sna->kgem, tile_bo);
+
+		return ret;
+	}
+
+	if ((tile->drawable.width | tile->drawable.height) <= 0xc &&
+	    is_power_of_two(tile->drawable.width) &&
+	    is_power_of_two(tile->drawable.height))
+		return sna_poly_fill_rect_tiled_nxm_blt(drawable, bo, damage,
+							gc, n, rect,
+							extents, clipped);
+
 	tile_bo = sna_pixmap_get_source_bo(tile);
 	if (tile_bo == NULL) {
 		DBG(("%s: unable to move tile go GPU, fallback\n",
diff --git a/src/sna/sna_reg.h b/src/sna/sna_reg.h
index 551d64b..2628236 100644
--- a/src/sna/sna_reg.h
+++ b/src/sna/sna_reg.h
@@ -51,6 +51,7 @@
 #define XY_TEXT_IMMEDIATE_BLT		((2<<29)|(0x31<<22)|(1<<16))
 #define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
 #define SRC_COPY_BLT_CMD		((2<<29)|(0x43<<22)|0x4)
+#define XY_PAT_BLT			((2<<29)|(0x51<<22)|0x4)
 #define XY_PAT_BLT_IMMEDIATE		((2<<29)|(0x72<<22))
 #define XY_MONO_PAT			((0x2<<29)|(0x52<<22)|0x7)
 #define XY_MONO_SRC_COPY		((0x2<<29)|(0x54<<22)|(0x6))
commit ac182a006732525a921a9c539e5ebfb537ad3b52
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 23 11:05:05 2012 +0100

    sna: Hold a reference to the full stipple pattern for repeated tiles
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5f14f4e..9ae8775 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10440,6 +10440,7 @@ sna_poly_fill_rect_stippled_n_box__imm(struct sna *sna,
 static void
 sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 				  struct kgem_bo *bo,
+				  struct kgem_bo **tile,
 				  uint32_t br00, uint32_t br13,
 				  const GC *gc,
 				  const BoxRec *box,
@@ -10471,6 +10472,7 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 		row = oy * stride;
 		for (x1 = box->x1; x1 < box->x2; x1 = x2) {
 			int bx1, bx2, bw, bh, len, ox;
+			bool use_tile;
 
 			x2 = box->x2;
 			ox = (x1 - origin->x) % w;
@@ -10483,11 +10485,14 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 				x2 = x1 + bx2-ox;
 			}
 
-			DBG(("%s: box((%d, %d)x(%d, %d)) origin=(%d, %d), pat=(%d, %d), up=(%d, %d), stipple=%dx%d\n",
+			use_tile = y2-y1 == h && x2-x1 == w;
+
+			DBG(("%s: box((%d, %d)x(%d, %d)) origin=(%d, %d), pat=(%d, %d), up=(%d, %d), stipple=%dx%d, full tile?=%d\n",
 			     __FUNCTION__,
 			     x1, y1, x2-x1, y2-y1,
 			     origin->x, origin->y,
-			     ox, oy, bx1, bx2, w, h));
+			     ox, oy, bx1, bx2, w, h,
+			     use_tile));
 
 			bw = (bx2 - bx1 + 7)/8;
 			bw = ALIGN(bw, 2);
@@ -10504,7 +10509,7 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 
 			b = sna->kgem.batch + sna->kgem.nbatch;
 
-			if (len <= 128) {
+			if (!use_tile && len <= 128) {
 				uint8_t *dst, *src;
 
 				b[0] = XY_MONO_SRC_COPY_IMM;
@@ -10540,15 +10545,20 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 					src += len;
 				} while (--bh);
 			} else {
+				bool has_tile = use_tile && *tile;
 				struct kgem_bo *upload;
 				uint8_t *dst, *src;
 				void *ptr;
 
-				upload = kgem_create_buffer(&sna->kgem, bw*bh,
-							    KGEM_BUFFER_WRITE_INPLACE,
-							    &ptr);
-				if (!upload)
-					return;
+				if (has_tile) {
+					upload = kgem_bo_reference(*tile);
+				} else {
+					upload = kgem_create_buffer(&sna->kgem, bw*bh,
+								    KGEM_BUFFER_WRITE_INPLACE,
+								    &ptr);
+					if (!upload)
+						return;
+				}
 
 				b = sna->kgem.batch + sna->kgem.nbatch;
 				b[0] = br00 | (ox & 7) << 17;
@@ -10571,20 +10581,24 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 
 				sna->kgem.nbatch += 8;
 
-				dst = ptr;
-				len = stride;
-				src = gc->stipple->devPrivate.ptr;
-				src += row + (ox >> 3);
-				len -= bw;
-				do {
-					int i = bw;
+				if (!has_tile) {
+					dst = ptr;
+					len = stride;
+					src = gc->stipple->devPrivate.ptr;
+					src += row + (ox >> 3);
+					len -= bw;
 					do {
-						*dst++ = byte_reverse(*src++);
-						*dst++ = byte_reverse(*src++);
-						i -= 2;
-					} while (i);
-					src += len;
-				} while (--bh);
+						int i = bw;
+						do {
+							*dst++ = byte_reverse(*src++);
+							*dst++ = byte_reverse(*src++);
+							i -= 2;
+						} while (i);
+						src += len;
+					} while (--bh);
+					if (use_tile)
+						*tile = kgem_bo_reference(upload);
+				}
 
 				kgem_bo_destroy(&sna->kgem, upload);
 			}
@@ -10740,6 +10754,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	DDXPointRec origin = gc->patOrg;
+	struct kgem_bo *tile = NULL;
 	int16_t dx, dy;
 	uint32_t br00, br13;
 
@@ -10784,7 +10799,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 			box.x2 = box.x1 + r->width;
 			box.y2 = box.y1 + r->height;
 
-			sna_poly_fill_rect_stippled_n_box(sna, bo,
+			sna_poly_fill_rect_stippled_n_box(sna, bo, &tile,
 							  br00, br13, gc,
 							  &box, &origin);
 			r++;
@@ -10822,7 +10837,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 				box.x1 += dx; box.x2 += dx;
 				box.y1 += dy; box.y2 += dy;
 
-				sna_poly_fill_rect_stippled_n_box(sna, bo,
+				sna_poly_fill_rect_stippled_n_box(sna, bo, &tile,
 								  br00, br13, gc,
 								  &box, &origin);
 			} while (--n);
@@ -10861,7 +10876,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 					box.x1 += dx; box.x2 += dx;
 					box.y1 += dy; box.y2 += dy;
 
-					sna_poly_fill_rect_stippled_n_box(sna, bo,
+					sna_poly_fill_rect_stippled_n_box(sna, bo, &tile,
 									  br00, br13, gc,
 									  &box, &origin);
 				}
@@ -10870,6 +10885,8 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 	}
 
 	assert_pixmap_damage(pixmap);
+	if (tile)
+		kgem_bo_destroy(&sna->kgem, tile);
 	sna->blt_state.fill_bo = 0;
 	return true;
 }
commit 83f683b47063eab8cfb5037d02133dd977c3fc25
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 22 23:20:23 2012 +0100

    2.20.1 release
    
    A good brown paper bag bug release for SNA.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 8e30d9e..a6819d4 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,43 @@
+Release 2.20.1 (2012-07-22)
+===========================
+A week in, grab the brown paper bags, for it is time to reveal a couple
+of critical bugs that spoilt the 2.20.0 release.
+
+Firstly we have the restoration of DRI for i810. I am sure that the
+solitary user will be overjoyed in a couple of years when a new xserver
+is forced upon him. That enjoyment will be short-lived when as no actual
+acceleration remains, not even shadow, for the chipset.
+
+Perhaps a little more wildly felt, I hope!, will be that the SNA
+fallbacks were broken on 64-bit machines if they required clipping. One
+little misplaced cast of a pointer, and the screen is filled with
+corruption.
+
+Among the other tweaks this week:
+
+* A bug affecting gen4 handling of trapezoids was fixed, and CPU
+  overhead reduced.
+  https://bugs.freedesktop.org/show_bug.cgi?id=52158
+
+* A fix for a bug causing corruption of a DRI2 unredirected client
+  window that was resized whilst under a compositor.
+
+* Support for snoopable buffers on non-LLC architectures, coming to
+  a future kernel. The aim to accelerate transfers between the CPU
+  and the GPU, in particular to dramatically improve readback
+  performance, and to further minimise clflushes.
+
+* Improvement to the composite performance on GT2 SandyBridge and
+  IvyBridge devices, in particular the render copy is significantly
+  improved.
+
+* Improved handling for when acceleration is disabled, including
+  permitting DRI2 to remain supported even if the X server believes
+  the GPU wedged.
+
+* Shadow support was dropped from UXA as it was neither complete nor
+  correct, use SNA instead.
+
 Release 2.12.0 (2012-07-15)
 ===========================
 First the big news, a new acceleration method that aims to be faster and
diff --git a/configure.ac b/configure.ac
index 45157a8..3cdacdd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.20.0],
+        [2.20.1],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 9402bdcc13f7e96dfe527ff4a3da8d13a7870a02
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 22 22:14:52 2012 +0100

    sna/glyphs: Also discard the glyph mask for bitmaps with an opaque source
    
    Though I expect all such glyphs to be caught by the non-overlapping
    checks...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 46fbf8d..918b51c 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1248,6 +1248,44 @@ out:
 	return format;
 }
 
+static bool can_discard_mask(uint8_t op, PicturePtr src, PictFormatPtr mask,
+			     int nlist, GlyphListPtr list, GlyphPtr *glyphs)
+{
+	PictFormatPtr g;
+	uint32_t color;
+
+	if (nlist == 1 && list->len == 1)
+		return true;
+
+	if (!op_is_bounded(op))
+		return false;
+
+	/* No glyphs overlap and we are not performing a mask conversion. */
+	g = glyphs_format(nlist, list, glyphs);
+	if (mask == g)
+		return true;
+
+	/* Otherwise if the glyphs are all bitmaps and we have an
+	 * opaque source we can also render directly to the dst.
+	 */
+	if (g == NULL) {
+		while (nlist--) {
+			if (list->format->depth != 1)
+				return false;
+
+			list++;
+		}
+	} else {
+		if (g->depth != 1)
+			return false;
+	}
+
+	if (!sna_picture_is_solid(src, &color))
+		return false;
+
+	return color >> 24 == 0xff;
+}
+
 #if HAS_PIXMAN_GLYPHS
 static void
 glyphs_fallback(CARD8 op,
@@ -1309,8 +1347,7 @@ glyphs_fallback(CARD8 op,
 	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
 
 	if (mask_format &&
-	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
-	    mask_format == glyphs_format(nlist, list, glyphs))
+	    can_discard_mask(op, src, mask_format, nlist, list, glyphs))
 		mask_format = NULL;
 
 	cache = sna->render.glyph_cache;
@@ -1674,8 +1711,7 @@ sna_glyphs(CARD8 op,
 
 	/* Try to discard the mask for non-overlapping glyphs */
 	if (mask && dst->pCompositeClip->data == NULL &&
-	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
-	    mask == glyphs_format(nlist, list, glyphs)) {
+	    can_discard_mask(op, src, mask, nlist, list, glyphs)) {
 		DBG(("%s: discarding mask\n", __FUNCTION__));
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
commit b315e0ebb75d8391ebef7ebe53741a5e33c968bb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 22 20:07:38 2012 +0100

    sna: Tweak the fallback hints for XYPixmap PutImage
    
    As the fallback uses a multiple-pass algorithm updating one plane at a
    time, we wish to prepare the fallback surface for reads.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e881051..5f14f4e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3698,6 +3698,7 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	RegionRec region;
 	int16_t dx, dy;
+	unsigned hint;
 
 	DBG(("%s((%d, %d)x(%d, %d), depth=%d, format=%d)\n",
 	     __FUNCTION__, x, y, w, h, depth, format));
@@ -3726,39 +3727,47 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (priv == NULL) {
 		DBG(("%s: fallback -- unattached(%d, %d, %d, %d)\n",
 		     __FUNCTION__, x, y, w, h));
+hint_and_fallback:
+		hint = (format == XYPixmap ?
+			MOVE_READ | MOVE_WRITE :
+			drawable_gc_flags(drawable, gc, false));
 		goto fallback;
 	}
 
 	RegionTranslate(&region, dx, dy);
 
 	if (FORCE_FALLBACK)
-		goto fallback;
+		goto hint_and_fallback;
 
 	if (wedged(sna))
-		goto fallback;
+		goto hint_and_fallback;
 
 	if (!ACCEL_PUT_IMAGE)
-		goto fallback;
+		goto hint_and_fallback;
 
 	switch (format) {
 	case ZPixmap:
 		if (!PM_IS_SOLID(drawable, gc->planemask))
-			goto fallback;
+			goto hint_and_fallback;
 
 		if (sna_put_zpixmap_blt(drawable, gc, &region,
 					x, y, w, h,
 					bits, PixmapBytePad(w, depth)))
 			return;
+
+		hint = drawable_gc_flags(drawable, gc, false);
 		break;
 
 	case XYBitmap:
 		if (!PM_IS_SOLID(drawable, gc->planemask))
-			goto fallback;
+			goto hint_and_fallback;
 
 		if (sna_put_xybitmap_blt(drawable, gc, &region,
 					 x, y, w, h,
 					 bits))
 			return;
+
+		hint = drawable_gc_flags(drawable, gc, false);
 		break;
 
 	case XYPixmap:
@@ -3766,10 +3775,12 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 					 x, y, w, h, left,
 					 bits))
 			return;
+
+		hint = MOVE_READ | MOVE_WRITE;
 		break;
 
 	default:
-		break;
+		return;
 	}
 
 fallback:
@@ -3778,8 +3789,7 @@ fallback:
 
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
-	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc, false)))
+	if (!sna_drawable_move_region_to_cpu(drawable, &region, hint))
 		goto out_gc;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
commit 8acaf2693e176a92993a498683f121cfe0343fd4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 22 12:28:34 2012 +0100

    sna: Promote tiled operations to the GPU if the tile is already on the GPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f39052f..e881051 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3993,6 +3993,21 @@ out:
 		free(box);
 }
 
+static inline bool
+sna_pixmap_is_gpu(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+
+	if (priv == NULL || priv->clear)
+		return false;
+
+	if (DAMAGE_IS_ALL(priv->gpu_damage) ||
+	    (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
+		return true;
+
+	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
+}
+
 static int
 source_prefer_gpu(struct sna_pixmap *priv)
 {
@@ -4001,6 +4016,11 @@ source_prefer_gpu(struct sna_pixmap *priv)
 		return 0;
 	}
 
+	if (priv->clear) {
+		DBG(("%s: source is clear, don't force use of GPU\n", __FUNCTION__));
+		return 0;
+	}
+
 	if (priv->gpu_damage) {
 		DBG(("%s: source has gpu damage, force gpu\n", __FUNCTION__));
 		return PREFER_GPU | FORCE_GPU;
@@ -11014,6 +11034,12 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 			priv->cpu = false;
 	}
 
+	/* If the source is already on the GPU, keep the operation on the GPU */
+	if (gc->fillStyle == FillTiled) {
+		if (!gc->tileIsPixel && sna_pixmap_is_gpu(gc->tile.pixmap))
+			hint |= PREFER_GPU | FORCE_GPU;
+	}
+
 	bo = sna_drawable_use_bo(draw, hint, &region.extents, &damage);
 	if (bo == NULL)
 		goto fallback;
commit 7d4a3e371beea65bf66e54ae13789d6d5ca91f8b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 22 11:19:13 2012 +0100

    sna: Use an upload buffer for large stipples
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0b0a304..f39052f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3519,9 +3519,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		} while (--bh);
 
 		b = sna->kgem.batch + sna->kgem.nbatch;
-		b[0] = XY_MONO_SRC_COPY;
-		if (drawable->bitsPerPixel == 32)
-			b[0] |= 3 << 20;
+		b[0] = XY_MONO_SRC_COPY | 3 << 20;
 		b[0] |= ((box->x1 - x) & 7) << 17;
 		b[1] = bo->pitch;
 		if (sna->kgem.gen >= 40 && bo->tiling) {
@@ -3649,9 +3647,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			} while (--bh);
 
 			b = sna->kgem.batch + sna->kgem.nbatch;
-			b[0] = XY_FULL_MONO_PATTERN_MONO_SRC_BLT;
-			if (drawable->bitsPerPixel == 32)
-				b[0] |= 3 << 20;
+			b[0] = XY_FULL_MONO_PATTERN_MONO_SRC_BLT | 3 << 20;
 			b[0] |= ((box->x1 - x) & 7) << 17;
 			b[1] = bo->pitch;
 			if (sna->kgem.gen >= 40 && bo->tiling) {
@@ -5852,9 +5848,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	get_drawable_deltas(drawable, dst_pixmap, &dx, &dy);
 	assert_pixmap_contains_boxes(dst_pixmap, box, n, dx, dy);
 
-	br00 = XY_MONO_SRC_COPY;
-	if (drawable->bitsPerPixel == 32)
-		br00 |= 3 << 20;
+	br00 = XY_MONO_SRC_COPY | 3 << 20;
 	br13 = arg->bo->pitch;
 	if (sna->kgem.gen >= 40 && arg->bo->tiling) {
 		br00 |= BLT_DST_TILED;
@@ -9667,9 +9661,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 		unsigned px = (0 - gc->patOrg.x - dx) & 7;
 		unsigned py = (0 - gc->patOrg.y - dy) & 7;
 		DBG(("%s: pat offset (%d, %d)\n", __FUNCTION__ ,px, py));
-		br00 = XY_MONO_PAT | px << 12 | py << 8;
-		if (drawable->bitsPerPixel == 32)
-			br00 |= 3 << 20;
+		br00 = XY_MONO_PAT | px << 12 | py << 8 | 3 << 20;
 
 		br13 = bo->pitch;
 		if (sna->kgem.gen >= 40 && bo->tiling) {
@@ -10327,12 +10319,12 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 }
 
 static void
-sna_poly_fill_rect_stippled_n_box(struct sna *sna,
-				  struct kgem_bo *bo,
-				  uint32_t br00, uint32_t br13,
-				  GCPtr gc,
-				  const BoxRec *box,
-				  const DDXPointRec *origin)
+sna_poly_fill_rect_stippled_n_box__imm(struct sna *sna,
+				       struct kgem_bo *bo,
+				       uint32_t br00, uint32_t br13,
+				       const GC *gc,
+				       const BoxRec *box,
+				       const DDXPointRec *origin)
 {
 	int x1, x2, y1, y2;
 	uint32_t *b;
@@ -10415,6 +10407,299 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 	}
 }
 
+static void
+sna_poly_fill_rect_stippled_n_box(struct sna *sna,
+				  struct kgem_bo *bo,
+				  uint32_t br00, uint32_t br13,
+				  const GC *gc,
+				  const BoxRec *box,
+				  const DDXPointRec *origin)
+{
+	int x1, x2, y1, y2;
+	int w = gc->stipple->drawable.width;
+	int h = gc->stipple->drawable.height;
+	int stride = gc->stipple->devKind;
+	uint32_t *b;
+
+	if ((((box->y2-box->y1) | (box->x2-box->x1)) & ~31) == 0) {
+		br00 = XY_MONO_SRC_COPY_IMM |(br00 & (BLT_DST_TILED | 3 << 20));
+		sna_poly_fill_rect_stippled_n_box__imm(sna, bo,
+						       br00, br13, gc,
+						       box, origin);
+		return;
+	}
+
+	for (y1 = box->y1; y1 < box->y2; y1 = y2) {
+		int row, oy = (y1 - origin->y) % gc->stipple->drawable.height;
+		if (oy < 0)
+			oy += h;
+
+		y2 = box->y2;
+		if (y2 - y1 > h - oy)
+			y2 = y1 + h - oy;
+
+		row = oy * stride;
+		for (x1 = box->x1; x1 < box->x2; x1 = x2) {
+			int bx1, bx2, bw, bh, len, ox;
+
+			x2 = box->x2;
+			ox = (x1 - origin->x) % w;
+			if (ox < 0)
+				ox += w;
+			bx1 = ox & ~7;
+			bx2 = ox + (x2 - x1);
+			if (bx2 > w) {
+				bx2 = w;
+				x2 = x1 + bx2-ox;
+			}
+
+			DBG(("%s: box((%d, %d)x(%d, %d)) origin=(%d, %d), pat=(%d, %d), up=(%d, %d), stipple=%dx%d\n",
+			     __FUNCTION__,
+			     x1, y1, x2-x1, y2-y1,
+			     origin->x, origin->y,
+			     ox, oy, bx1, bx2, w, h));
+
+			bw = (bx2 - bx1 + 7)/8;
+			bw = ALIGN(bw, 2);
+			bh = y2 - y1;
+
+			len = bw*bh;
+			len = ALIGN(len, 8) / 4;
+			if (!kgem_check_batch(&sna->kgem, 7+len) ||
+			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
+			    !kgem_check_reloc(&sna->kgem, 2)) {
+				_kgem_submit(&sna->kgem);
+				_kgem_set_mode(&sna->kgem, KGEM_BLT);
+			}
+
+			b = sna->kgem.batch + sna->kgem.nbatch;
+
+			if (len <= 128) {
+				uint8_t *dst, *src;
+
+				b[0] = XY_MONO_SRC_COPY_IMM;
+				b[0] |= (br00 & (BLT_DST_TILED | 3 << 20));
+				b[0] |= (ox & 7) << 17;
+				b[0] |= (5 + len);
+				b[1] = br13;
+				b[2] = y1 << 16 | x1;
+				b[3] = y2 << 16 | x2;
+				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+						      bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      I915_GEM_DOMAIN_RENDER |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[5] = gc->bgPixel;
+				b[6] = gc->fgPixel;
+
+				sna->kgem.nbatch += 7 + len;
+
+				dst = (uint8_t *)&b[7];
+				len = gc->stipple->devKind;
+				src = gc->stipple->devPrivate.ptr;
+				src += oy*len + ox/8;
+				len -= bw;
+				do {
+					int i = bw;
+					do {
+						*dst++ = byte_reverse(*src++);
+						*dst++ = byte_reverse(*src++);
+						i -= 2;
+					} while (i);
+					src += len;
+				} while (--bh);
+			} else {
+				struct kgem_bo *upload;
+				uint8_t *dst, *src;
+				void *ptr;
+
+				upload = kgem_create_buffer(&sna->kgem, bw*bh,
+							    KGEM_BUFFER_WRITE_INPLACE,
+							    &ptr);
+				if (!upload)
+					return;
+
+				b = sna->kgem.batch + sna->kgem.nbatch;
+				b[0] = br00 | (ox & 7) << 17;
+				b[1] = br13;
+				b[2] = y1 << 16 | x1;
+				b[3] = y2 << 16 | x2;
+				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+						      bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      I915_GEM_DOMAIN_RENDER |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5,
+						      upload,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[6] = gc->bgPixel;
+				b[7] = gc->fgPixel;
+
+				sna->kgem.nbatch += 8;
+
+				dst = ptr;
+				len = stride;
+				src = gc->stipple->devPrivate.ptr;
+				src += row + (ox >> 3);
+				len -= bw;
+				do {
+					int i = bw;
+					do {
+						*dst++ = byte_reverse(*src++);
+						*dst++ = byte_reverse(*src++);
+						i -= 2;
+					} while (i);
+					src += len;
+				} while (--bh);
+
+				kgem_bo_destroy(&sna->kgem, upload);
+			}
+		}
+	}
+}
+
+static bool
+sna_poly_fill_rect_stippled_n_blt__imm(DrawablePtr drawable,
+				       struct kgem_bo *bo,
+				       struct sna_damage **damage,
+				       GCPtr gc, int n, xRectangle *r,
+				       const BoxRec *extents, unsigned clipped)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(drawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	DDXPointRec origin = gc->patOrg;
+	int16_t dx, dy;
+	uint32_t br00, br13;
+
+	DBG(("%s: upload (%d, %d), (%d, %d), origin (%d, %d), clipped=%d, alu=%d, opaque=%d\n", __FUNCTION__,
+	     extents->x1, extents->y1,
+	     extents->x2, extents->y2,
+	     origin.x, origin.y,
+	     clipped, gc->alu, gc->fillStyle == FillOpaqueStippled));
+
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	kgem_set_mode(&sna->kgem, KGEM_BLT);
+
+	br00 = XY_MONO_SRC_COPY_IMM | 3 << 20;
+	br13 = bo->pitch;
+	if (sna->kgem.gen >= 40 && bo->tiling) {
+		br00 |= BLT_DST_TILED;
+		br13 >>= 2;
+	}
+	br13 |= (gc->fillStyle == FillStippled) << 29;
+	br13 |= blt_depth(drawable->depth) << 24;
+	br13 |= copy_ROP[gc->alu] << 16;
+
+	origin.x += dx + drawable->x;
+	origin.y += dy + drawable->y;
+
+	if (!clipped) {
+		dx += drawable->x;
+		dy += drawable->y;
+
+		sna_damage_add_rectangles(damage, r, n, dx, dy);
+		do {
+			BoxRec box;
+
+			box.x1 = r->x + dx;
+			box.y1 = r->y + dy;
+			box.x2 = box.x1 + r->width;
+			box.y2 = box.y1 + r->height;
+
+			sna_poly_fill_rect_stippled_n_box__imm(sna, bo,
+							       br00, br13, gc,
+							       &box, &origin);
+			r++;
+		} while (--n);
+	} else {
+		RegionRec clip;
+
+		region_set(&clip, extents);
+		region_maybe_clip(&clip, gc->pCompositeClip);
+		if (!RegionNotEmpty(&clip)) {
+			DBG(("%s: all clipped\n", __FUNCTION__));
+			return true;
+		}
+
+		if (clip.data == NULL) {
+			DBG(("%s: clipped to extents ((%d, %d), (%d, %d))\n",
+			     __FUNCTION__,
+			     clip.extents.x1, clip.extents.y1,
+			     clip.extents.x2, clip.extents.y2));
+			do {
+				BoxRec box;
+
+				box.x1 = r->x + drawable->x;
+				box.x2 = bound(box.x1, r->width);
+				box.y1 = r->y + drawable->y;
+				box.y2 = bound(box.y1, r->height);
+				r++;
+
+				DBG(("%s: box (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     box.x1, box.y1, box.x2, box.y2));
+				if (!box_intersect(&box, &clip.extents))
+					continue;
+
+				box.x1 += dx; box.x2 += dx;
+				box.y1 += dy; box.y2 += dy;
+
+				sna_poly_fill_rect_stippled_n_box__imm(sna, bo,
+								       br00, br13, gc,
+								       &box, &origin);
+			} while (--n);
+		} else {
+			const BoxRec * const clip_start = RegionBoxptr(&clip);
+			const BoxRec * const clip_end = clip_start + clip.data->numRects;
+			const BoxRec *c;
+
+			DBG(("%s: clipped to boxes: start((%d, %d), (%d, %d)); end=((%d, %d), (%d, %d))\n", __FUNCTION__,
+			     clip_start->x1, clip_start->y1,
+			     clip_start->x2, clip_start->y2,
+			     clip_end->x1, clip_end->y1,
+			     clip_end->x2, clip_end->y2));
+			do {
+				BoxRec unclipped;
+
+				unclipped.x1 = r->x + drawable->x;
+				unclipped.x2 = bound(unclipped.x1, r->width);
+				unclipped.y1 = r->y + drawable->y;
+				unclipped.y2 = bound(unclipped.y1, r->height);
+				r++;
+
+				c = find_clip_box_for_y(clip_start,
+							clip_end,
+							unclipped.y1);
+				while (c != clip_end) {
+					BoxRec box;
+
+					if (unclipped.y2 <= c->y1)
+						break;
+
+					box = unclipped;
+					if (!box_intersect(&box, c++))
+						continue;
+
+					box.x1 += dx; box.x2 += dx;
+					box.y1 += dy; box.y2 += dy;
+
+					sna_poly_fill_rect_stippled_n_box__imm(sna, bo,
+									       br00, br13, gc,
+									       &box, &origin);
+				}
+			} while (--n);
+		}
+	}
+
+	assert_pixmap_damage(pixmap);
+	sna->blt_state.fill_bo = 0;
+	return true;
+}
+
 static bool
 sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 				  struct kgem_bo *bo,
@@ -10434,14 +10719,16 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 	     origin.x, origin.y,
 	     clipped, gc->alu, gc->fillStyle == FillOpaqueStippled));
 
-	if (gc->stipple->drawable.width > 32 ||
-	    gc->stipple->drawable.height > 32)
-		return false;
+	if (((gc->stipple->drawable.width | gc->stipple->drawable.height) & ~31) == 0)
+		return sna_poly_fill_rect_stippled_n_blt__imm(drawable,
+							      bo, damage,
+							      gc, n, r,
+							      extents, clipped);
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 
-	br00 = XY_MONO_SRC_COPY_IMM | 3 << 20;
+	br00 = XY_MONO_SRC_COPY | 3 << 20;
 	br13 = bo->pitch;
 	if (sna->kgem.gen >= 40 && bo->tiling) {
 		br00 |= BLT_DST_TILED;
@@ -12165,9 +12452,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		} while (--bh);
 
 		b = sna->kgem.batch + sna->kgem.nbatch;
-		b[0] = XY_MONO_SRC_COPY;
-		if (drawable->bitsPerPixel == 32)
-			b[0] |= 3 << 20;
+		b[0] = XY_MONO_SRC_COPY | 3 << 20;
 		b[0] |= ((box->x1 - region->extents.x1) & 7) << 17;
 		b[1] = bo->pitch;
 		if (sna->kgem.gen >= 40 && bo->tiling) {
commit 40e0cf32a25e43e16184b2af87a1e1abeb8e4052
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 19:43:23 2012 +0100

    sna/dri: We fail at predicting the flip frame
    
    Simply report the values from the kernel, and transfer the blame...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index cfb9d98..9698247 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -71,7 +71,6 @@ struct sna_dri_frame_event {
 	DrawablePtr draw;
 	ClientPtr client;
 	enum frame_event_type type;
-	unsigned frame;
 	int pipe;
 	int count;
 
@@ -1300,18 +1299,6 @@ static void sna_dri_flip_event(struct sna *sna,
 	/* We assume our flips arrive in order, so we don't check the frame */
 	switch (flip->type) {
 	case DRI2_FLIP:
-		/* Deliver cached msc, ust from reference crtc */
-		/* Check for too small vblank count of pageflip completion,
-		 * taking wraparound * into account. This usually means some
-		 * defective kms pageflip completion, causing wrong (msc, ust)
-		 * return values and possible visual corruption.
-		 */
-		if (flip->fe_frame < flip->frame &&
-		    flip->frame - flip->fe_frame < 5) {
-			/* All-0 values signal timestamping failure. */
-			flip->fe_frame = flip->fe_tv_sec = flip->fe_tv_usec = 0;
-		}
-
 		DBG(("%s: flip complete\n", __FUNCTION__));
 		DRI2SwapComplete(flip->client, draw,
 				 flip->fe_frame,
@@ -1596,8 +1583,6 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			sna_dri_frame_event_info_free(sna, draw, info);
 			return false;
 		}
-
-		info->frame = *target_msc;
 	}
 
 	return true;
@@ -1812,7 +1797,6 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		     (int)*target_msc,
 		     (int)divisor));
 
-		info->frame = *target_msc;
 		info->type = DRI2_SWAP;
 
 		vbl.request.type =
@@ -1861,7 +1845,6 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		goto blit_fallback;
 
 	*target_msc = vbl.reply.sequence;
-	info->frame = *target_msc;
 	return TRUE;
 
 blit_fallback:
@@ -2102,7 +2085,6 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 		if (sna_wait_vblank(sna, &vbl))
 			goto out_free_info;
 
-		info->frame = vbl.reply.sequence;
 		DRI2BlockClient(client, draw);
 		return TRUE;
 	}
@@ -2130,7 +2112,6 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (sna_wait_vblank(sna, &vbl))
 		goto out_free_info;
 
-	info->frame = vbl.reply.sequence;
 	DRI2BlockClient(client, draw);
 	return TRUE;
 
commit c6e316eeba3008b351f2cd63829154f4672c5417
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 18:18:32 2012 +0100

    sna: Adjust hints to prefer rendering convex polygon with the GPU
    
    Keep the general polygons as only using the GPU if necessary, until the
    cost of the routines is analysed.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5bec59a..0b0a304 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9274,7 +9274,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 		goto fallback;
 
 	if ((data.bo = sna_drawable_use_bo(draw,
-					   use_wide_spans(draw, gc, &data.region.extents),
+					   (shape == Convex ? use_zero_spans : use_wide_spans)(draw, gc, &data.region.extents),
 					   &data.region.extents,
 					   &data.damage))) {
 		uint32_t color;
commit f1e7248cb353d634f27d297059911168ce1a0762
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 16:08:31 2012 +0100

    sna: Expand the heuristic for predicting when to use CPU bo for readback
    
    For tiny transfers, the cost of setting up the GPU operation outweighs
    the actual savings through increased throughput. So we try to guess when
    it will be preferrable to simply read from the GPU bo directly.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5a9eb1e..5bec59a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1093,12 +1093,31 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 }
 
 static inline bool use_cpu_bo_for_download(struct sna *sna,
-					   struct sna_pixmap *priv)
+					   struct sna_pixmap *priv,
+					   const BoxRec *box)
 {
 	if (DBG_NO_CPU_DOWNLOAD)
 		return false;
 
-	return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
+	if (priv->cpu_bo == NULL || !sna->kgem.can_blt_cpu)
+		return false;
+
+	if (kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo)) {
+		DBG(("%s: yes, either bo is busy, so use GPU for readback\n",
+		     __FUNCTION__));
+		return true;
+	}
+
+	/* Is it worth detiling? */
+	if (kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo) &&
+	    (box->y2 - box->y1 - 1) * priv->gpu_bo->pitch < 4096) {
+		DBG(("%s: no, tiny transfer, expect to read inplace\n",
+		     __FUNCTION__));
+		return false;
+	}
+
+	DBG(("%s: yes, default action\n", __FUNCTION__));
+	return true;
 }
 
 static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv,
@@ -1329,7 +1348,7 @@ skip_inplace_map:
 		if (n) {
 			bool ok = false;
 
-			if (use_cpu_bo_for_download(sna, priv)) {
+			if (use_cpu_bo_for_download(sna, priv, &priv->gpu_damage->extents)) {
 				DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1794,7 +1813,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = false;
-			if (use_cpu_bo_for_download(sna, priv)) {
+			if (use_cpu_bo_for_download(sna, priv, &priv->gpu_damage->extents)) {
 				DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1904,7 +1923,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (n) {
 					bool ok = false;
 
-					if (use_cpu_bo_for_download(sna, priv)) {
+					if (use_cpu_bo_for_download(sna, priv, &priv->gpu_damage->extents)) {
 						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
@@ -1931,7 +1950,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				DBG(("%s: region wholly inside damage\n",
 				     __FUNCTION__));
 
-				if (use_cpu_bo_for_download(sna, priv)) {
+				if (use_cpu_bo_for_download(sna, priv, &r->extents)) {
 					DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
@@ -1958,7 +1977,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					DBG(("%s: region intersects damage\n",
 					     __FUNCTION__));
 
-					if (use_cpu_bo_for_download(sna, priv)) {
+					if (use_cpu_bo_for_download(sna, priv, &need.extents)) {
 						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
commit 06db69c2c7023f702f9773be90144fdf7a1159e4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 15:28:10 2012 +0100

    sna: Update assertion for cached io buffers
    
    As kgem_buffers may be reused and repurposed through the snoop cache it
    is no longer true that only proxies will have the io flag set.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 7b388fb..d7c3812 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -417,7 +417,7 @@ void kgem_get_tile_size(struct kgem *kgem, int tiling,
 
 static inline int __kgem_buffer_size(struct kgem_bo *bo)
 {
-	assert(bo->proxy && bo->io);
+	assert(bo->proxy != NULL);
 	return bo->size.bytes;
 }
 
@@ -429,7 +429,7 @@ static inline int __kgem_bo_size(struct kgem_bo *bo)
 
 static inline int kgem_bo_size(struct kgem_bo *bo)
 {
-	if (bo->io)
+	if (bo->proxy)
 		return __kgem_buffer_size(bo);
 	else
 		return __kgem_bo_size(bo);
commit d715e1e01437049e167462281d51b5e214594361
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 15:04:31 2012 +0100

    sna: Also discard the last-was-cpu flag when overwriting cpu damage
    
    We interpret a FillRect that eradicates the existing damage as a
    clear-event and an opportunity to see if it is worth migrating the
    render commands to the GPU. This is undermined if we leave the
    'prefer-cpu' flag intact.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 16f0c4e..5a9eb1e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10701,10 +10701,11 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 					       pixmap->drawable.width,
 					       pixmap->drawable.height);
 				priv->undamaged = false;
-				priv->cpu = false;
 			}
 			hint |= IGNORE_CPU;
 		}
+		if (priv->cpu_damage == NULL)
+			priv->cpu = false;
 	}
 
 	bo = sna_drawable_use_bo(draw, hint, &region.extents, &damage);
commit e95825d17ce65ad8173a5e6518a98969e236a4f8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 14:38:17 2012 +0100

    sna: Enable snooping on the reused linear buffer
    
    This explains why suddenly I was seeing clflush again on a couple of
    machines...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ab78e51..1aa8af9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3284,8 +3284,6 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			kgem_bo_destroy(kgem, bo);
 			return NULL;
 		}
-
-		bo->reusable = false;
 		bo->snoop = true;
 
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
@@ -3803,7 +3801,6 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	}
 
 	bo->snoop = true;
-
 	debug_alloc__bo(kgem, bo);
 
 	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
@@ -4024,12 +4021,6 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 				return NULL;
 			}
 
-			if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
-				gem_close(kgem->fd, handle);
-				free(bo);
-				return NULL;
-			}
-
 			debug_alloc(kgem, alloc);
 			__kgem_bo_init(&bo->base, handle, alloc);
 			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
@@ -4040,15 +4031,20 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->mmapped == true);
 		assert(bo->need_io == false);
 
+		if (!gem_set_cacheing(kgem->fd, bo->base.handle, SNOOPED))
+			goto free_cacheing;
+
 		bo->base.snoop = true;
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem == NULL) {
-			bo->base.refcnt = 0; /* for valgrind */
-			kgem_bo_free(kgem, &bo->base);
-			bo = NULL;
-		}
+		if (bo->mem == NULL)
+			goto free_cacheing;
+
 		return bo;
+
+free_cacheing:
+		bo->base.refcnt = 0; /* for valgrind */
+		kgem_bo_free(kgem, &bo->base);
 	}
 
 	if (kgem->has_userptr) {
commit c5e6b5874f334b9124a17f017c6eb175cf88f115
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 14:26:52 2012 +0100

    sna: Fix the reversed not SHM assertion
    
    Should be double checking that we are not about to free a CPU bo pinned
    to a SHM pixmap.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 74db52e..16f0c4e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1208,7 +1208,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
-				assert(priv->shm);
+				assert(!priv->shm);
 				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
commit f36b656ab2bc16ec8849cadb0afb574bb742c3a3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 13:18:40 2012 +0100

    sna: Avoid marking io/snoop buffers as unreusable unnecessarily
    
    As they are kept in special caches, we can reserve the unreusable flags
    for exceptional buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index dbe6423..ab78e51 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1431,14 +1431,14 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 			     __FUNCTION__, bo->handle));
 			/* transfer the handle to a minimum bo */
 			memcpy(base, bo, sizeof(*base));
-			base->reusable = true;
 			base->io = false;
 			list_init(&base->list);
 			list_replace(&bo->request, &base->request);
 			list_replace(&bo->vma, &base->vma);
 			free(bo);
 			bo = base;
-		}
+		} else
+			bo->reusable = false;
 	}
 
 	if (!bo->reusable) {
@@ -1749,7 +1749,8 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 
-		if (!bo->refcnt && !bo->reusable && !bo->snoop) {
+		if (!bo->refcnt && !bo->reusable) {
+			assert(!bo->snoop);
 			kgem_bo_free(kgem, bo);
 			continue;
 		}
@@ -3801,7 +3802,6 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 		return NULL;
 	}
 
-	bo->reusable = false;
 	bo->snoop = true;
 
 	debug_alloc__bo(kgem, bo);
@@ -4040,7 +4040,6 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->mmapped == true);
 		assert(bo->need_io == false);
 
-		bo->base.reusable = false;
 		bo->base.snoop = true;
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
@@ -4080,7 +4079,6 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 
 		bo->base.refcnt = 1;
 		bo->base.snoop = true;
-		bo->base.reusable = false;
 		bo->base.map = MAKE_USER_MAP(bo->mem);
 
 		return bo;
@@ -4408,7 +4406,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	}
 init:
 	bo->base.io = true;
-	bo->base.reusable = false;
 	assert(bo->base.refcnt == 1);
 	assert(num_pages(&bo->base) == alloc);
 	assert(!bo->need_io || !bo->base.needs_flush);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 32f7007..74db52e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -461,6 +461,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 #endif
 		if (priv->cpu_bo->flush) {
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+			priv->cpu_bo->reusable = false;
 			sna_accel_watch_flush(sna, -1);
 		}
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
commit 37dfdb0e9e86effc3ca8b590c98aa2382e8f0cea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 13:01:09 2012 +0100

    sna: Correct assertion for __kgem_bo_size()
    
    Only proxies are measured in bytes, not pages.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3853379..dbe6423 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -127,7 +127,7 @@ static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
 
 static inline int bytes(struct kgem_bo *bo)
 {
-	return kgem_bo_size(bo);
+	return __kgem_bo_size(bo);
 }
 
 #define bucket(B) (B)->size.pages.bucket
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f1ded24..7b388fb 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -423,7 +423,7 @@ static inline int __kgem_buffer_size(struct kgem_bo *bo)
 
 static inline int __kgem_bo_size(struct kgem_bo *bo)
 {
-	assert(!(bo->proxy && bo->io));
+	assert(bo->proxy == NULL);
 	return PAGE_SIZE * bo->size.pages.count;
 }
 
commit 83ad661bc73e9d0094b669c5203e25afc3937bb7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 12:51:41 2012 +0100

    sna: Change the vmap interface name to userptr
    
    This is in common with the other drivers and avoids the conflict with
    'vmalloc/vmap' used by the kernel for allocation of contiguous virtual
    mappings.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index b3a786d..45157a8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -247,14 +247,14 @@ if test "x$accel" = xno; then
 	AC_MSG_ERROR([No default acceleration option])
 fi
 
-AC_ARG_ENABLE(vmap,
-	      AS_HELP_STRING([--enable-vmap],
-			     [Enable use of vmap (experimental) [default=no]]),
-	      [VMAP="$enableval"],
-	      [VMAP=no])
-AM_CONDITIONAL(USE_VMAP, test x$VMAP = xyes)
-if test "x$VMAP" = xyes; then
-	AC_DEFINE(USE_VMAP,1,[Assume VMAP support])
+AC_ARG_ENABLE(userptr,
+	      AS_HELP_STRING([--enable-userptr],
+			     [Enable use of userptr (experimental) [default=no]]),
+	      [USERPTR="$enableval"],
+	      [USERPTR=no])
+AM_CONDITIONAL(USE_USERPTR, test x$USERPTR = xyes)
+if test "x$USERPTR" = xyes; then
+	AC_DEFINE(USE_USERPTR,1,[Assume USERPTR support])
 fi
 
 AC_ARG_ENABLE(async-swap,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 578c8e4..3853379 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -60,7 +60,7 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG_NO_TILING 0
 #define DBG_NO_CACHE 0
 #define DBG_NO_CACHE_LEVEL 0
-#define DBG_NO_VMAP 0
+#define DBG_NO_USERPTR 0
 #define DBG_NO_LLC 0
 #define DBG_NO_SEMAPHORES 0
 #define DBG_NO_MADV 0
@@ -88,17 +88,17 @@ search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 #define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
-#define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
-#define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
+#define MAKE_USER_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
+#define IS_USER_MAP(ptr) ((uintptr_t)(ptr) & 2)
 #define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
 
-#define LOCAL_I915_GEM_VMAP       0x32
-#define LOCAL_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)
-struct local_i915_gem_vmap {
+#define LOCAL_I915_GEM_USERPTR       0x32
+#define LOCAL_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_USERPTR, struct local_i915_gem_userptr)
+struct local_i915_gem_userptr {
 	uint64_t user_ptr;
 	uint32_t user_size;
 	uint32_t flags;
-#define I915_VMAP_READ_ONLY 0x1
+#define I915_USERPTR_READ_ONLY 0x1
 	uint32_t handle;
 };
 
@@ -195,24 +195,24 @@ static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
 	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
 }
 
-static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
+static uint32_t gem_userptr(int fd, void *ptr, int size, int read_only)
 {
-	struct local_i915_gem_vmap vmap;
+	struct local_i915_gem_userptr arg;
 
-	VG_CLEAR(vmap);
-	vmap.user_ptr = (uintptr_t)ptr;
-	vmap.user_size = size;
-	vmap.flags = 0;
+	VG_CLEAR(arg);
+	arg.user_ptr = (uintptr_t)ptr;
+	arg.user_size = size;
+	arg.flags = 0;
 	if (read_only)
-		vmap.flags |= I915_VMAP_READ_ONLY;
+		arg.flags |= I915_USERPTR_READ_ONLY;
 
-	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
+	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_USERPTR, &arg)) {
 		DBG(("%s: failed to map %p + %d bytes: %d\n",
 		     __FUNCTION__, ptr, size, errno));
 		return 0;
 	}
 
-	return vmap.handle;
+	return arg.handle;
 }
 
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
@@ -716,13 +716,13 @@ static bool test_has_cacheing(struct kgem *kgem)
 	return ret;
 }
 
-static bool test_has_vmap(struct kgem *kgem)
+static bool test_has_userptr(struct kgem *kgem)
 {
-#if defined(USE_VMAP)
+#if defined(USE_USERPTR)
 	uint32_t handle;
 	void *ptr;
 
-	if (DBG_NO_VMAP)
+	if (DBG_NO_USERPTR)
 		return false;
 
 	/* Incoherent blt and sampler hangs the GPU */
@@ -730,7 +730,7 @@ static bool test_has_vmap(struct kgem *kgem)
 		return false;
 
 	ptr = malloc(PAGE_SIZE);
-	handle = gem_vmap(kgem->fd, ptr, PAGE_SIZE, false);
+	handle = gem_userptr(kgem->fd, ptr, PAGE_SIZE, false);
 	gem_close(kgem->fd, handle);
 	free(ptr);
 
@@ -781,9 +781,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
 	     kgem->has_cacheing));
 
-	kgem->has_vmap = test_has_vmap(kgem);
-	DBG(("%s: has vmap? %d\n", __FUNCTION__,
-	     kgem->has_vmap));
+	kgem->has_userptr = test_has_userptr(kgem);
+	DBG(("%s: has userptr? %d\n", __FUNCTION__,
+	     kgem->has_userptr));
 
 	kgem->has_semaphores = false;
 	if (kgem->has_blt && test_has_semaphores_enabled())
@@ -846,9 +846,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_vmap | kgem->has_cacheing,
-	     kgem->has_llc, kgem->has_cacheing, kgem->has_vmap));
+	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
+	     kgem->has_llc | kgem->has_userptr | kgem->has_cacheing,
+	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));
 
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
@@ -919,7 +919,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->large_object_size = MAX_CACHE_SIZE;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
-	if (kgem->has_llc | kgem->has_cacheing | kgem->has_vmap) {
+	if (kgem->has_llc | kgem->has_cacheing | kgem->has_userptr) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
 	} else
@@ -1174,7 +1174,7 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	int type = IS_CPU_MAP(bo->map);
 
-	assert(!IS_VMAP_MAP(bo->map));
+	assert(!IS_USER_MAP(bo->map));
 
 	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
 	     __FUNCTION__, type ? "CPU" : "GTT",
@@ -1204,7 +1204,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 
 	kgem_bo_binding_free(kgem, bo);
 
-	if (IS_VMAP_MAP(bo->map)) {
+	if (IS_USER_MAP(bo->map)) {
 		assert(bo->rq == NULL);
 		assert(MAP(bo->map) != bo || bo->io);
 		if (bo != MAP(bo->map)) {
@@ -3296,7 +3296,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		return bo;
 	}
 
-	if (kgem->has_vmap) {
+	if (kgem->has_userptr) {
 		void *ptr;
 
 		/* XXX */
@@ -3310,7 +3310,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return NULL;
 		}
 
-		bo->map = MAKE_VMAP_MAP(ptr);
+		bo->map = MAKE_USER_MAP(ptr);
 		bo->pitch = stride;
 		return bo;
 	}
@@ -3788,10 +3788,10 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	struct kgem_bo *bo;
 	uint32_t handle;
 
-	if (!kgem->has_vmap)
+	if (!kgem->has_userptr)
 		return NULL;
 
-	handle = gem_vmap(kgem->fd, ptr, size, read_only);
+	handle = gem_userptr(kgem->fd, ptr, size, read_only);
 	if (handle == 0)
 		return NULL;
 
@@ -4052,7 +4052,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		return bo;
 	}
 
-	if (kgem->has_vmap) {
+	if (kgem->has_userptr) {
 		bo = buffer_alloc();
 		if (bo == NULL)
 			return NULL;
@@ -4063,7 +4063,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 			return NULL;
 		}
 
-		handle = gem_vmap(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
+		handle = gem_userptr(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
 		if (handle == 0) {
 			free(bo->mem);
 			free(bo);
@@ -4081,7 +4081,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		bo->base.refcnt = 1;
 		bo->base.snoop = true;
 		bo->base.reusable = false;
-		bo->base.map = MAKE_VMAP_MAP(bo->mem);
+		bo->base.map = MAKE_USER_MAP(bo->mem);
 
 		return bo;
 	}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 351aa32..f1ded24 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -149,7 +149,7 @@ struct kgem {
 	uint32_t need_throttle:1;
 	uint32_t busy:1;
 
-	uint32_t has_vmap :1;
+	uint32_t has_userptr :1;
 	uint32_t has_blt :1;
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_relaxed_delta :1;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f792430..32f7007 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,7 +60,6 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
-#define USE_SHM_VMAP 1
 
 #define MIGRATE_ALL 0
 #define DBG_NO_CPU_UPLOAD 0
@@ -13053,7 +13052,7 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	assert(screen->SetWindowPixmap == NULL);
 	screen->SetWindowPixmap = sna_set_window_pixmap;
 
-	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+	if (sna->kgem.has_userptr)
 		ShmRegisterFuncs(screen, &shm_funcs);
 	else
 		ShmRegisterFbFuncs(screen);
commit 8dcccd308222bcf1b96f2ee15842b4558ea5f29e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 12:47:21 2012 +0100

    sna: s/vmap/snoop/ since we use the flag more generically
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index de6c8c4..bc37615 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -727,7 +727,7 @@ gen4_bind_bo(struct sna *sna,
 	uint32_t domains;
 	uint16_t offset;
 
-	assert(!kgem_bo_is_vmap(bo));
+	assert(!kgem_bo_is_snoop(bo));
 
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5af0a9e..578c8e4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -54,7 +54,7 @@ static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 static struct kgem_bo *
-search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
+search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
@@ -830,7 +830,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->large);
-	list_init(&kgem->vmap);
+	list_init(&kgem->snoop);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
 	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
@@ -1195,7 +1195,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	assert(bo->refcnt == 0);
 	assert(bo->exec == NULL);
-	assert(!bo->vmap || bo->rq == NULL);
+	assert(!bo->snoop || bo->rq == NULL);
 
 #ifdef DEBUG_MEMORY
 	kgem->debug_memory.bo_allocs--;
@@ -1208,7 +1208,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 		assert(bo->rq == NULL);
 		assert(MAP(bo->map) != bo || bo->io);
 		if (bo != MAP(bo->map)) {
-			DBG(("%s: freeing vmap base\n", __FUNCTION__));
+			DBG(("%s: freeing snooped base\n", __FUNCTION__));
 			free(MAP(bo->map));
 		}
 		bo->map = NULL;
@@ -1320,7 +1320,7 @@ static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 		io->used = bo->delta;
 }
 
-static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
+static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
 		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
@@ -1332,18 +1332,18 @@ static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->tiling == I915_TILING_NONE);
 	assert(bo->rq == NULL);
 
-	DBG(("%s: moving %d to vmap\n", __FUNCTION__, bo->handle));
-	list_add(&bo->list, &kgem->vmap);
+	DBG(("%s: moving %d to snoop cachee\n", __FUNCTION__, bo->handle));
+	list_add(&bo->list, &kgem->snoop);
 }
 
 static struct kgem_bo *
-search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
+search_snoop_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 {
 	struct kgem_bo *bo, *first = NULL;
 
 	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
 
-	if (list_is_empty(&kgem->vmap)) {
+	if (list_is_empty(&kgem->snoop)) {
 		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
 		if (!__kgem_throttle_retire(kgem, flags)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
@@ -1351,9 +1351,9 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		}
 	}
 
-	list_for_each_entry(bo, &kgem->vmap, list) {
+	list_for_each_entry(bo, &kgem->snoop, list) {
 		assert(bo->refcnt == 0);
-		assert(bo->vmap);
+		assert(bo->snoop);
 		assert(bo->proxy == NULL);
 		assert(bo->tiling == I915_TILING_NONE);
 		assert(bo->rq == NULL);
@@ -1371,7 +1371,7 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		bo->pitch = 0;
 		bo->delta = 0;
 
-		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
 		     __FUNCTION__, bo->handle, num_pages(bo)));
 		return bo;
 	}
@@ -1381,7 +1381,7 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		first->pitch = 0;
 		first->delta = 0;
 
-		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		DBG(("  %s: found handle=%d (num_pages=%d) in snoop cache\n",
 		     __FUNCTION__, first->handle, num_pages(first)));
 		return first;
 	}
@@ -1404,27 +1404,27 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (DBG_NO_CACHE)
 		goto destroy;
 
-	if (bo->vmap && !bo->flush) {
-		DBG(("%s: handle=%d is vmapped\n", __FUNCTION__, bo->handle));
+	if (bo->snoop && !bo->flush) {
+		DBG(("%s: handle=%d is snooped\n", __FUNCTION__, bo->handle));
 		assert(!bo->flush);
 		assert(list_is_empty(&bo->list));
 		if (bo->rq == NULL) {
 			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
-				DBG(("%s: handle=%d is vmapped, tracking until free\n",
+				DBG(("%s: handle=%d is snooped, tracking until free\n",
 				     __FUNCTION__, bo->handle));
 				list_add(&bo->request, &kgem->flushing);
 				bo->rq = &_kgem_static_request;
 			}
 		}
 		if (bo->rq == NULL)
-			kgem_bo_move_to_vmap(kgem, bo);
+			kgem_bo_move_to_snoop(kgem, bo);
 		return;
 	}
 
 	if (bo->io) {
 		struct kgem_bo *base;
 
-		assert(!bo->vmap);
+		assert(!bo->snoop);
 		base = malloc(sizeof(*base));
 		if (base) {
 			DBG(("%s: transferring io handle=%d to bo\n",
@@ -1452,7 +1452,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
-	assert(bo->vmap == false);
+	assert(bo->snoop == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
 	assert(bo->flush == false);
@@ -1576,8 +1576,8 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 		list_del(&bo->request);
 
 		if (!bo->refcnt) {
-			if (bo->vmap) {
-				kgem_bo_move_to_vmap(kgem, bo);
+			if (bo->snoop) {
+				kgem_bo_move_to_snoop(kgem, bo);
 			} else if (kgem_bo_set_purgeable(kgem, bo)) {
 				assert(bo->reusable);
 				kgem_bo_move_to_inactive(kgem, bo);
@@ -1641,12 +1641,12 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			if (bo->refcnt)
 				continue;
 
-			if (bo->vmap) {
+			if (bo->snoop) {
 				if (bo->needs_flush) {
 					list_add(&bo->request, &kgem->flushing);
 					bo->rq = &_kgem_static_request;
 				} else {
-					kgem_bo_move_to_vmap(kgem, bo);
+					kgem_bo_move_to_snoop(kgem, bo);
 				}
 				continue;
 			}
@@ -1738,9 +1738,10 @@ static void kgem_commit(struct kgem *kgem)
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
 		assert(next->request.prev == &bo->request);
 
-		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d -> offset=%x\n",
+		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d, snoop? %d -> offset=%x\n",
 		     __FUNCTION__, bo->handle, bo->proxy != NULL,
-		     bo->dirty, bo->needs_flush, (unsigned)bo->exec->offset));
+		     bo->dirty, bo->needs_flush, bo->snoop,
+		     (unsigned)bo->exec->offset));
 
 		assert(!bo->purged);
 		assert(bo->rq == rq || (bo->proxy->rq == rq));
@@ -1748,7 +1749,7 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 
-		if (!bo->refcnt && !bo->reusable && !bo->vmap) {
+		if (!bo->refcnt && !bo->reusable && !bo->snoop) {
 			kgem_bo_free(kgem, bo);
 			continue;
 		}
@@ -1834,7 +1835,7 @@ static void kgem_finish_buffers(struct kgem *kgem)
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
-				assert(!bo->base.vmap);
+				assert(!bo->base.snoop);
 				list_move(&bo->base.list,
 					  &kgem->active_buffers);
 				continue;
@@ -2285,7 +2286,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 
 
 	expire = 0;
-	list_for_each_entry(bo, &kgem->vmap, list) {
+	list_for_each_entry(bo, &kgem->snoop, list) {
 		if (bo->delta) {
 			expire = now - MAX_INACTIVE_TIME/2;
 			break;
@@ -2294,8 +2295,8 @@ bool kgem_expire_cache(struct kgem *kgem)
 		bo->delta = now;
 	}
 	if (expire) {
-		while (!list_is_empty(&kgem->vmap)) {
-			bo = list_last_entry(&kgem->vmap, struct kgem_bo, list);
+		while (!list_is_empty(&kgem->snoop)) {
+			bo = list_last_entry(&kgem->snoop, struct kgem_bo, list);
 
 			if (bo->delta > expire)
 				break;
@@ -2411,9 +2412,9 @@ void kgem_cleanup_cache(struct kgem *kgem)
 						     struct kgem_bo, list));
 	}
 
-	while (!list_is_empty(&kgem->vmap))
+	while (!list_is_empty(&kgem->snoop))
 		kgem_bo_free(kgem,
-			     list_last_entry(&kgem->vmap,
+			     list_last_entry(&kgem->snoop,
 					     struct kgem_bo, list));
 
 	while (__kgem_freed_bo) {
@@ -3262,10 +3263,10 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
 	     __FUNCTION__, width, height, bpp, stride));
 
-	bo = search_vmap_cache(kgem, NUM_PAGES(size), 0);
+	bo = search_snoop_cache(kgem, NUM_PAGES(size), 0);
 	if (bo) {
 		assert(bo->tiling == I915_TILING_NONE);
-		assert(bo->vmap);
+		assert(bo->snoop);
 		bo->refcnt = 1;
 		bo->pitch = stride;
 		return bo;
@@ -3284,7 +3285,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		}
 
 		bo->reusable = false;
-		bo->vmap = true;
+		bo->snoop = true;
 
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
@@ -3801,7 +3802,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	}
 
 	bo->reusable = false;
-	bo->vmap = true;
+	bo->snoop = true;
 
 	debug_alloc__bo(kgem, bo);
 
@@ -3964,7 +3965,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	struct kgem_buffer *bo;
 	struct kgem_bo *old;
 
-	old = search_vmap_cache(kgem, alloc, 0);
+	old = search_snoop_cache(kgem, alloc, 0);
 	if (old) {
 		if (!old->io) {
 			bo = buffer_alloc();
@@ -3980,7 +3981,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
 		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
 
-		assert(bo->base.vmap);
+		assert(bo->base.snoop);
 		assert(bo->base.tiling == I915_TILING_NONE);
 		assert(num_pages(&bo->base) >= alloc);
 		assert(bo->mmapped == true);
@@ -4040,7 +4041,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->need_io == false);
 
 		bo->base.reusable = false;
-		bo->base.vmap = true;
+		bo->base.snoop = true;
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem == NULL) {
@@ -4071,14 +4072,14 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 
 		debug_alloc(kgem, alloc);
 		__kgem_bo_init(&bo->base, handle, alloc);
-		DBG(("%s: created vmap handle=%d for buffer\n",
+		DBG(("%s: created snoop handle=%d for buffer\n",
 		     __FUNCTION__, bo->base.handle));
 
 		assert(bo->mmapped == true);
 		assert(bo->need_io == false);
 
 		bo->base.refcnt = 1;
-		bo->base.vmap = true;
+		bo->base.snoop = true;
 		bo->base.reusable = false;
 		bo->base.map = MAKE_VMAP_MAP(bo->mem);
 
@@ -4133,7 +4134,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (flags & KGEM_BUFFER_WRITE) {
 			if ((bo->write & KGEM_BUFFER_WRITE) == 0 ||
 			    (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
-			     !bo->base.vmap)) {
+			     !bo->base.snoop)) {
 				DBG(("%s: skip write %x buffer, need %x\n",
 				     __FUNCTION__, bo->write, flags));
 				continue;
@@ -4161,7 +4162,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			assert(bo->base.io);
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
-			assert(!bo->base.vmap);
+			assert(!bo->base.snoop);
 
 			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
@@ -4546,8 +4547,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 
 	bo = (struct kgem_buffer *)_bo;
 
-	DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
-	     offset, length, bo->base.vmap));
+	DBG(("%s(offset=%d, length=%d, snooped=%d)\n", __FUNCTION__,
+	     offset, length, bo->base.snoop));
 
 	if (bo->mmapped) {
 		struct drm_i915_gem_set_domain set_domain;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index ff592e0..351aa32 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -82,7 +82,7 @@ struct kgem_bo {
 	uint32_t dirty : 1;
 	uint32_t domain : 2;
 	uint32_t needs_flush : 1;
-	uint32_t vmap : 1;
+	uint32_t snoop : 1;
 	uint32_t io : 1;
 	uint32_t flush : 1;
 	uint32_t scanout : 1;
@@ -124,7 +124,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list vmap;
+	struct list snoop;
 	struct list batch_buffers, active_buffers;
 	struct list requests;
 	struct kgem_request *next_request;
@@ -503,11 +503,11 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
 }
 
-static inline bool kgem_bo_is_vmap(struct kgem_bo *bo)
+static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
 {
 	while (bo->proxy)
 		bo = bo->proxy;
-	return bo->vmap;
+	return bo->snoop;
 }
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dee8c02..f792430 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -422,8 +422,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 						  pixmap->drawable.bitsPerPixel,
 						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
 		if (priv->cpu_bo) {
-			DBG(("%s: allocated CPU handle=%d (vmap? %d)\n", __FUNCTION__,
-			     priv->cpu_bo->handle, priv->cpu_bo->vmap));
+			DBG(("%s: allocated CPU handle=%d (snooped? %d)\n", __FUNCTION__,
+			     priv->cpu_bo->handle, priv->cpu_bo->snoop));
 
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
 			priv->stride = priv->cpu_bo->pitch;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index a8b5a06..17ac814 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -310,7 +310,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 		return NULL;
 	}
 
-	if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
+	if (priv->cpu_bo->snoop && priv->source_count > SOURCE_BIAS) {
 		DBG(("%s: promoting snooped CPU bo due to reuse\n",
 		     __FUNCTION__));
 		return NULL;
@@ -558,7 +558,7 @@ sna_render_pixmap_bo(struct sna *sna,
 
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
-		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
+		    !priv->cpu_bo->snoop && priv->cpu_bo->pitch < 4096) {
 			DBG(("%s: CPU all damaged\n", __FUNCTION__));
 			channel->bo = priv->cpu_bo;
 			goto done;
commit 6acc9e6a6e1de2a11597c810e02f793774cef2dd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 21 12:07:46 2012 +0100

    sna: Fix role reversal of __kgem_bo_size() and kgem_bo_size()!
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9668976..ff592e0 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -421,13 +421,13 @@ static inline int __kgem_buffer_size(struct kgem_bo *bo)
 	return bo->size.bytes;
 }
 
-static inline int kgem_bo_size(struct kgem_bo *bo)
+static inline int __kgem_bo_size(struct kgem_bo *bo)
 {
 	assert(!(bo->proxy && bo->io));
 	return PAGE_SIZE * bo->size.pages.count;
 }
 
-static inline int __kgem_bo_size(struct kgem_bo *bo)
+static inline int kgem_bo_size(struct kgem_bo *bo)
 {
 	if (bo->io)
 		return __kgem_buffer_size(bo);
commit 286b0e1a48cab85191dfbb112c8dd14aeaa70956
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 16:04:37 2012 +0100

    sna: Refresh experimental userptr vmap support
    
    Bring the code up to date with both kernel interface changes and internal
    adjustments following the creation of CPU buffers with set-cacheing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 577fa6c..5af0a9e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -90,8 +90,8 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
 #define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
 #define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
+#define __MAP_TYPE(ptr) ((uintptr_t)(ptr) & 3)
 
-#if defined(USE_VMAP)
 #define LOCAL_I915_GEM_VMAP       0x32
 #define LOCAL_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)
 struct local_i915_gem_vmap {
@@ -101,7 +101,6 @@ struct local_i915_gem_vmap {
 #define I915_VMAP_READ_ONLY 0x1
 	uint32_t handle;
 };
-#endif
 
 #define UNCACHED	0
 #define SNOOPED		1
@@ -196,6 +195,26 @@ static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
 	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
 }
 
+static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
+{
+	struct local_i915_gem_vmap vmap;
+
+	VG_CLEAR(vmap);
+	vmap.user_ptr = (uintptr_t)ptr;
+	vmap.user_size = size;
+	vmap.flags = 0;
+	if (read_only)
+		vmap.flags |= I915_VMAP_READ_ONLY;
+
+	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
+		DBG(("%s: failed to map %p + %d bytes: %d\n",
+		     __FUNCTION__, ptr, size, errno));
+		return 0;
+	}
+
+	return vmap.handle;
+}
+
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
 {
 	if (flags & CREATE_NO_RETIRE) {
@@ -227,6 +246,7 @@ static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
 	     bo->handle, bytes(bo)));
+	assert(bo->proxy == NULL);
 
 retry_gtt:
 	VG_CLEAR(mmap_arg);
@@ -700,6 +720,7 @@ static bool test_has_vmap(struct kgem *kgem)
 {
 #if defined(USE_VMAP)
 	uint32_t handle;
+	void *ptr;
 
 	if (DBG_NO_VMAP)
 		return false;
@@ -808,7 +829,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->active_buffers);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
-	list_init(&kgem->sync_list);
 	list_init(&kgem->large);
 	list_init(&kgem->vmap);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
@@ -1187,8 +1207,10 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	if (IS_VMAP_MAP(bo->map)) {
 		assert(bo->rq == NULL);
 		assert(MAP(bo->map) != bo || bo->io);
-		if (bo != MAP(bo->map))
+		if (bo != MAP(bo->map)) {
+			DBG(("%s: freeing vmap base\n", __FUNCTION__));
 			free(MAP(bo->map));
+		}
 		bo->map = NULL;
 	}
 	if (bo->map)
@@ -1209,8 +1231,7 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 					    struct kgem_bo *bo)
 {
-	DBG(("%s: moving %d from flush to inactive\n",
-	     __FUNCTION__, bo->handle));
+	DBG(("%s: moving handle=%d to inactive\n", __FUNCTION__, bo->handle));
 
 	assert(bo->reusable);
 	assert(bo->rq == NULL);
@@ -1246,6 +1267,8 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
 						struct kgem_bo *bo)
 {
+	DBG(("%s: removing handle=%d from inactive\n", __FUNCTION__, bo->handle));
+
 	list_del(&bo->list);
 	assert(bo->rq == NULL);
 	if (bo->map) {
@@ -1258,6 +1281,8 @@ inline static void kgem_bo_remove_from_inactive(struct kgem *kgem,
 inline static void kgem_bo_remove_from_active(struct kgem *kgem,
 					      struct kgem_bo *bo)
 {
+	DBG(("%s: removing handle=%d from active\n", __FUNCTION__, bo->handle));
+
 	list_del(&bo->list);
 	if (bo->rq == &_kgem_static_request)
 		list_del(&bo->request);
@@ -1298,13 +1323,14 @@ static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
+		DBG(("%s handle=%d discarding large CPU buffer (%d >%d pages)\n",
+		     __FUNCTION__, bo->handle, num_pages(bo), kgem->max_cpu_size >> 13));
 		kgem_bo_free(kgem, bo);
 		return;
 	}
 
 	assert(bo->tiling == I915_TILING_NONE);
 	assert(bo->rq == NULL);
-	assert(!bo->io);
 
 	DBG(("%s: moving %d to vmap\n", __FUNCTION__, bo->handle));
 	list_add(&bo->list, &kgem->vmap);
@@ -1328,6 +1354,7 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 	list_for_each_entry(bo, &kgem->vmap, list) {
 		assert(bo->refcnt == 0);
 		assert(bo->vmap);
+		assert(bo->proxy == NULL);
 		assert(bo->tiling == I915_TILING_NONE);
 		assert(bo->rq == NULL);
 
@@ -1369,6 +1396,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(list_is_empty(&bo->list));
 	assert(bo->refcnt == 0);
 	assert(!bo->purged);
+	assert(bo->proxy == NULL);
 
 	bo->binding.offset = 0;
 	kgem_bo_clear_scanout(kgem, bo);
@@ -1376,15 +1404,33 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (DBG_NO_CACHE)
 		goto destroy;
 
+	if (bo->vmap && !bo->flush) {
+		DBG(("%s: handle=%d is vmapped\n", __FUNCTION__, bo->handle));
+		assert(!bo->flush);
+		assert(list_is_empty(&bo->list));
+		if (bo->rq == NULL) {
+			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
+				DBG(("%s: handle=%d is vmapped, tracking until free\n",
+				     __FUNCTION__, bo->handle));
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			}
+		}
+		if (bo->rq == NULL)
+			kgem_bo_move_to_vmap(kgem, bo);
+		return;
+	}
+
 	if (bo->io) {
 		struct kgem_bo *base;
 
+		assert(!bo->vmap);
 		base = malloc(sizeof(*base));
 		if (base) {
 			DBG(("%s: transferring io handle=%d to bo\n",
 			     __FUNCTION__, bo->handle));
 			/* transfer the handle to a minimum bo */
-			memcpy(base, bo, sizeof (*base));
+			memcpy(base, bo, sizeof(*base));
 			base->reusable = true;
 			base->io = false;
 			list_init(&base->list);
@@ -1395,21 +1441,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		}
 	}
 
-	if (bo->vmap) {
-		assert(!bo->flush);
-		DBG(("%s: handle=%d is vmapped, tracking until free\n",
-		     __FUNCTION__, bo->handle));
-		if (bo->rq == NULL) {
-			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
-				list_add(&bo->request, &kgem->flushing);
-				bo->rq = &_kgem_static_request;
-			}
-		}
-		if (bo->rq == NULL)
-			kgem_bo_move_to_vmap(kgem, bo);
-		return;
-	}
-
 	if (!bo->reusable) {
 		DBG(("%s: handle=%d, not reusable\n",
 		     __FUNCTION__, bo->handle));
@@ -1808,6 +1839,8 @@ static void kgem_finish_buffers(struct kgem *kgem)
 					  &kgem->active_buffers);
 				continue;
 			}
+			DBG(("%s: discarding mmapped buffer, used=%d, map type=%d\n",
+			     __FUNCTION__, bo->used, (int)__MAP_TYPE(bo->base.map)));
 			goto decouple;
 		}
 
@@ -2127,24 +2160,15 @@ void _kgem_submit(struct kgem *kgem)
 			}
 #if !NDEBUG
 			if (ret < 0) {
-				int i;
-
+				ret = errno;
 				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
 				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
 				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
 
-				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
-				if (i != -1) {
-					ret = write(i, kgem->batch, batch_end*sizeof(uint32_t));
-					close(i);
-					(void)ret;
-				}
-
 				for (i = 0; i < kgem->nexec; i++) {
-					struct kgem_request *rq = kgem->next_request;
 					struct kgem_bo *bo, *found = NULL;
 
-					list_for_each_entry(bo, &rq->buffers, request) {
+					list_for_each_entry(bo, &kgem->next_request->buffers, request) {
 						if (bo->handle == kgem->exec[i].handle) {
 							found = bo;
 							break;
@@ -2169,7 +2193,14 @@ void _kgem_submit(struct kgem *kgem)
 					       kgem->reloc[i].write_domain,
 					       (int)kgem->reloc[i].presumed_offset);
 				}
-				FatalError("SNA: failed to submit batchbuffer\n");
+
+				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
+				if (i != -1) {
+					i = write(i, kgem->batch, batch_end*sizeof(uint32_t));
+					(void)i;
+				}
+
+				FatalError("SNA: failed to submit batchbuffer, errno=%d\n", ret);
 			}
 #endif
 
@@ -2442,6 +2473,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		list_for_each_entry(bo, cache, vma) {
 			assert(IS_CPU_MAP(bo->map) == for_cpu);
 			assert(bucket(bo) == cache_bucket(num_pages));
+			assert(bo->proxy == NULL);
 
 			if (num_pages > num_pages(bo)) {
 				DBG(("inactive too small: %d < %d\n",
@@ -2481,6 +2513,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		assert(bo->refcnt == 0);
 		assert(bo->reusable);
 		assert(!!bo->rq == !!use_active);
+		assert(bo->proxy == NULL);
 
 		if (num_pages > num_pages(bo))
 			continue;
@@ -2547,6 +2580,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
 		     __FUNCTION__, bo->handle, num_pages(bo),
 		     use_active ? "active" : "inactive"));
+		assert(list_is_empty(&bo->list));
 		assert(use_active || bo->domain != DOMAIN_GPU);
 		assert(!bo->needs_flush || use_active);
 		//assert(use_active || !kgem_busy(kgem, bo->handle));
@@ -2563,9 +2597,10 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 
 		first->pitch = 0;
 		first->delta = 0;
-		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
+		DBG(("  %s: found handle=%d (near-miss) (num_pages=%d) in linear %s cache\n",
 		     __FUNCTION__, first->handle, num_pages(first),
 		     use_active ? "active" : "inactive"));
+		assert(list_is_empty(&first->list));
 		assert(use_active || first->domain != DOMAIN_GPU);
 		assert(!first->needs_flush || use_active);
 		//assert(use_active || !kgem_busy(kgem, first->handle));
@@ -3677,10 +3712,12 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap mmap_arg;
 
-	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bytes(bo)));
+	DBG(("%s(handle=%d, size=%d, mapped? %d)\n",
+	     __FUNCTION__, bo->handle, bytes(bo), (int)__MAP_TYPE(bo->map)));
 	assert(!bo->purged);
 	assert(list_is_empty(&bo->list));
 	assert(!bo->scanout);
+	assert(bo->proxy == NULL);
 
 	if (IS_CPU_MAP(bo->map))
 		return MAP(bo->map);
@@ -3743,27 +3780,6 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 	return flink.name;
 }
 
-#if defined(USE_VMAP)
-static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
-{
-	struct local_i915_gem_vmap vmap;
-
-	VG_CLEAR(vmap);
-	vmap.user_ptr = (uintptr_t)ptr;
-	vmap.user_size = size;
-	vmap.flags = 0;
-	if (read_only)
-		vmap.flags |= I915_VMAP_READ_ONLY;
-
-	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
-		DBG(("%s: failed to map %p + %d bytes: %d\n",
-		     __FUNCTION__, ptr, size, errno));
-		return 0;
-	}
-
-	return vmap.handle;
-}
-
 struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				void *ptr, uint32_t size,
 				bool read_only)
@@ -3793,18 +3809,6 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle));
 	return bo;
 }
-#else
-static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
-{
-	return 0;
-}
-struct kgem_bo *kgem_create_map(struct kgem *kgem,
-				void *ptr, uint32_t size,
-				bool read_only)
-{
-	return 0;
-}
-#endif
 
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
@@ -3852,27 +3856,6 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
-void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
-{
-	assert(bo->vmap);
-	assert(!bo->reusable);
-	assert(list_is_empty(&bo->list));
-	list_add(&bo->list, &kgem->sync_list);
-	bo->flush = true;
-}
-
-void kgem_sync(struct kgem *kgem)
-{
-	struct kgem_bo *bo;
-
-	DBG(("%s\n", __FUNCTION__));
-
-	list_for_each_entry(bo, &kgem->sync_list, list) {
-		kgem_bo_submit(kgem, bo);
-		kgem_bo_sync__cpu(kgem, bo);
-	}
-}
-
 void kgem_clear_dirty(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
@@ -3914,16 +3897,33 @@ struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
 	return bo;
 }
 
-static struct kgem_buffer *buffer_alloc(int num_pages)
+static struct kgem_buffer *
+buffer_alloc(void)
+{
+	struct kgem_buffer *bo;
+
+	bo = malloc(sizeof(*bo));
+	if (bo == NULL)
+		return NULL;
+
+	bo->mem = NULL;
+	bo->need_io = false;
+	bo->mmapped = true;
+
+	return bo;
+}
+
+static struct kgem_buffer *
+buffer_alloc_with_data(int num_pages)
 {
 	struct kgem_buffer *bo;
 
 	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
-	if (bo) {
-		bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
-		bo->mmapped = false;
-	}
+	if (bo == NULL)
+		return NULL;
 
+	bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
+	bo->mmapped = false;
 	return bo;
 }
 
@@ -3936,6 +3936,28 @@ use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 	return true;
 }
 
+static void
+init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
+{
+	DBG(("%s: reusing handle=%d for buffer\n",
+	     __FUNCTION__, old->handle));
+
+	assert(old->proxy == NULL);
+
+	memcpy(&bo->base, old, sizeof(*old));
+	if (old->rq)
+		list_replace(&old->request, &bo->base.request);
+	else
+		list_init(&bo->base.request);
+	list_replace(&old->vma, &bo->base.vma);
+	list_init(&bo->base.list);
+	free(old);
+
+	assert(bo->base.tiling == I915_TILING_NONE);
+
+	bo->base.refcnt = 1;
+}
+
 static struct kgem_buffer *
 search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
@@ -3944,18 +3966,16 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 
 	old = search_vmap_cache(kgem, alloc, 0);
 	if (old) {
-		bo = malloc(sizeof(*bo));
-		if (bo == NULL)
-			return NULL;
+		if (!old->io) {
+			bo = buffer_alloc();
+			if (bo == NULL)
+				return NULL;
 
-		memcpy(&bo->base, old, sizeof(*old));
-		if (old->rq)
-			list_replace(&old->request, &bo->base.request);
-		else
-			list_init(&bo->base.request);
-		list_replace(&old->vma, &bo->base.vma);
-		list_init(&bo->base.list);
-		free(old);
+			init_buffer_from_bo(bo, old);
+		} else {
+			bo = (struct kgem_buffer *)old;
+			bo->base.refcnt = 1;
+		}
 
 		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
 		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
@@ -3963,51 +3983,32 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 		assert(bo->base.vmap);
 		assert(bo->base.tiling == I915_TILING_NONE);
 		assert(num_pages(&bo->base) >= alloc);
+		assert(bo->mmapped == true);
+		assert(bo->need_io == false);
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem) {
-			bo->mmapped = true;
-			bo->need_io = false;
-			bo->base.io = true;
-			bo->base.refcnt = 1;
-
-			return bo;
-		} else
+		if (bo->mem == NULL) {
+			bo->base.refcnt = 0;
 			kgem_bo_free(kgem, &bo->base);
+			bo = NULL;
+		}
+
+		return bo;
 	}
 
 	return NULL;
 }
 
-static void
-init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
-{
-	DBG(("%s: reusing handle=%d for buffer\n",
-	     __FUNCTION__, old->handle));
-
-	memcpy(&bo->base, old, sizeof(*old));
-	if (old->rq)
-		list_replace(&old->request, &bo->base.request);
-	else
-		list_init(&bo->base.request);
-	list_replace(&old->vma, &bo->base.vma);
-	list_init(&bo->base.list);
-	free(old);
-	bo->base.refcnt = 1;
-
-	assert(bo->base.tiling == I915_TILING_NONE);
-}
-
 static struct kgem_buffer *
 create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
 	struct kgem_buffer *bo;
+	uint32_t handle;
 
 	if (kgem->has_cacheing) {
 		struct kgem_bo *old;
-		uint32_t handle;
 
-		bo = malloc(sizeof(*bo));
+		bo = buffer_alloc();
 		if (bo == NULL)
 			return NULL;
 
@@ -4015,62 +4016,73 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 					 CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
 		if (old) {
 			init_buffer_from_bo(bo, old);
-			return bo;
-		}
+		} else {
+			handle = gem_create(kgem->fd, alloc);
+			if (handle == 0) {
+				free(bo);
+				return NULL;
+			}
 
-		handle = gem_create(kgem->fd, alloc);
-		if (handle == 0) {
-			free(bo);
-			return NULL;
-		}
+			if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
+				gem_close(kgem->fd, handle);
+				free(bo);
+				return NULL;
+			}
 
-		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
-			gem_close(kgem->fd, handle);
-			free(bo);
-			return NULL;
+			debug_alloc(kgem, alloc);
+			__kgem_bo_init(&bo->base, handle, alloc);
+			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
+			     __FUNCTION__, bo->base.handle, alloc));
 		}
 
-		debug_alloc(kgem, alloc);
-		__kgem_bo_init(&bo->base, handle, alloc);
-		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
-		     __FUNCTION__, bo->base.handle, alloc));
+		assert(bo->base.refcnt == 1);
+		assert(bo->mmapped == true);
+		assert(bo->need_io == false);
 
 		bo->base.reusable = false;
 		bo->base.vmap = true;
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem) {
-			bo->mmapped = true;
-			bo->need_io = false;
-			bo->base.io = true;
-			return bo;
-		} else {
+		if (bo->mem == NULL) {
 			bo->base.refcnt = 0; /* for valgrind */
 			kgem_bo_free(kgem, &bo->base);
+			bo = NULL;
 		}
+		return bo;
 	}
 
 	if (kgem->has_vmap) {
-		bo = buffer_alloc(alloc);
-		if (bo) {
-			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
-						   alloc * PAGE_SIZE, false);
-			if (handle == 0 ||
-			    !__kgem_bo_init(&bo->base, handle, alloc)) {
-				free(bo);
-			} else {
-				DBG(("%s: created vmap handle=%d for buffer\n",
-				     __FUNCTION__, bo->base.handle));
+		bo = buffer_alloc();
+		if (bo == NULL)
+			return NULL;
 
-				bo->base.io = true;
-				bo->base.vmap = true;
-				bo->base.map = MAKE_VMAP_MAP(bo);
-				bo->mmapped = true;
-				bo->need_io = false;
+		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
+		if (posix_memalign(&bo->mem, PAGE_SIZE, alloc *PAGE_SIZE)) {
+			free(bo);
+			return NULL;
+		}
 
-				return bo;
-			}
+		handle = gem_vmap(kgem->fd, bo->mem, alloc * PAGE_SIZE, false);
+		if (handle == 0) {
+			free(bo->mem);
+			free(bo);
+			return NULL;
 		}
+
+		debug_alloc(kgem, alloc);
+		__kgem_bo_init(&bo->base, handle, alloc);
+		DBG(("%s: created vmap handle=%d for buffer\n",
+		     __FUNCTION__, bo->base.handle));
+
+		assert(bo->mmapped == true);
+		assert(bo->need_io == false);
+
+		bo->base.refcnt = 1;
+		bo->base.vmap = true;
+		bo->base.reusable = false;
+		bo->base.map = MAKE_VMAP_MAP(bo->mem);
+
+		return bo;
 	}
 
 	return NULL;
@@ -4178,7 +4190,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
 	if (kgem->has_llc) {
-		bo = malloc(sizeof(*bo));
+		bo = buffer_alloc();
 		if (bo == NULL)
 			return NULL;
 
@@ -4190,35 +4202,36 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old == NULL)
 			old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP);
 		if (old) {
+			DBG(("%s: found LLC handle=%d for buffer\n",
+			     __FUNCTION__, old->handle));
+
 			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
-			if (handle == 0 ||
-			    !__kgem_bo_init(&bo->base, handle, alloc)) {
+			if (handle == 0) {
 				free(bo);
 				return NULL;
 			}
-			DBG(("%s: created handle=%d for buffer\n",
+			__kgem_bo_init(&bo->base, handle, alloc);
+			DBG(("%s: created LLC handle=%d for buffer\n",
 			     __FUNCTION__, bo->base.handle));
 
 			debug_alloc(kgem, alloc);
 		}
 
+		assert(bo->mmapped);
+		assert(!bo->need_io);
+
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem) {
 			if (flags & KGEM_BUFFER_WRITE)
 				kgem_bo_sync__cpu(kgem, &bo->base);
 
-			bo->need_io = false;
-			bo->base.io = true;
-			bo->mmapped = true;
-
 			alloc = num_pages(&bo->base);
 			goto init;
 		} else {
 			bo->base.refcnt = 0; /* for valgrind */
 			kgem_bo_free(kgem, &bo->base);
-			bo = NULL;
 		}
 	}
 
@@ -4271,25 +4284,23 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
 
-			bo = malloc(sizeof(*bo));
+			bo = buffer_alloc();
 			if (bo == NULL)
 				return NULL;
 
 			init_buffer_from_bo(bo, old);
 			assert(num_pages(&bo->base) >= NUM_PAGES(size));
 
+			assert(bo->mmapped);
+			assert(bo->base.refcnt == 1);
+
 			bo->mem = kgem_bo_map(kgem, &bo->base);
 			if (bo->mem) {
-				bo->need_io = false;
-				bo->base.io = true;
-				bo->mmapped = true;
-
 				alloc = num_pages(&bo->base);
 				goto init;
 			} else {
 				bo->base.refcnt = 0;
 				kgem_bo_free(kgem, &bo->base);
-				bo = NULL;
 			}
 		}
 	}
@@ -4301,6 +4312,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (use_snoopable_buffer(kgem, flags)) {
 		bo = search_snoopable_buffer(kgem, alloc);
 		if (bo) {
+			if (flags & KGEM_BUFFER_WRITE)
+				kgem_bo_sync__cpu(kgem, &bo->base);
 			flags &= ~KGEM_BUFFER_INPLACE;
 			alloc = num_pages(&bo->base);
 			goto init;
@@ -4326,13 +4339,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		DBG(("%s: reusing ordinary handle %d for io\n",
 		     __FUNCTION__, old->handle));
 		alloc = num_pages(old);
-		bo = buffer_alloc(alloc);
+		bo = buffer_alloc_with_data(alloc);
 		if (bo == NULL)
 			return NULL;
 
 		init_buffer_from_bo(bo, old);
 		bo->need_io = flags & KGEM_BUFFER_WRITE;
-		bo->base.io = true;
 	} else {
 		if (use_snoopable_buffer(kgem, flags)) {
 			bo = create_snoopable_buffer(kgem, alloc);
@@ -4340,7 +4352,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				goto init;
 		}
 
-		bo = malloc(sizeof(*bo));
+		bo = buffer_alloc();
 		if (bo == NULL)
 			return NULL;
 
@@ -4349,59 +4361,59 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old) {
 			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
+
 			alloc = num_pages(old);
 			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
-			if (handle == 0 ||
-			    !__kgem_bo_init(&bo->base, handle, alloc)) {
+			if (handle == 0) {
 				free(bo);
 				return NULL;
 			}
+
 			DBG(("%s: created handle=%d for buffer\n",
 			     __FUNCTION__, bo->base.handle));
 
+			__kgem_bo_init(&bo->base, handle, alloc);
 			debug_alloc(kgem, alloc * PAGE_SIZE);
 		}
 
+		assert(bo->mmapped);
+		assert(!bo->need_io);
+		assert(bo->base.refcnt == 1);
+
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem != NULL) {
 			if (flags & KGEM_BUFFER_WRITE)
 				kgem_bo_sync__cpu(kgem, &bo->base);
-
-			bo->need_io = false;
-			bo->base.io = true;
-			bo->mmapped = true;
 			goto init;
 		}
 
 		DBG(("%s: failing back to new pwrite buffer\n", __FUNCTION__));
 		old = &bo->base;
-		bo = buffer_alloc(alloc);
+		bo = buffer_alloc_with_data(alloc);
 		if (bo == NULL) {
 			free(old);
 			return NULL;
 		}
 
-		memcpy(&bo->base, old, sizeof(*old));
-		free(old);
+		init_buffer_from_bo(bo, old);
 
 		assert(bo->mem);
 		assert(!bo->mmapped);
+		assert(bo->base.refcnt == 1);
 
-		list_init(&bo->base.request);
-		list_init(&bo->base.vma);
-		list_init(&bo->base.list);
-		bo->base.refcnt = 1;
 		bo->need_io = flags & KGEM_BUFFER_WRITE;
-		bo->base.io = true;
 	}
 init:
+	bo->base.io = true;
 	bo->base.reusable = false;
+	assert(bo->base.refcnt == 1);
 	assert(num_pages(&bo->base) == alloc);
-	assert(bo->base.io);
 	assert(!bo->need_io || !bo->base.needs_flush);
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
+	assert(bo->mem);
+	assert(!bo->mmapped || bo->base.map != NULL);
 
 	bo->used = size;
 	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
@@ -4410,8 +4422,8 @@ init:
 	assert(list_is_empty(&bo->base.list));
 	list_add(&bo->base.list, &kgem->batch_buffers);
 
-	DBG(("%s(pages=%d) new handle=%d\n",
-	     __FUNCTION__, alloc, bo->base.handle));
+	DBG(("%s(pages=%d) new handle=%d, used=%d, write=%d\n",
+	     __FUNCTION__, alloc, bo->base.handle, bo->used, bo->write));
 
 done:
 	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 165e7b9..9668976 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -127,7 +127,6 @@ struct kgem {
 	struct list vmap;
 	struct list batch_buffers, active_buffers;
 	struct list requests;
-	struct list sync_list;
 	struct kgem_request *next_request;
 
 	struct {
@@ -407,7 +406,6 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
-void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
@@ -547,8 +545,6 @@ static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
 	list_move(&bo->request, &kgem->next_request->buffers);
 }
 
-void kgem_sync(struct kgem *kgem);
-
 #define KGEM_BUFFER_WRITE	0x1
 #define KGEM_BUFFER_INPLACE	0x2
 #define KGEM_BUFFER_LAST	0x4
diff --git a/src/sna/sna.h b/src/sna/sna.h
index f274de9..91db995 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -121,6 +121,7 @@ struct sna_pixmap {
 	uint16_t source_count;
 	uint8_t pinned :1;
 	uint8_t mapped :1;
+	uint8_t shm :1;
 	uint8_t clear :1;
 	uint8_t undamaged :1;
 	uint8_t create :3;
@@ -199,7 +200,7 @@ struct sna {
 
 	int vblank_interval;
 
-	struct list dirty_pixmaps;
+	struct list flush_pixmaps;
 	struct list active_pixmaps;
 	struct list inactive_clock[2];
 
@@ -415,6 +416,7 @@ PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 				   unsigned flags);
 PixmapPtr sna_pixmap_create_unattached(ScreenPtr screen,
 				       int width, int height, int depth);
+void sna_pixmap_destroy(PixmapPtr pixmap);
 
 #define MOVE_WRITE 0x1
 #define MOVE_READ 0x2
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5466f38..dee8c02 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,7 +60,7 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
-#define USE_SHM_VMAP 0
+#define USE_SHM_VMAP 1
 
 #define MIGRATE_ALL 0
 #define DBG_NO_CPU_UPLOAD 0
@@ -387,6 +387,7 @@ static void sna_pixmap_free_gpu(struct sna *sna, struct sna_pixmap *priv)
 	}
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		priv->pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
@@ -404,17 +405,13 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 		     bool from_gpu)
 {
 	/* Restore after a GTT mapping? */
+	assert(!priv->shm);
 	if (priv->ptr)
 		goto done;
 
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-#ifdef DEBUG_MEMORY
-	sna->debug_memory.shadow_pixels_allocs++;
-	sna->debug_memory.shadow_pixels_bytes += priv->stride * pixmap->drawable.height;
-#endif
-
 	if (priv->create & KGEM_CAN_CREATE_CPU) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
@@ -453,14 +450,9 @@ done:
 
 static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 {
-	assert(priv->stride);
 	assert(priv->cpu_damage == NULL);
 	assert(list_is_empty(&priv->list));
 
-#ifdef DEBUG_MEMORY
-	sna->debug_memory.shadow_pixels_allocs--;
-	sna->debug_memory.shadow_pixels_bytes -= priv->stride * priv->pixmap->drawable.height;
-#endif
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
@@ -482,39 +474,6 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		priv->pixmap->devPrivate.ptr = NULL;
 }
 
-static bool sna_destroy_private(PixmapPtr pixmap)
-{
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
-
-	if (priv == NULL)
-		return true;
-
-	list_del(&priv->list);
-	list_del(&priv->inactive);
-
-	assert_pixmap_damage(pixmap);
-
-	sna_damage_destroy(&priv->gpu_damage);
-	sna_damage_destroy(&priv->cpu_damage);
-
-	/* Always release the gpu bo back to the lower levels of caching */
-	if (priv->gpu_bo)
-		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
-
-	if (priv->ptr)
-		sna_pixmap_free_cpu(sna, priv);
-
-	if (!sna->freed_pixmap && priv->header) {
-		sna->freed_pixmap = pixmap;
-		assert(priv->ptr == NULL);
-		return false;
-	}
-
-	free(priv);
-	return true;
-}
-
 static inline uint32_t default_tiling(PixmapPtr pixmap,
 				      uint32_t tiling)
 {
@@ -619,6 +578,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
@@ -781,12 +741,25 @@ sna_pixmap_create_shm(ScreenPtr screen,
 	struct sna_pixmap *priv;
 	PixmapPtr pixmap;
 
-	DBG(("%s(%d, %d, %d)\n", __FUNCTION__,
-	     width, height, depth));
+	DBG(("%s(%d, %d, %d)\n", __FUNCTION__, width, height, depth));
+
+	if (wedged(sna)) {
+		pixmap = sna_pixmap_create_unattached(screen, 0, 0, depth);
+		if (pixmap == NULL)
+			return NULL;
+
+		if (!screen->ModifyPixmapHeader(pixmap, width, height, depth,
+						bpp, pitch, addr)) {
+			screen->DestroyPixmap(pixmap);
+			return NULL;
+		}
+
+		return pixmap;
+	}
 
 	if (sna->freed_pixmap) {
 		pixmap = sna->freed_pixmap;
-		sna->freed_pixmap = NULL;
+		sna->freed_pixmap = pixmap->devPrivate.ptr;
 
 		pixmap->usage_hint = -1;
 		pixmap->refcnt = 1;
@@ -828,10 +801,11 @@ sna_pixmap_create_shm(ScreenPtr screen,
 		return GetScratchPixmapHeader(screen, width, height, depth,
 					      bpp, pitch, addr);
 	}
-	kgem_bo_set_sync(&sna->kgem, priv->cpu_bo);
-	sna_accel_watch_flush(sna, 1);
+	priv->cpu_bo->flush = true;
 	priv->cpu_bo->pitch = pitch;
+	sna_accel_watch_flush(sna, 1);
 
+	priv->shm = true;
 	priv->header = true;
 	sna_damage_all(&priv->cpu_damage, width, height);
 
@@ -876,7 +850,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	/* you promise never to access this via the cpu... */
 	if (sna->freed_pixmap) {
 		pixmap = sna->freed_pixmap;
-		sna->freed_pixmap = NULL;
+		sna->freed_pixmap = pixmap->devPrivate.ptr;
 
 		pixmap->usage_hint = CREATE_PIXMAP_USAGE_SCRATCH;
 		pixmap->refcnt = 1;
@@ -1031,16 +1005,53 @@ fallback:
 
 static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 {
+	struct sna *sna;
+	struct sna_pixmap *priv;
+
 	if (--pixmap->refcnt)
 		return TRUE;
 
-	if (!sna_destroy_private(pixmap))
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		FreePixmap(pixmap);
 		return TRUE;
+	}
+
+	assert_pixmap_damage(pixmap);
+
+	list_del(&priv->list);
+	list_del(&priv->inactive);
+
+	sna_damage_destroy(&priv->gpu_damage);
+	sna_damage_destroy(&priv->cpu_damage);
+
+	sna = to_sna_from_pixmap(pixmap);
+
+	/* Always release the gpu bo back to the lower levels of caching */
+	if (priv->gpu_bo)
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+
+	sna_pixmap_free_cpu(sna, priv);
+
+	if (priv->header) {
+		pixmap->devPrivate.ptr = sna->freed_pixmap;
+		sna->freed_pixmap = pixmap;
+	} else {
+		free(priv);
+		FreePixmap(pixmap);
+	}
 
-	FreePixmap(pixmap);
 	return TRUE;
 }
 
+void sna_pixmap_destroy(PixmapPtr pixmap)
+{
+	assert(pixmap->refcnt == 1);
+	assert(sna_pixmap(pixmap) == NULL || sna_pixmap(pixmap)->header == true);
+
+	sna_destroy_pixmap(pixmap);
+}
+
 static inline bool pixmap_inplace(struct sna *sna,
 				  PixmapPtr pixmap,
 				  struct sna_pixmap *priv)
@@ -1121,6 +1132,12 @@ static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
 	return priv->stride != 0;
 }
 
+static inline void add_flush_pixmap(struct sna *sna, struct sna_pixmap *priv)
+{
+	list_move(&priv->list, &sna->flush_pixmaps);
+	sna->kgem.flush |= 1;
+}
+
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -1153,6 +1170,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 		if (priv->create & KGEM_CAN_CREATE_GPU &&
 		    pixmap_inplace(sna, pixmap, priv)) {
+			assert(!priv->shm);
 			DBG(("%s: write inplace\n", __FUNCTION__));
 			if (priv->gpu_bo) {
 				if (kgem_bo_is_busy(priv->gpu_bo) &&
@@ -1190,6 +1208,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
+				assert(priv->shm);
 				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
@@ -1201,6 +1220,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 skip_inplace_map:
 		sna_damage_destroy(&priv->gpu_damage);
 		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
+			assert(!priv->shm);
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
@@ -1257,13 +1277,16 @@ skip_inplace_map:
 	}
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo))
+		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
+			assert(!priv->shm);
 			sna_pixmap_free_cpu(sna, priv);
+		}
 		sna_damage_destroy(&priv->gpu_damage);
 	}
 
@@ -1334,8 +1357,8 @@ skip_inplace_map:
 		priv->undamaged = false;
 
 		if (priv->flush) {
-			list_move(&priv->list, &sna->dirty_pixmaps);
-			sna->kgem.flush |= 1;
+			assert(!priv->shm);
+			add_flush_pixmap(sna, priv);
 		}
 	}
 
@@ -1622,6 +1645,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					return false;
 				}
 
+				assert(!priv->shm);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -1706,6 +1730,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
@@ -1974,8 +1999,8 @@ done:
 			priv->undamaged = false;
 		}
 		if (priv->flush) {
-			list_move(&priv->list, &sna->dirty_pixmaps);
-			sna->kgem.flush |= 1;
+			assert(!priv->shm);
+			add_flush_pixmap(sna, priv);
 		}
 	}
 
@@ -2167,6 +2192,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	assert(priv->gpu_bo->proxy == NULL);
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
@@ -2284,6 +2310,11 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		RegionUninit(&i);
 	}
 
+	if (priv->shm) {
+		assert(!priv->flush);
+		add_flush_pixmap(sna, priv);
+	}
+
 done:
 	if (flags & MOVE_WRITE) {
 		priv->clear = false;
@@ -2338,6 +2369,8 @@ sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 
 	if (priv->flush)
 		flags |= PREFER_GPU;
+	if (priv->shm)
+		flags = 0;
 	if (priv->cpu && (flags & (IGNORE_CPU | FORCE_GPU)) == 0)
 		flags = 0;
 
@@ -2517,6 +2550,11 @@ use_cpu_bo:
 	else
 		*damage = &priv->cpu_damage;
 
+	if (priv->shm) {
+		assert(!priv->flush);
+		add_flush_pixmap(to_sna_from_pixmap(pixmap), priv);
+	}
+
 	DBG(("%s: using CPU bo with damage? %d\n",
 	     __FUNCTION__, *damage != NULL));
 	return priv->cpu_bo;
@@ -2540,7 +2578,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 
 	if (sna->freed_pixmap) {
 		pixmap = sna->freed_pixmap;
-		sna->freed_pixmap = NULL;
+		sna->freed_pixmap = pixmap->devPrivate.ptr;
 
 		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
 		pixmap->refcnt = 1;
@@ -2751,12 +2789,16 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	priv->cpu_damage = NULL;
 	priv->undamaged = true;
 
+	if (priv->shm) {
+		assert(!priv->flush);
+		add_flush_pixmap(sna, priv);
+	}
+
 	/* For large bo, try to keep only a single copy around */
 	if (priv->create & KGEM_CAN_CREATE_LARGE)
 		sna_damage_all(&priv->gpu_damage,
 			       pixmap->drawable.width,
 			       pixmap->drawable.height);
-
 done:
 	list_del(&priv->list);
 
@@ -2767,6 +2809,7 @@ done:
 		priv->undamaged = false;
 		if (priv->ptr) {
 			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
+			assert(!priv->shm);
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
@@ -3035,6 +3078,11 @@ static bool upload_inplace(struct sna *sna,
 			   struct sna_pixmap *priv,
 			   RegionRec *region)
 {
+	if (priv->shm) {
+		DBG(("%s: no, SHM Pixmap\n", __FUNCTION__));
+		return false;
+	}
+
 	if (priv->create & KGEM_CAN_CREATE_LARGE) {
 		if (priv->gpu_bo) {
 			DBG(("%s: yes, large buffer and already have GPU bo\n",
@@ -3226,6 +3274,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 					priv->undamaged = false;
 				}
 				assert(!priv->cpu_bo->flush);
+				assert(!priv->shm);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -3235,6 +3284,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	}
 
 	if (priv->mapped) {
+		assert(!priv->shm);
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
 	}
@@ -3296,8 +3346,8 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			}
 		}
 		if (priv->flush) {
-			list_move(&priv->list, &sna->dirty_pixmaps);
-			sna->kgem.flush |= 1;
+			assert(!priv->shm);
+			add_flush_pixmap(sna, priv);
 		}
 	}
 	priv->cpu = true;
@@ -4150,6 +4200,11 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
+			if (src_priv->shm) {
+				assert(!src_priv->flush);
+				add_flush_pixmap(sna, src_priv);
+			}
+
 			if (damage)
 				sna_damage_add(damage, region);
 			return;
@@ -4219,6 +4274,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (src_priv) {
 				/* Fixup the shadow pointer as necessary */
 				if (src_priv->mapped) {
+					assert(!src_priv->shm);
 					src_pixmap->devPrivate.ptr = NULL;
 					src_priv->mapped = false;
 				}
@@ -12433,6 +12489,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
+	struct sna_pixmap *priv;
 
 	/* XXX we should be able to reduce the frequency of flushes further
 	 * by checking for outgoing damage events or sync replies. Tricky,
@@ -12442,32 +12499,27 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		return;
 
 	/* flush any pending damage from shadow copies to tfp clients */
-	if (!list_is_empty(&sna->dirty_pixmaps)) {
-		struct list preserve;
+	while (!list_is_empty(&sna->flush_pixmaps)) {
+		bool ret;
 
-		list_init(&preserve);
+		priv = list_first_entry(&sna->flush_pixmaps,
+					struct sna_pixmap, list);
 
-		do {
-			struct sna_pixmap *priv;
-
-			priv = list_first_entry(&sna->dirty_pixmaps,
-						struct sna_pixmap, list);
-			if (!sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ))
-				list_move(&priv->list, &preserve);
-
-		} while (!list_is_empty(&sna->dirty_pixmaps));
-
-		if (!list_is_empty(&preserve)) {
-			sna->dirty_pixmaps.next = preserve.next;
-			preserve.next->prev = &sna->dirty_pixmaps;
-			preserve.prev->next = &sna->dirty_pixmaps;
-			sna->dirty_pixmaps.prev = preserve.prev;
+		list_del(&priv->list);
+		if (priv->shm) {
+			DBG(("%s: syncing SHM pixmap=%ld\n", __FUNCTION__,
+			     priv->pixmap->drawable.serialNumber));
+			ret = sna_pixmap_move_to_cpu(priv->pixmap,
+						     MOVE_READ | MOVE_WRITE);
+		} else {
+			DBG(("%s: flushing DRI pixmap=%ld\n", __FUNCTION__,
+			     priv->pixmap->drawable.serialNumber));
+			ret = sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ);
 		}
+		(void)ret;
 	}
 
 	kgem_submit(&sna->kgem);
-	kgem_sync(&sna->kgem);
-
 	sna->kgem.flush = false;
 }
 
@@ -12778,6 +12830,7 @@ static void sna_accel_inactive(struct sna *sna)
 			list_del(&priv->list);
 
 			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
+			assert(!priv->shm);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 			priv->cpu = false;
@@ -12828,14 +12881,12 @@ static bool sna_accel_do_debug_memory(struct sna *sna)
 
 static void sna_accel_debug_memory(struct sna *sna)
 {
-	ErrorF("Allocated shadow pixels: %d, %ld bytes, as CPU bo: %d, %ld bytes\n",
-	       sna->debug_memory.shadow_pixels_allocs,
-	       (long)sna->debug_memory.shadow_pixels_bytes,
-	       sna->debug_memory.cpu_bo_allocs,
-	       (long)sna->debug_memory.cpu_bo_bytes);
 	ErrorF("Allocated bo: %d, %ld bytes\n",
 	       sna->kgem.debug_memory.bo_allocs,
 	       (long)sna->kgem.debug_memory.bo_bytes);
+	ErrorF("Allocated CPU bo: %d, %ld bytes\n",
+	       sna->debug_memory.cpu_bo_allocs,
+	       (long)sna->debug_memory.cpu_bo_bytes);
 }
 
 #else
@@ -12951,7 +13002,7 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 
 	sna_font_key = AllocateFontPrivateIndex();
 
-	list_init(&sna->dirty_pixmaps);
+	list_init(&sna->flush_pixmaps);
 	list_init(&sna->active_pixmaps);
 	list_init(&sna->inactive_clock[0]);
 	list_init(&sna->inactive_clock[1]);
@@ -13086,11 +13137,12 @@ void sna_accel_close(struct sna *sna)
 	sna_gradients_close(sna);
 	sna_glyphs_close(sna);
 
-	if (sna->freed_pixmap) {
-		assert(sna->freed_pixmap->refcnt == 0);
-		free(sna_pixmap(sna->freed_pixmap));
-		FreePixmap(sna->freed_pixmap);
-		sna->freed_pixmap = NULL;
+	while (sna->freed_pixmap) {
+		PixmapPtr pixmap = sna->freed_pixmap;
+		sna->freed_pixmap = pixmap->devPrivate.ptr;
+		assert(pixmap->refcnt == 0);
+		free(sna_pixmap(pixmap));
+		FreePixmap(pixmap);
 	}
 
 	DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index d7d095a..4263bf7 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -2134,8 +2134,8 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 
 				assert(box->x1 + src_dx >= 0);
 				assert(box->y1 + src_dy >= 0);
-				assert(box->x1 + src_dx <= MAX_SHORT);
-				assert(box->y1 + src_dy <= MAX_SHORT);
+				assert(box->x1 + src_dx <= INT16_MAX);
+				assert(box->y1 + src_dy <= INT16_MAX);
 
 				assert(box->x1 >= 0);
 				assert(box->y1 >= 0);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index da2f358..cfb9d98 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -155,6 +155,10 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	struct sna_pixmap *priv;
 	int tiling;
 
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL || priv->shm)
+		return NULL;
+
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
 		return NULL;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 2822368..46fbf8d 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -863,7 +863,6 @@ glyphs_via_mask(struct sna *sna,
 		     __FUNCTION__, (unsigned long)format->format,
 		     format->depth, (uint32_t)width*height*format->depth));
 
-upload:
 		pixmap = sna_pixmap_create_upload(screen,
 						  width, height,
 						  format->depth,
@@ -876,10 +875,8 @@ upload:
 						 width, height,
 						 pixmap->devPrivate.ptr,
 						 pixmap->devKind);
-		if (mask_image == NULL) {
-			screen->DestroyPixmap(pixmap);
-			return false;
-		}
+		if (mask_image == NULL)
+			goto err_pixmap;
 
 		memset(pixmap->devPrivate.ptr, 0, pixmap->devKind*height);
 #if HAS_PIXMAN_GLYPHS
@@ -897,10 +894,8 @@ upload:
 				count += list[n].len;
 			if (count > N_STACK_GLYPHS) {
 				pglyphs = malloc (count * sizeof(pixman_glyph_t));
-				if (pglyphs == NULL) {
-					screen->DestroyPixmap(pixmap);
-					return false;
-				}
+				if (pglyphs == NULL)
+					goto err_pixmap;
 			}
 
 			count = 0;
@@ -1021,9 +1016,8 @@ next_image:
 		mask = CreatePicture(0, &pixmap->drawable,
 				     format, CPComponentAlpha,
 				     &component_alpha, serverClient, &error);
-		screen->DestroyPixmap(pixmap);
 		if (!mask)
-			return false;
+			goto err_pixmap;
 
 		ValidatePicture(mask);
 	} else {
@@ -1036,15 +1030,12 @@ next_image:
 		mask = CreatePicture(0, &pixmap->drawable,
 				     format, CPComponentAlpha,
 				     &component_alpha, serverClient, &error);
-		screen->DestroyPixmap(pixmap);
 		if (!mask)
-			return false;
+			goto err_pixmap;
 
 		ValidatePicture(mask);
-		if (!clear_pixmap(sna, pixmap)) {
-			FreePicture(mask, 0);
-			goto upload;
-		}
+		if (!clear_pixmap(sna, pixmap))
+			goto err_mask;
 
 		memset(&tmp, 0, sizeof(tmp));
 		glyph_atlas = NULL;
@@ -1106,8 +1097,7 @@ next_image:
 					if (!ok) {
 						DBG(("%s: fallback -- can not handle PictOpAdd of glyph onto mask!\n",
 						     __FUNCTION__));
-						FreePicture(mask, 0);
-						return false;
+						goto err_mask;
 					}
 
 					glyph_atlas = this_atlas;
@@ -1143,9 +1133,11 @@ next_glyph:
 		      0, 0,
 		      box.x1, box.y1,
 		      width, height);
-
+err_mask:
 	FreePicture(mask, 0);
-	return true;
+err_pixmap:
+	sna_pixmap_destroy(pixmap);
+	return TRUE;
 }
 
 static PictFormatPtr
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 1db8958..a8b5a06 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -370,6 +370,12 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 		}
 	}
 
+	if (priv->shm) {
+		assert(!priv->flush);
+		list_move(&priv->list, &sna->flush_pixmaps);
+		sna->kgem.flush |= 1;
+	}
+
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
 	++priv->source_count;
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index fff5436..0024f99 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -75,7 +75,8 @@ is_gpu(DrawablePtr drawable)
 	if (priv == NULL || priv->clear)
 		return false;
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage) || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
+	if (DAMAGE_IS_ALL(priv->gpu_damage) ||
+	    (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
 		return true;
 
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index efb53dd..e63981f 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2479,7 +2479,7 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 				pixman_image_unref(image);
 			}
 			if (format != PIXMAN_a8) {
-				screen->DestroyPixmap(scratch);
+				sna_pixmap_destroy(scratch);
 				return;
 			}
 		} else {
@@ -2505,17 +2505,16 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		mask = CreatePicture(0, &scratch->drawable,
 				     PictureMatchFormat(screen, depth, format),
 				     0, 0, serverClient, &error);
-		screen->DestroyPixmap(scratch);
-		if (!mask)
-			return;
-
-		CompositePicture(op, src, mask, dst,
-				 xSrc + bounds.x1 - dst_x,
-				 ySrc + bounds.y1 - dst_y,
-				 0, 0,
-				 bounds.x1, bounds.y1,
-				 width, height);
-		FreePicture(mask, 0);
+		if (mask) {
+			CompositePicture(op, src, mask, dst,
+					 xSrc + bounds.x1 - dst_x,
+					 ySrc + bounds.y1 - dst_y,
+					 0, 0,
+					 bounds.x1, bounds.y1,
+					 width, height);
+			FreePicture(mask, 0);
+		}
+		sna_pixmap_destroy(scratch);
 	} else {
 		if (dst->polyEdge == PolyEdgeSharp)
 			maskFormat = PictureMatchFormat(screen, 1, PICT_a1);
@@ -3630,7 +3629,6 @@ composite_unaligned_boxes_fallback(CARD8 op,
 		mask = CreatePicture(0, &scratch->drawable,
 				     PictureMatchFormat(screen, 8, PICT_a8),
 				     0, 0, serverClient, &error);
-		screen->DestroyPixmap(scratch);
 		if (mask) {
 			CompositePicture(op, src, mask, dst,
 					 src_x + extents.x1 - dst_x,
@@ -3641,6 +3639,7 @@ composite_unaligned_boxes_fallback(CARD8 op,
 					 extents.y2 - extents.y1);
 			FreePicture(mask, 0);
 		}
+		sna_pixmap_destroy(scratch);
 	}
 
 	return true;
@@ -4260,7 +4259,7 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
 	if (tor_init(&tor, &extents, 2*ntrap)) {
-		screen->DestroyPixmap(scratch);
+		sna_pixmap_destroy(scratch);
 		return true;
 	}
 
@@ -4294,7 +4293,6 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	mask = CreatePicture(0, &scratch->drawable,
 			     PictureMatchFormat(screen, 8, PICT_a8),
 			     0, 0, serverClient, &error);
-	screen->DestroyPixmap(scratch);
 	if (mask) {
 		CompositePicture(op, src, mask, dst,
 				 src_x + dst_x - pixman_fixed_to_int(traps[0].left.p1.x),
@@ -4304,6 +4302,7 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 				 extents.x2, extents.y2);
 		FreePicture(mask, 0);
 	}
+	sna_pixmap_destroy(scratch);
 
 	return true;
 }
@@ -5323,7 +5322,7 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
 	if (tor_init(&tor, &extents, 2*ntrap)) {
-		screen->DestroyPixmap(scratch);
+		sna_pixmap_destroy(scratch);
 		return true;
 	}
 
@@ -5355,7 +5354,6 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	mask = CreatePicture(0, &scratch->drawable,
 			     PictureMatchFormat(screen, 8, PICT_a8),
 			     0, 0, serverClient, &error);
-	screen->DestroyPixmap(scratch);
 	if (mask) {
 		RegionRec region;
 
@@ -5393,6 +5391,7 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 done:
 		FreePicture(mask, 0);
 	}
+	sna_pixmap_destroy(scratch);
 
 	return true;
 }
@@ -5823,7 +5822,7 @@ trap_mask_converter(PicturePtr picture,
 	dx *= FAST_SAMPLES_X;
 	dy *= FAST_SAMPLES_Y;
 	if (tor_init(&tor, &extents, 2*ntrap)) {
-		screen->DestroyPixmap(scratch);
+		sna_pixmap_destroy(scratch);
 		return true;
 	}
 
@@ -5871,8 +5870,7 @@ trap_mask_converter(PicturePtr picture,
 			       pixmap, priv->gpu_bo, x, y,
 			       &extents, 1, 0);
 	mark_damaged(pixmap, priv, &extents ,x, y);
-
-	screen->DestroyPixmap(scratch);
+	sna_pixmap_destroy(scratch);
 	return true;
 }
 
@@ -5950,7 +5948,7 @@ trap_upload(PicturePtr picture,
 			       &extents, 1, 0);
 	mark_damaged(pixmap, priv, &extents, x, y);
 
-	screen->DestroyPixmap(scratch);
+	sna_pixmap_destroy(scratch);
 	return true;
 }
 
@@ -6362,7 +6360,7 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     __FUNCTION__, scratch->devPrivate.ptr, scratch->devKind));
 
 	if (tor_init(&tor, &extents, 3*count)) {
-		screen->DestroyPixmap(scratch);
+		sna_pixmap_destroy(scratch);
 		return true;
 	}
 
@@ -6390,7 +6388,6 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	mask = CreatePicture(0, &scratch->drawable,
 			     PictureMatchFormat(screen, 8, PICT_a8),
 			     0, 0, serverClient, &error);
-	screen->DestroyPixmap(scratch);
 	if (mask) {
 		CompositePicture(op, src, mask, dst,
 				 src_x + dst_x - pixman_fixed_to_int(tri[0].p1.x),
@@ -6401,6 +6398,7 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		FreePicture(mask, 0);
 	}
 	tor_fini(&tor);
+	sna_pixmap_destroy(scratch);
 
 	return true;
 }
@@ -6478,17 +6476,16 @@ triangles_fallback(CARD8 op,
 		mask = CreatePicture(0, &scratch->drawable,
 				     PictureMatchFormat(screen, depth, format),
 				     0, 0, serverClient, &error);
-		screen->DestroyPixmap(scratch);
-		if (!mask)
-			return;
-
-		CompositePicture(op, src, mask, dst,
-				 xSrc + bounds.x1 - dst_x,
-				 ySrc + bounds.y1 - dst_y,
-				 0, 0,
-				 bounds.x1, bounds.y1,
-				 width, height);
-		FreePicture(mask, 0);
+		if (mask) {
+			CompositePicture(op, src, mask, dst,
+					 xSrc + bounds.x1 - dst_x,
+					 ySrc + bounds.y1 - dst_y,
+					 0, 0,
+					 bounds.x1, bounds.y1,
+					 width, height);
+			FreePicture(mask, 0);
+		}
+		sna_pixmap_destroy(scratch);
 	} else {
 		if (dst->polyEdge == PolyEdgeSharp)
 			maskFormat = PictureMatchFormat(screen, 1, PICT_a1);
@@ -6746,17 +6743,16 @@ tristrip_fallback(CARD8 op,
 		mask = CreatePicture(0, &scratch->drawable,
 				     PictureMatchFormat(screen, depth, format),
 				     0, 0, serverClient, &error);
-		screen->DestroyPixmap(scratch);
-		if (!mask)
-			return;
-
-		CompositePicture(op, src, mask, dst,
-				 xSrc + bounds.x1 - dst_x,
-				 ySrc + bounds.y1 - dst_y,
-				 0, 0,
-				 bounds.x1, bounds.y1,
-				 width, height);
-		FreePicture(mask, 0);
+		if (mask) {
+			CompositePicture(op, src, mask, dst,
+					 xSrc + bounds.x1 - dst_x,
+					 ySrc + bounds.y1 - dst_y,
+					 0, 0,
+					 bounds.x1, bounds.y1,
+					 width, height);
+			FreePicture(mask, 0);
+		}
+		sna_pixmap_destroy(scratch);
 	} else {
 		xTriangle tri;
 		xPointFixed *p[3] = { &tri.p1, &tri.p2, &tri.p3 };
@@ -6881,17 +6877,16 @@ trifan_fallback(CARD8 op,
 		mask = CreatePicture(0, &scratch->drawable,
 				     PictureMatchFormat(screen, depth, format),
 				     0, 0, serverClient, &error);
-		screen->DestroyPixmap(scratch);
-		if (!mask)
-			return;
-
-		CompositePicture(op, src, mask, dst,
-				 xSrc + bounds.x1 - dst_x,
-				 ySrc + bounds.y1 - dst_y,
-				 0, 0,
-				 bounds.x1, bounds.y1,
-				 width, height);
-		FreePicture(mask, 0);
+		if (mask) {
+			CompositePicture(op, src, mask, dst,
+					 xSrc + bounds.x1 - dst_x,
+					 ySrc + bounds.y1 - dst_y,
+					 0, 0,
+					 bounds.x1, bounds.y1,
+					 width, height);
+			FreePicture(mask, 0);
+		}
+		sna_pixmap_destroy(scratch);
 	} else {
 		xTriangle tri;
 		xPointFixed *p[3] = { &tri.p1, &tri.p2, &tri.p3 };
commit 93c794eb3f80bef64f1619986a7c950229dc7a47
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 20:34:53 2012 +0100

    sna: Micro-optimise copying boxes with the blitter
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index ff8e3eb..d7d095a 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -2028,6 +2028,13 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	return true;
 }
 
+static inline uint32_t add2(uint32_t v, int16_t x, int16_t y)
+{
+	x += v & 0xffff;
+	y += v >> 16;
+	return (uint16_t)y << 16 | x;
+}
+
 bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2104,56 +2111,110 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
-	do {
-		int nbox_this_time;
-
-		nbox_this_time = nbox;
-		if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
-			nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
-		if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
-			nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
-		assert(nbox_this_time);
-		nbox -= nbox_this_time;
-
+	if ((dst_dx | dst_dy) == 0) {
+		uint64_t hdr = (uint64_t)br13 << 32 | cmd;
 		do {
-			uint32_t *b = kgem->batch + kgem->nbatch;
-
-			DBG(("  %s: box=(%d, %d)x(%d, %d)\n",
-			     __FUNCTION__,
-			     box->x1, box->y1,
-			     box->x2 - box->x1, box->y2 - box->y1));
-
-			assert(box->x1 + src_dx >= 0);
-			assert(box->y1 + src_dy >= 0);
-
-			assert(box->x1 + dst_dx >= 0);
-			assert(box->y1 + dst_dy >= 0);
-
-			b[0] = cmd;
-			b[1] = br13;
-			b[2] = ((box->y1 + dst_dy) << 16) | (box->x1 + dst_dx);
-			b[3] = ((box->y2 + dst_dy) << 16) | (box->x2 + dst_dx);
-			b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
-					      I915_GEM_DOMAIN_RENDER << 16 |
-					      I915_GEM_DOMAIN_RENDER |
-					      KGEM_RELOC_FENCED,
-					      0);
-			b[5] = ((box->y1 + src_dy) << 16) | (box->x1 + src_dx);
-			b[6] = src_pitch;
-			b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
-					      I915_GEM_DOMAIN_RENDER << 16 |
-					      KGEM_RELOC_FENCED,
-					      0);
-			kgem->nbatch += 8;
-			box++;
-		} while (--nbox_this_time);
+			int nbox_this_time;
+
+			nbox_this_time = nbox;
+			if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+				nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
+			if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+				nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+			assert(nbox_this_time);
+			nbox -= nbox_this_time;
+
+			do {
+				uint32_t *b = kgem->batch + kgem->nbatch;
+
+				DBG(("  %s: box=(%d, %d)x(%d, %d)\n",
+				     __FUNCTION__,
+				     box->x1, box->y1,
+				     box->x2 - box->x1, box->y2 - box->y1));
+
+				assert(box->x1 + src_dx >= 0);
+				assert(box->y1 + src_dy >= 0);
+				assert(box->x1 + src_dx <= MAX_SHORT);
+				assert(box->y1 + src_dy <= MAX_SHORT);
+
+				assert(box->x1 >= 0);
+				assert(box->y1 >= 0);
+
+				*(uint64_t *)&b[0] = hdr;
+				*(uint64_t *)&b[2] = *(uint64_t *)box;
+				b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      I915_GEM_DOMAIN_RENDER |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[5] = add2(b[2], src_dx, src_dy);
+				b[6] = src_pitch;
+				b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      KGEM_RELOC_FENCED,
+						      0);
+				kgem->nbatch += 8;
+				box++;
+			} while (--nbox_this_time);
+
+			if (!nbox)
+				break;
 
-		if (!nbox)
-			break;
+			_kgem_submit(kgem);
+			_kgem_set_mode(kgem, KGEM_BLT);
+		} while (1);
+	} else {
+		do {
+			int nbox_this_time;
+
+			nbox_this_time = nbox;
+			if (8*nbox_this_time > kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED)
+				nbox_this_time = (kgem->surface - kgem->nbatch - KGEM_BATCH_RESERVED) / 8;
+			if (2*nbox_this_time > KGEM_RELOC_SIZE(kgem) - kgem->nreloc)
+				nbox_this_time = (KGEM_RELOC_SIZE(kgem) - kgem->nreloc)/2;
+			assert(nbox_this_time);
+			nbox -= nbox_this_time;
+
+			do {
+				uint32_t *b = kgem->batch + kgem->nbatch;
+
+				DBG(("  %s: box=(%d, %d)x(%d, %d)\n",
+				     __FUNCTION__,
+				     box->x1, box->y1,
+				     box->x2 - box->x1, box->y2 - box->y1));
+
+				assert(box->x1 + src_dx >= 0);
+				assert(box->y1 + src_dy >= 0);
+
+				assert(box->x1 + dst_dx >= 0);
+				assert(box->y1 + dst_dy >= 0);
+
+				b[0] = cmd;
+				b[1] = br13;
+				b[2] = ((box->y1 + dst_dy) << 16) | (box->x1 + dst_dx);
+				b[3] = ((box->y2 + dst_dy) << 16) | (box->x2 + dst_dx);
+				b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      I915_GEM_DOMAIN_RENDER |
+						      KGEM_RELOC_FENCED,
+						      0);
+				b[5] = ((box->y1 + src_dy) << 16) | (box->x1 + src_dx);
+				b[6] = src_pitch;
+				b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
+						      I915_GEM_DOMAIN_RENDER << 16 |
+						      KGEM_RELOC_FENCED,
+						      0);
+				kgem->nbatch += 8;
+				box++;
+			} while (--nbox_this_time);
+
+			if (!nbox)
+				break;
 
-		_kgem_submit(kgem);
-		_kgem_set_mode(kgem, KGEM_BLT);
-	} while (1);
+			_kgem_submit(kgem);
+			_kgem_set_mode(kgem, KGEM_BLT);
+		} while (1);
+	}
 
 	if (kgem->gen >= 60 && kgem_check_batch(kgem, 3)) {
 		uint32_t *b = kgem->batch + kgem->nbatch;
commit a0d95a9c2d3a27eafbe459e2aefe772c006e596f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 20:34:23 2012 +0100

    sna: Only update a buffer when it becomes dirty
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 04c351c..b65454d 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -547,7 +547,7 @@ static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
 	assert(sna->render_state.gen2.vertex_offset == 0);
 
 	if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
-		kgem_bo_mark_dirty(op->dst.bo);
+		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
 		return;
 	}
 
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 1f6c1aa..18c5d85 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1373,7 +1373,7 @@ static void gen3_emit_target(struct sna *sna,
 
 		state->current_dst = bo->unique_id;
 	}
-	kgem_bo_mark_dirty(bo);
+	kgem_bo_mark_dirty(&sna->kgem, bo);
 }
 
 static void gen3_emit_composite_state(struct sna *sna,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6fcce71..de6c8c4 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -732,7 +732,7 @@ gen4_bind_bo(struct sna *sna,
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(bo);
+		kgem_bo_mark_dirty(&sna->kgem, bo);
 	} else
 		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 
@@ -1457,7 +1457,7 @@ gen4_emit_state(struct sna *sna,
 		     kgem_bo_is_dirty(op->mask.bo)));
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
 	}
 }
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index d776e77..db7eb7b 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -726,7 +726,7 @@ gen5_bind_bo(struct sna *sna,
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(bo);
+		kgem_bo_mark_dirty(&sna->kgem, bo);
 	} else
 		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 
@@ -1472,7 +1472,7 @@ gen5_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
 	}
 }
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d4783e0..c292da1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -914,7 +914,7 @@ gen6_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen6_emit_flush(sna);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall) {
@@ -1246,7 +1246,7 @@ gen6_bind_bo(struct sna *sna,
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(bo);
+		kgem_bo_mark_dirty(&sna->kgem, bo);
 	} else
 		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c041d66..ae0aa9d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1048,7 +1048,7 @@ gen7_emit_state(struct sna *sna,
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen7_emit_pipe_invalidate(sna, need_stall);
 		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
+		kgem_bo_mark_dirty(&sna->kgem, op->dst.bo);
 		need_stall = false;
 	}
 	if (need_stall)
@@ -1355,7 +1355,7 @@ gen7_bind_bo(struct sna *sna,
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER;
-		kgem_bo_mark_dirty(bo);
+		kgem_bo_mark_dirty(&sna->kgem, bo);
 	} else
 		domains = I915_GEM_DOMAIN_SAMPLER << 16;
 
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 66a23bf..577fa6c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -349,9 +349,10 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 		assert(list_is_empty(&bo->vma));
 		bo->rq = NULL;
 		list_del(&bo->request);
+
+		bo->needs_flush = false;
 	}
 
-	bo->needs_flush = false;
 	bo->domain = DOMAIN_NONE;
 }
 
@@ -3494,12 +3495,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 		kgem->reloc[index].target_handle = bo->handle;
 		kgem->reloc[index].presumed_offset = bo->presumed_offset;
 
-		if (read_write_domain & 0x7fff) {
-			DBG(("%s: marking handle=%d dirty\n",
-			     __FUNCTION__, bo->handle));
-			bo->needs_flush = bo->dirty = true;
-			list_move(&bo->request, &kgem->next_request->buffers);
-		}
+		if (read_write_domain & 0x7ff)
+			kgem_bo_mark_dirty(kgem, bo);
 
 		delta += bo->presumed_offset;
 	} else {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 533a919..165e7b9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -536,10 +536,15 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 	return bo->dirty;
 }
 
-static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
+static inline void kgem_bo_mark_dirty(struct kgem *kgem, struct kgem_bo *bo)
 {
+	if (bo->dirty)
+		return;
+
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
-	bo->dirty = true;
+
+	bo->needs_flush = bo->dirty = true;
+	list_move(&bo->request, &kgem->next_request->buffers);
 }
 
 void kgem_sync(struct kgem *kgem);
commit c52d265b83b033fb2a275fcc9a8a8d146e3afdf6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 19:38:38 2012 +0100

    sna: Tweak CPU bo promotion rules for CopyArea
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4ef52d7..5466f38 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3733,35 +3733,40 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 {
 	int w = box->x2 - box->x1;
 	int h = box->y2 - box->y1;
+	int count;
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		return true;
 
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return false;
-
 	if (priv->gpu_bo) {
 		if (alu != GXcopy)
 			return true;
 
 		if (!priv->cpu)
 			return true;
+
+		if (priv->gpu_bo->tiling)
+			return true;
 	} else {
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 			return false;
 	}
 
+	count = priv->source_count++;
 	if (priv->cpu_bo) {
-		if (sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) == I915_TILING_NONE)
+		if (priv->cpu_bo->flush && count > SOURCE_BIAS)
+			return true;
+
+		if (sna_pixmap_choose_tiling(pixmap,
+					     DEFAULT_TILING) == I915_TILING_NONE)
 			return false;
 
 		if (priv->cpu)
 			return false;
 
-		return (priv->source_count++-SOURCE_BIAS) * w*h >=
-			(int)pixmap->drawable.width * pixmap->drawable.height;
+		return count > SOURCE_BIAS;
 	} else {
-		return ++priv->source_count * w*h >= (SOURCE_BIAS+2) * (int)pixmap->drawable.width * pixmap->drawable.height;
+		return count * w*h >= (SOURCE_BIAS+2) * (int)pixmap->drawable.width * pixmap->drawable.height;
 	}
 }
 
commit f92a64dd9162731210b14368b6ee408356d7fefc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 15:54:35 2012 +0100

    sna: Only set the vmap flag after we make the bo snoopable
    
    Otherwise if we fail then we incorrectly add the handle to the vmap
    cache.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 95d634a..66a23bf 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3242,10 +3242,15 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 		assert(bo->tiling == I915_TILING_NONE);
 
+		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED)) {
+			kgem_bo_destroy(kgem, bo);
+			return NULL;
+		}
+
 		bo->reusable = false;
 		bo->vmap = true;
-		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED) ||
-		    kgem_bo_map__cpu(kgem, bo) == NULL) {
+
+		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
 			return NULL;
 		}
commit 8b4cf24f1403bf3d929cc0725de66b3d0e08ebaf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 14:46:32 2012 +0100

    sna: Also check whether the first upload box can use the BLT
    
    No point checking boxes 1..n if box 0 is the troublemaker!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index b53143f..733e542 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -170,7 +170,8 @@ fallback:
 		return;
 	}
 
-	can_blt = kgem_bo_can_blt(kgem, src_bo);
+	can_blt = kgem_bo_can_blt(kgem, src_bo) &&
+		(box[0].x2 - box[0].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
 	extents = box[0];
 	for (n = 1; n < nbox; n++) {
 		if (box[n].x1 < extents.x1)
@@ -575,7 +576,8 @@ fallback:
 					   box, nbox);
 	}
 
-	can_blt = kgem_bo_can_blt(kgem, dst_bo);
+	can_blt = kgem_bo_can_blt(kgem, dst_bo) &&
+		(box[0].x2 - box[0].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
 	extents = box[0];
 	for (n = 1; n < nbox; n++) {
 		if (box[n].x1 < extents.x1)
commit df14b285be44f0c40a718bb8ae09a9558b1eb2c7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 14:35:28 2012 +0100

    sna/gen6: Prefer the more flexible render ring for large surfaces
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 044a9f9..d4783e0 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2397,27 +2397,8 @@ try_blt(struct sna *sna,
 		return true;
 	}
 
-	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
-		DBG(("%s: dst too large for 3D pipe (%d, %d)\n",
-		     __FUNCTION__,
-		     dst->pDrawable->width, dst->pDrawable->height));
-		return true;
-	}
-
-	if (src->pDrawable &&
-	    too_large(src->pDrawable->width, src->pDrawable->height)) {
-		DBG(("%s: src too large for 3D pipe (%d, %d)\n",
-		     __FUNCTION__,
-		     src->pDrawable->width, src->pDrawable->height));
+	if (can_switch_rings(sna) && sna_picture_is_solid(src, NULL))
 		return true;
-	}
-
-	if (can_switch_rings(sna)) {
-		if (sna_picture_is_solid(src, NULL))
-			return true;
-		if (src->pDrawable)
-			return true;
-	}
 
 	return false;
 }
commit 578ff11c3753ede2c81afc47302991e3d3b316f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 14:24:06 2012 +0100

    sna: Just use composite.box() when we only have one box
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index e503586..1db8958 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -791,7 +791,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 						   &op))
 				goto cleanup_src;
 
-			op.boxes(sna, &op, &b, 1);
+			op.box(sna, &op, &b);
 			op.done(sna, &op);
 		}
 	}
commit fb7987fc0b51cf3b83dcf78bcefe65ec3af32ccf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 13:12:27 2012 +0100

    sna/dri: Cleanup ring selection for SNB+ CopyRegion
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 4ced0eb..da2f358 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -426,6 +426,7 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
 {
 	struct drm_i915_gem_busy busy;
+	int mode;
 
 	if (sna->kgem.gen < 60)
 		return;
@@ -463,11 +464,14 @@ static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
 	 * our operation on the same ring, and ideally on the same
 	 * ring as we will flip from (which should be the RENDER ring
 	 * as well).
+	 *
+	 * The ultimate question is whether preserving the ring outweighs
+	 * the cost of the query.
 	 */
-	if ((busy.busy & 0xffff0000) == 0 || busy.busy & (1 << 16))
-		kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	else
-		kgem_set_mode(&sna->kgem, KGEM_BLT);
+	mode = KGEM_RENDER;
+	if (busy.busy & (1 << 16))
+		mode = KGEM_BLT;
+	_kgem_set_mode(&sna->kgem, mode);
 }
 
 static struct kgem_bo *
commit 3b56588fbaa2c4ccdfb2f2a8f5656d2cda9dacd7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 10:19:25 2012 +0100

    sna: Update WIP userptr example usage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ab736ec..95d634a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -91,11 +91,10 @@ search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
 #define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
 
-#if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
-#define DRM_I915_GEM_VMAP       0x2d
-#define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
-#define I915_PARAM_HAS_VMAP              19
-struct drm_i915_gem_vmap {
+#if defined(USE_VMAP)
+#define LOCAL_I915_GEM_VMAP       0x32
+#define LOCAL_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + LOCAL_I915_GEM_VMAP, struct local_i915_gem_vmap)
+struct local_i915_gem_vmap {
 	uint64_t user_ptr;
 	uint32_t user_size;
 	uint32_t flags;
@@ -699,6 +698,8 @@ static bool test_has_cacheing(struct kgem *kgem)
 static bool test_has_vmap(struct kgem *kgem)
 {
 #if defined(USE_VMAP)
+	uint32_t handle;
+
 	if (DBG_NO_VMAP)
 		return false;
 
@@ -706,7 +707,12 @@ static bool test_has_vmap(struct kgem *kgem)
 	if (kgem->gen == 40)
 		return false;
 
-	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
+	ptr = malloc(PAGE_SIZE);
+	handle = gem_vmap(kgem->fd, ptr, PAGE_SIZE, false);
+	gem_close(kgem->fd, handle);
+	free(ptr);
+
+	return handle != 0;
 #else
 	return false;
 #endif
@@ -3738,7 +3744,7 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 #if defined(USE_VMAP)
 static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 {
-	struct drm_i915_gem_vmap vmap;
+	struct local_i915_gem_vmap vmap;
 
 	VG_CLEAR(vmap);
 	vmap.user_ptr = (uintptr_t)ptr;
@@ -3747,7 +3753,7 @@ static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 	if (read_only)
 		vmap.flags |= I915_VMAP_READ_ONLY;
 
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap)) {
+	if (drmIoctl(fd, LOCAL_IOCTL_I915_GEM_VMAP, &vmap)) {
 		DBG(("%s: failed to map %p + %d bytes: %d\n",
 		     __FUNCTION__, ptr, size, errno));
 		return 0;
commit 473a1dfb683ed576d86b37aba36aaa0e379f4606
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 09:56:13 2012 +0100

    sna: Rename kgem_partial_bo to kgem_buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5361fa6..ab736ec 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -115,7 +115,7 @@ struct local_i915_gem_cacheing {
 #define LOCAL_I915_GEM_SET_CACHEING	0x2f
 #define LOCAL_IOCTL_I915_GEM_SET_CACHEING DRM_IOW(DRM_COMMAND_BASE + LOCAL_I915_GEM_SET_CACHEING, struct local_i915_gem_cacheing)
 
-struct kgem_partial_bo {
+struct kgem_buffer {
 	struct kgem_bo base;
 	void *mem;
 	uint32_t used;
@@ -797,8 +797,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
 	     kgem->half_cpu_cache_pages));
 
-	list_init(&kgem->batch_partials);
-	list_init(&kgem->active_partials);
+	list_init(&kgem->batch_buffers);
+	list_init(&kgem->active_buffers);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -841,11 +841,11 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__,
 	     kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024)));
 
-	kgem->partial_buffer_size = 64 * 1024;
-	while (kgem->partial_buffer_size < kgem->aperture_mappable >> 10)
-		kgem->partial_buffer_size *= 2;
-	DBG(("%s: partial buffer size=%d [%d KiB]\n", __FUNCTION__,
-	     kgem->partial_buffer_size, kgem->partial_buffer_size / 1024));
+	kgem->buffer_size = 64 * 1024;
+	while (kgem->buffer_size < kgem->aperture_mappable >> 10)
+		kgem->buffer_size *= 2;
+	DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
+	     kgem->buffer_size, kgem->buffer_size / 1024));
 
 	kgem->max_object_size = 2 * aperture.aper_size / 3;
 	kgem->max_gpu_size = kgem->max_object_size;
@@ -1277,9 +1277,9 @@ static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 	bo->reusable = true;
 }
 
-static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
+static void _kgem_bo_delete_buffer(struct kgem *kgem, struct kgem_bo *bo)
 {
-	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+	struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
 
 	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
 	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
@@ -1479,8 +1479,7 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
-static void kgem_partial_buffer_release(struct kgem *kgem,
-					struct kgem_partial_bo *bo)
+static void kgem_buffer_release(struct kgem *kgem, struct kgem_buffer *bo)
 {
 	while (!list_is_empty(&bo->base.vma)) {
 		struct kgem_bo *cached;
@@ -1497,14 +1496,14 @@ static void kgem_partial_buffer_release(struct kgem *kgem,
 	}
 }
 
-static bool kgem_retire__partials(struct kgem *kgem)
+static bool kgem_retire__buffers(struct kgem *kgem)
 {
 	bool retired = false;
 
-	while (!list_is_empty(&kgem->active_partials)) {
-		struct kgem_partial_bo *bo =
-			list_last_entry(&kgem->active_partials,
-					struct kgem_partial_bo,
+	while (!list_is_empty(&kgem->active_buffers)) {
+		struct kgem_buffer *bo =
+			list_last_entry(&kgem->active_buffers,
+					struct kgem_buffer,
 					base.list);
 
 		if (bo->base.rq)
@@ -1513,7 +1512,7 @@ static bool kgem_retire__partials(struct kgem *kgem)
 		DBG(("%s: releasing upload cache for handle=%d? %d\n",
 		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
 		list_del(&bo->base.list);
-		kgem_partial_buffer_release(kgem, bo);
+		kgem_buffer_release(kgem, bo);
 		kgem_bo_unref(kgem, &bo->base);
 		retired = true;
 	}
@@ -1680,7 +1679,7 @@ bool kgem_retire(struct kgem *kgem)
 
 	retired |= kgem_retire__flushing(kgem);
 	retired |= kgem_retire__requests(kgem);
-	retired |= kgem_retire__partials(kgem);
+	retired |= kgem_retire__buffers(kgem);
 
 	kgem->need_retire =
 		!list_is_empty(&kgem->requests) ||
@@ -1766,12 +1765,12 @@ static void kgem_close_inactive(struct kgem *kgem)
 		kgem_close_list(kgem, &kgem->inactive[i]);
 }
 
-static void kgem_finish_partials(struct kgem *kgem)
+static void kgem_finish_buffers(struct kgem *kgem)
 {
-	struct kgem_partial_bo *bo, *next;
+	struct kgem_buffer *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->batch_partials, base.list) {
-		DBG(("%s: partial handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
+	list_for_each_entry_safe(bo, next, &kgem->batch_buffers, base.list) {
+		DBG(("%s: buffer handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
 		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
 		     bo->write, bo->mmapped));
 
@@ -1795,11 +1794,11 @@ static void kgem_finish_partials(struct kgem *kgem)
 			if (!DBG_NO_UPLOAD_ACTIVE &&
 			    bo->used + PAGE_SIZE <= bytes(&bo->base) &&
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
-				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
+				DBG(("%s: retaining upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
 				assert(!bo->base.vmap);
 				list_move(&bo->base.list,
-					  &kgem->active_partials);
+					  &kgem->active_buffers);
 				continue;
 			}
 			goto decouple;
@@ -1808,7 +1807,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		if (!bo->used) {
 			/* Unless we replace the handle in the execbuffer,
 			 * then this bo will become active. So decouple it
-			 * from the partial list and track it in the normal
+			 * from the buffer list and track it in the normal
 			 * manner.
 			 */
 			goto decouple;
@@ -2044,7 +2043,7 @@ void _kgem_submit(struct kgem *kgem)
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
 	assert(kgem->nfence <= kgem->fence_max);
 
-	kgem_finish_partials(kgem);
+	kgem_finish_buffers(kgem);
 
 #if HAS_DEBUG_FULL && SHOW_BATCH
 	__kgem_batch_debug(kgem, batch_end);
@@ -3280,7 +3279,7 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		_list_del(&bo->vma);
 		_list_del(&bo->request);
 		if (bo->io && bo->exec == NULL)
-			_kgem_bo_delete_partial(kgem, bo);
+			_kgem_bo_delete_buffer(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
 		free(bo);
@@ -3907,9 +3906,9 @@ struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
 	return bo;
 }
 
-static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
+static struct kgem_buffer *buffer_alloc(int num_pages)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 
 	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
 	if (bo) {
@@ -3929,10 +3928,10 @@ use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 	return true;
 }
 
-static struct kgem_partial_bo *
+static struct kgem_buffer *
 search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	struct kgem_bo *old;
 
 	old = search_vmap_cache(kgem, alloc, 0);
@@ -3973,7 +3972,7 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 }
 
 static void
-init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
+init_buffer_from_bo(struct kgem_buffer *bo, struct kgem_bo *old)
 {
 	DBG(("%s: reusing handle=%d for buffer\n",
 	     __FUNCTION__, old->handle));
@@ -3991,10 +3990,10 @@ init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
 	assert(bo->base.tiling == I915_TILING_NONE);
 }
 
-static struct kgem_partial_bo *
+static struct kgem_buffer *
 create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 
 	if (kgem->has_cacheing) {
 		struct kgem_bo *old;
@@ -4044,7 +4043,7 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	}
 
 	if (kgem->has_vmap) {
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo) {
 			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
 						   alloc * PAGE_SIZE, false);
@@ -4073,7 +4072,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	unsigned offset, alloc;
 	struct kgem_bo *old;
 
@@ -4090,7 +4089,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		flags &= ~KGEM_BUFFER_INPLACE;
 
 #if !DBG_NO_UPLOAD_CACHE
-	list_for_each_entry(bo, &kgem->batch_partials, base.list) {
+	list_for_each_entry(bo, &kgem->batch_buffers, base.list) {
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
 
@@ -4103,7 +4102,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 			gem_write(kgem->fd, bo->base.handle,
 				  0, bo->used, bo->mem);
-			kgem_partial_buffer_release(kgem, bo);
+			kgem_buffer_release(kgem, bo);
 			bo->need_io = 0;
 			bo->write = 0;
 			offset = 0;
@@ -4129,7 +4128,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 
 		if (bo->used + size <= bytes(&bo->base)) {
-			DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
+			DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
 			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
 			offset = bo->used;
 			bo->used += size;
@@ -4138,7 +4137,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	}
 
 	if (flags & KGEM_BUFFER_WRITE) {
-		list_for_each_entry(bo, &kgem->active_partials, base.list) {
+		list_for_each_entry(bo, &kgem->active_buffers, base.list) {
 			assert(bo->base.io);
 			assert(bo->base.refcnt >= 1);
 			assert(bo->mmapped);
@@ -4151,11 +4150,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 
 			if (bo->used + size <= bytes(&bo->base)) {
-				DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
+				DBG(("%s: reusing buffer? used=%d + size=%d, total=%d\n",
 				     __FUNCTION__, bo->used, size, bytes(&bo->base)));
 				offset = bo->used;
 				bo->used += size;
-				list_move(&bo->base.list, &kgem->batch_partials);
+				list_move(&bo->base.list, &kgem->batch_buffers);
 				goto done;
 			}
 		}
@@ -4164,9 +4163,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
-	alloc = ALIGN(2*size, kgem->partial_buffer_size);
+	alloc = ALIGN(2*size, kgem->buffer_size);
 	if (alloc > MAX_CACHE_SIZE)
-		alloc = ALIGN(size, kgem->partial_buffer_size);
+		alloc = ALIGN(size, kgem->buffer_size);
 	if (alloc > MAX_CACHE_SIZE)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
@@ -4319,7 +4318,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		DBG(("%s: reusing ordinary handle %d for io\n",
 		     __FUNCTION__, old->handle));
 		alloc = num_pages(old);
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo == NULL)
 			return NULL;
 
@@ -4370,7 +4369,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 		DBG(("%s: failing back to new pwrite buffer\n", __FUNCTION__));
 		old = &bo->base;
-		bo = partial_bo_alloc(alloc);
+		bo = buffer_alloc(alloc);
 		if (bo == NULL) {
 			free(old);
 			return NULL;
@@ -4401,7 +4400,7 @@ init:
 	offset = 0;
 
 	assert(list_is_empty(&bo->base.list));
-	list_add(&bo->base.list, &kgem->batch_partials);
+	list_add(&bo->base.list, &kgem->batch_buffers);
 
 	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
@@ -4415,7 +4414,7 @@ done:
 
 bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
 {
-	struct kgem_partial_bo *bo = (struct kgem_partial_bo *)_bo->proxy;
+	struct kgem_buffer *bo = (struct kgem_buffer *)_bo->proxy;
 	return bo->write & KGEM_BUFFER_WRITE_INPLACE;
 }
 
@@ -4444,7 +4443,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	assert(*ret != NULL);
 
 	if (height & 1) {
-		struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+		struct kgem_buffer *io = (struct kgem_buffer *)bo->proxy;
 		int min;
 
 		assert(io->used);
@@ -4456,7 +4455,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		min = bo->delta + height * stride;
 		min = ALIGN(min, UPLOAD_ALIGNMENT);
 		if (io->used != min) {
-			DBG(("%s: trimming partial buffer from %d to %d\n",
+			DBG(("%s: trimming buffer from %d to %d\n",
 			     __FUNCTION__, io->used, min));
 			io->used = min;
 		}
@@ -4513,7 +4512,7 @@ void kgem_proxy_bo_attach(struct kgem_bo *bo,
 
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
-	struct kgem_partial_bo *bo;
+	struct kgem_buffer *bo;
 	uint32_t offset = _bo->delta, length = _bo->size.bytes;
 
 	assert(_bo->io);
@@ -4525,7 +4524,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	assert(_bo->proxy == NULL);
 	assert(_bo->exec == NULL);
 
-	bo = (struct kgem_partial_bo *)_bo;
+	bo = (struct kgem_buffer *)_bo;
 
 	DBG(("%s(offset=%d, length=%d, vmap=%d)\n", __FUNCTION__,
 	     offset, length, bo->base.vmap));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index bd1219a..533a919 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list vmap;
-	struct list batch_partials, active_partials;
+	struct list batch_buffers, active_buffers;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
@@ -167,7 +167,7 @@ struct kgem {
 	uint32_t max_upload_tile_size, max_copy_tile_size;
 	uint32_t max_gpu_size, max_cpu_size;
 	uint32_t large_object_size, max_object_size;
-	uint32_t partial_buffer_size;
+	uint32_t buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
 	void (*retire)(struct kgem *kgem);
commit 8e6e8a2fa8adda9ae9be8a88fbb14851e9d2df2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 20 09:51:46 2012 +0100

    sna: Allow the snoopable upload buffer to take pages from the CPU vma cache
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index eeb6774..5361fa6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3972,26 +3972,54 @@ search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 	return NULL;
 }
 
+static void
+init_buffer_from_bo(struct kgem_partial_bo *bo, struct kgem_bo *old)
+{
+	DBG(("%s: reusing handle=%d for buffer\n",
+	     __FUNCTION__, old->handle));
+
+	memcpy(&bo->base, old, sizeof(*old));
+	if (old->rq)
+		list_replace(&old->request, &bo->base.request);
+	else
+		list_init(&bo->base.request);
+	list_replace(&old->vma, &bo->base.vma);
+	list_init(&bo->base.list);
+	free(old);
+	bo->base.refcnt = 1;
+
+	assert(bo->base.tiling == I915_TILING_NONE);
+}
+
 static struct kgem_partial_bo *
 create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
 	struct kgem_partial_bo *bo;
 
 	if (kgem->has_cacheing) {
+		struct kgem_bo *old;
 		uint32_t handle;
 
-		handle = gem_create(kgem->fd, alloc);
-		if (handle == 0)
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL)
 			return NULL;
 
-		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
-			gem_close(kgem->fd, handle);
+		old = search_linear_cache(kgem, alloc,
+					 CREATE_INACTIVE | CREATE_CPU_MAP | CREATE_EXACT);
+		if (old) {
+			init_buffer_from_bo(bo, old);
+			return bo;
+		}
+
+		handle = gem_create(kgem->fd, alloc);
+		if (handle == 0) {
+			free(bo);
 			return NULL;
 		}
 
-		bo = malloc(sizeof(*bo));
-		if (bo == NULL) {
+		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
 			gem_close(kgem->fd, handle);
+			free(bo);
 			return NULL;
 		}
 
@@ -4155,18 +4183,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old == NULL)
 			old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP);
 		if (old) {
-			DBG(("%s: reusing handle=%d for buffer\n",
-			     __FUNCTION__, old->handle));
-
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-			bo->base.refcnt = 1;
+			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
 			if (handle == 0 ||
@@ -4251,16 +4268,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			if (bo == NULL)
 				return NULL;
 
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-
-			assert(bo->base.tiling == I915_TILING_NONE);
+			init_buffer_from_bo(bo, old);
 			assert(num_pages(&bo->base) >= NUM_PAGES(size));
 
 			bo->mem = kgem_bo_map(kgem, &bo->base);
@@ -4268,11 +4276,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				bo->need_io = false;
 				bo->base.io = true;
 				bo->mmapped = true;
-				bo->base.refcnt = 1;
 
 				alloc = num_pages(&bo->base);
 				goto init;
 			} else {
+				bo->base.refcnt = 0;
 				kgem_bo_free(kgem, &bo->base);
 				bo = NULL;
 			}
@@ -4315,17 +4323,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (bo == NULL)
 			return NULL;
 
-		memcpy(&bo->base, old, sizeof(*old));
-		if (old->rq)
-			list_replace(&old->request,
-				     &bo->base.request);
-		else
-			list_init(&bo->base.request);
-		list_replace(&old->vma, &bo->base.vma);
-		list_init(&bo->base.list);
-		free(old);
-		bo->base.refcnt = 1;
-
+		init_buffer_from_bo(bo, old);
 		bo->need_io = flags & KGEM_BUFFER_WRITE;
 		bo->base.io = true;
 	} else {
@@ -4345,16 +4343,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
 			alloc = num_pages(old);
-
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-			bo->base.refcnt = 1;
+			init_buffer_from_bo(bo, old);
 		} else {
 			uint32_t handle = gem_create(kgem->fd, alloc);
 			if (handle == 0 ||
commit 979035bb9ce04db5fe30efa4f6daab0a40f6af57
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 20:06:28 2012 +0100

    sna: Remove topmost unused 'flush' attribute
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3ced2b4..f274de9 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -192,7 +192,6 @@ struct sna {
 #define SNA_FORCE_SHADOW	0x20
 
 	unsigned watch_flush;
-	unsigned flush;
 
 	struct timeval timer_tv;
 	uint32_t timer_expire[NUM_TIMERS];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6ec982e..4ef52d7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12428,7 +12428,6 @@ sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
-	struct list preserve;
 
 	/* XXX we should be able to reduce the frequency of flushes further
 	 * by checking for outgoing damage events or sync replies. Tricky,
@@ -12438,26 +12437,32 @@ sna_accel_flush_callback(CallbackListPtr *list,
 		return;
 
 	/* flush any pending damage from shadow copies to tfp clients */
-	list_init(&preserve);
-	while (!list_is_empty(&sna->dirty_pixmaps)) {
-		struct sna_pixmap *priv = list_first_entry(&sna->dirty_pixmaps,
-							   struct sna_pixmap,
-							   list);
-		if (!sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ))
-			list_move(&priv->list, &preserve);
-	}
-	if (!list_is_empty(&preserve)) {
-		sna->dirty_pixmaps.next = preserve.next;
-		preserve.next->prev = &sna->dirty_pixmaps;
-		preserve.prev->next = &sna->dirty_pixmaps;
-		sna->dirty_pixmaps.prev = preserve.prev;
+	if (!list_is_empty(&sna->dirty_pixmaps)) {
+		struct list preserve;
+
+		list_init(&preserve);
+
+		do {
+			struct sna_pixmap *priv;
+
+			priv = list_first_entry(&sna->dirty_pixmaps,
+						struct sna_pixmap, list);
+			if (!sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ))
+				list_move(&priv->list, &preserve);
+
+		} while (!list_is_empty(&sna->dirty_pixmaps));
+
+		if (!list_is_empty(&preserve)) {
+			sna->dirty_pixmaps.next = preserve.next;
+			preserve.next->prev = &sna->dirty_pixmaps;
+			preserve.prev->next = &sna->dirty_pixmaps;
+			sna->dirty_pixmaps.prev = preserve.prev;
+		}
 	}
 
 	kgem_submit(&sna->kgem);
-
 	kgem_sync(&sna->kgem);
 
-	sna->flush = false;
 	sna->kgem.flush = false;
 }
 
@@ -13121,7 +13126,7 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 	if (sna_accel_do_debug_memory(sna))
 		sna_accel_debug_memory(sna);
 
-	if (sna->flush == 0 && sna->watch_flush == 1) {
+	if (sna->watch_flush == 1) {
 		DBG(("%s: removing watchers\n", __FUNCTION__));
 		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
 		sna->watch_flush = 0;
commit b83011909aaf185f05fc2df743882c2410eff46d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 19:51:46 2012 +0100

    sna: Replace 'sync' flag with equivalent 'flush'
    
    The only difference is in semantics. Currently 'sync' was only used on
    CPU buffers for shared memory segments with 2D clients, and 'flush' on GPU
    buffers shared with DRI clients.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f80690d..eeb6774 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1104,9 +1104,6 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-
-	if (bo->sync)
-		kgem->sync = kgem->next_request;
 }
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -1650,9 +1647,6 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			}
 		}
 
-		if (kgem->sync == rq)
-			kgem->sync = NULL;
-
 		_list_del(&rq->list);
 		free(rq);
 	}
@@ -3853,31 +3847,23 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
+	assert(bo->vmap);
 	assert(!bo->reusable);
 	assert(list_is_empty(&bo->list));
 	list_add(&bo->list, &kgem->sync_list);
-	bo->sync = true;
+	bo->flush = true;
 }
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct kgem_request *rq;
 	struct kgem_bo *bo;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	rq = kgem->sync;
-	if (rq == NULL)
-		return;
-
-	if (rq == kgem->next_request)
-		_kgem_submit(kgem);
-
-	kgem_bo_sync__gtt(kgem, rq->bo);
-	list_for_each_entry(bo, &kgem->sync_list, list)
+	list_for_each_entry(bo, &kgem->sync_list, list) {
+		kgem_bo_submit(kgem, bo);
 		kgem_bo_sync__cpu(kgem, bo);
-
-	assert(kgem->sync == NULL);
+	}
 }
 
 void kgem_clear_dirty(struct kgem *kgem)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0117dcd..bd1219a 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -86,7 +86,6 @@ struct kgem_bo {
 	uint32_t io : 1;
 	uint32_t flush : 1;
 	uint32_t scanout : 1;
-	uint32_t sync : 1;
 	uint32_t purged : 1;
 };
 #define DOMAIN_NONE 0
@@ -130,7 +129,6 @@ struct kgem {
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
-	struct kgem_request *sync;
 
 	struct {
 		struct list inactive[NUM_CACHE_BUCKETS];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f52c848..6ec982e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -468,7 +468,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		sna->debug_memory.cpu_bo_allocs--;
 		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
 #endif
-		if (priv->cpu_bo->sync) {
+		if (priv->cpu_bo->flush) {
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 			sna_accel_watch_flush(sna, -1);
 		}
@@ -1190,7 +1190,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
-				assert(!priv->cpu_bo->sync);
+				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 
@@ -1200,7 +1200,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 skip_inplace_map:
 		sna_damage_destroy(&priv->gpu_damage);
-		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo)) {
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
@@ -1262,7 +1262,7 @@ skip_inplace_map:
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo))
+		if (priv->cpu_bo && !priv->cpu_bo->flush && kgem_bo_is_busy(priv->cpu_bo))
 			sna_pixmap_free_cpu(sna, priv);
 		sna_damage_destroy(&priv->gpu_damage);
 	}
@@ -1333,8 +1333,10 @@ skip_inplace_map:
 		sna_pixmap_free_gpu(sna, priv);
 		priv->undamaged = false;
 
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 
 done:
@@ -1609,7 +1611,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		}
 
-		if (priv->cpu_bo && !priv->cpu_bo->sync) {
+		if (priv->cpu_bo && !priv->cpu_bo->flush) {
 			if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 			if (sync_will_stall(priv->cpu_bo)) {
@@ -1971,8 +1973,10 @@ done:
 			}
 			priv->undamaged = false;
 		}
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 
 	if (dx | dy)
@@ -2762,7 +2766,7 @@ done:
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
 		if (priv->ptr) {
-			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
@@ -3177,7 +3181,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 			kgem_retire(&sna->kgem);
 		if (sync_will_stall(priv->cpu_bo)) {
-			if (priv->cpu_bo->sync) {
+			if (priv->cpu_bo->flush) {
 				if (sna_put_image_upload_blt(drawable, gc, region,
 							     x, y, w, h, bits, stride)) {
 					if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
@@ -3221,7 +3225,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
-				assert(!priv->cpu_bo->sync);
+				assert(!priv->cpu_bo->flush);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -3291,8 +3295,10 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 				priv->undamaged = false;
 			}
 		}
-		if (priv->flush)
+		if (priv->flush) {
 			list_move(&priv->list, &sna->dirty_pixmaps);
+			sna->kgem.flush |= 1;
+		}
 	}
 	priv->cpu = true;
 
@@ -12428,13 +12434,8 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	 * by checking for outgoing damage events or sync replies. Tricky,
 	 * and doesn't appear to mitigate the performance loss.
 	 */
-	if (!(sna->kgem.flush ||
-	      sna->kgem.sync ||
-	      !list_is_empty(&sna->dirty_pixmaps)))
-	    return;
-
-	DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__,
-	     sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps)));
+	if (!sna->kgem.flush)
+		return;
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	list_init(&preserve);
@@ -12766,7 +12767,7 @@ static void sna_accel_inactive(struct sna *sna)
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 
-			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->flush);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 			priv->cpu = false;
commit 88bee3caeaacbbb1b4d789ea3db9a3802a62b59d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 19:40:34 2012 +0100

    sna: Remove unused scanout-is-dirty? flag
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e5c97f6..f80690d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1104,7 +1104,6 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-	kgem->scanout |= bo->scanout;
 
 	if (bo->sync)
 		kgem->sync = kgem->next_request;
@@ -1990,8 +1989,6 @@ void kgem_reset(struct kgem *kgem)
 	kgem->nbatch = 0;
 	kgem->surface = kgem->batch_size;
 	kgem->mode = KGEM_NONE;
-	kgem->flush = 0;
-	kgem->scanout = 0;
 
 	kgem->next_request = __kgem_request_alloc();
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f7ee5b4..0117dcd 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -150,7 +150,6 @@ struct kgem {
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
 	uint32_t need_throttle:1;
-	uint32_t scanout:1;
 	uint32_t busy:1;
 
 	uint32_t has_vmap :1;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d3dad62..f52c848 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12457,6 +12457,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	kgem_sync(&sna->kgem);
 
 	sna->flush = false;
+	sna->kgem.flush = false;
 }
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
commit 6f60f89588caa70e7d8ed53ba453bbe8c2094a95
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 16:58:34 2012 +0100

    sna/gen6: Bump the WM thread count to 80
    
    Note that we should only do this when "WiZ Hashing" is disabled. So we
    should be checking the GT_MODE register (bring on i915_read!) to be sure
    that is safe to do so. However, it gives a big boost to performance of
    render copies...  It also causes perf benchmarks to hit thermal limits
    much quicker.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 20a09d0..044a9f9 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -56,6 +56,31 @@
 
 #define GEN6_MAX_SIZE 8192
 
+struct gt_info {
+	int max_vs_threads;
+	int max_gs_threads;
+	int max_wm_threads;
+	struct {
+		int size;
+		int max_vs_entries;
+		int max_gs_entries;
+	} urb;
+};
+
+static const struct gt_info gt1_info = {
+	.max_vs_threads = 24,
+	.max_gs_threads = 21,
+	.max_wm_threads = 40,
+	.urb = { 32, 256, 256 },
+};
+
+static const struct gt_info gt2_info = {
+	.max_vs_threads = 60,
+	.max_gs_threads = 60,
+	.max_wm_threads = 80,
+	.urb = { 64, 256, 256 },
+};
+
 static const uint32_t ps_kernel_nomask_affine[][4] = {
 #include "exa_wm_src_affine.g6b"
 #include "exa_wm_src_sample_argb.g6b"
@@ -422,7 +447,7 @@ gen6_emit_urb(struct sna *sna)
 {
 	OUT_BATCH(GEN6_3DSTATE_URB | (3 - 2));
 	OUT_BATCH(((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
-		  (256 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
+		  (sna->render_state.gen6.info->urb.max_vs_entries << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
 	OUT_BATCH((0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
 		  (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
 }
@@ -665,7 +690,7 @@ gen6_emit_wm(struct sna *sna, unsigned int kernel)
 		  wm_kernels[kernel].num_surfaces << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0);
 	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT); /* DW4 */
-	OUT_BATCH((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT |
+	OUT_BATCH((sna->render_state.gen6.info->max_wm_threads - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT |
 		  GEN6_3DSTATE_WM_DISPATCH_ENABLE |
 		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
 	OUT_BATCH(wm_kernels[kernel].num_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
@@ -4198,6 +4223,10 @@ static bool gen6_render_setup(struct sna *sna)
 	struct gen6_sampler_state *ss;
 	int i, j, k, l, m;
 
+	state->info = &gt1_info;
+	if (DEVICE_ID(sna->PciInfo) & 0x20)
+		state->info = &gt2_info; /* XXX requires GT_MODE WiZ disabled */
+
 	sna_static_stream_init(&general);
 
 	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 18ba826..c041d66 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4268,7 +4268,7 @@ static bool gen7_render_setup(struct sna *sna)
 
 	state->info = &gt1_info;
 	if (DEVICE_ID(sna->PciInfo) & 0x20)
-		state->info = &gt2_info;
+		state->info = &gt2_info; /* XXX requires GT_MODE WiZ disabled */
 
 	sna_static_stream_init(&general);
 
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index f0f4a2d..5662a79 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -388,6 +388,7 @@ enum {
 };
 
 struct gen6_render_state {
+	const struct gt_info *info;
 	struct kgem_bo *general_bo;
 
 	uint32_t vs_state;
commit fc39d4b5cb105d269c5349e479daf112f5d93580
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 16:25:54 2012 +0100

    sna/gen6: Add a simple DBG option to limit usage of either BLT/RENDER
    
    We can force the code to either select only BLT or RENDER operations -
    for those that we have a choice for at least!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 389d002..20a09d0 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -52,6 +52,7 @@
 #define NO_FILL_CLEAR 0
 
 #define NO_RING_SWITCH 0
+#define PREFER_RENDER 0
 
 #define GEN6_MAX_SIZE 8192
 
@@ -2344,6 +2345,9 @@ gen6_composite_set_target(struct sna *sna,
 
 static bool prefer_blt_ring(struct sna *sna)
 {
+	if (PREFER_RENDER)
+		return PREFER_RENDER < 0;
+
 	return sna->kgem.ring != KGEM_RENDER;
 }
 
@@ -3272,6 +3276,9 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr dst, struct kgem_bo *dst_bo,
 				   unsigned flags)
 {
+	if (PREFER_RENDER)
+		return PREFER_RENDER > 0;
+
 	return (sna->kgem.ring == KGEM_BLT ||
 		(flags & COPY_LAST && sna->kgem.mode == KGEM_NONE) ||
 		prefer_blt_bo(sna, src, src_bo) ||
@@ -3647,6 +3654,9 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
+	if (PREFER_RENDER)
+		return PREFER_RENDER < 0;
+
 	return (can_switch_rings(sna) ||
 		prefer_blt_ring(sna) ||
 		untiled_tlb_miss(bo));
commit 15d3eea7004822e5cbd48d676692e1b6a2b26d3e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 16:22:20 2012 +0100

    sna: Handle mixed bo/buffers in assertions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index b038bb1..f7ee5b4 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -420,16 +420,24 @@ int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_get_tile_size(struct kgem *kgem, int tiling,
 			int *tile_width, int *tile_height, int *tile_size);
 
+static inline int __kgem_buffer_size(struct kgem_bo *bo)
+{
+	assert(bo->proxy && bo->io);
+	return bo->size.bytes;
+}
+
 static inline int kgem_bo_size(struct kgem_bo *bo)
 {
 	assert(!(bo->proxy && bo->io));
 	return PAGE_SIZE * bo->size.pages.count;
 }
 
-static inline int kgem_buffer_size(struct kgem_bo *bo)
+static inline int __kgem_bo_size(struct kgem_bo *bo)
 {
-	assert(bo->proxy && bo->io);
-	return bo->size.bytes;
+	if (bo->io)
+		return __kgem_buffer_size(bo);
+	else
+		return kgem_bo_size(bo);
 }
 
 static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index f1df84a..b53143f 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -438,7 +438,7 @@ fallback:
 		_kgem_set_mode(kgem, KGEM_BLT);
 		tmp_box += nbox_this_time;
 	} while (1);
-	assert(offset == kgem_buffer_size(dst_bo));
+	assert(offset == __kgem_buffer_size(dst_bo));
 
 	kgem_buffer_read_sync(kgem, dst_bo);
 
@@ -470,7 +470,7 @@ fallback:
 
 		src += pitch * height;
 	} while (--nbox);
-	assert(src - (char *)ptr == kgem_buffer_size(dst_bo));
+	assert(src - (char *)ptr == __kgem_buffer_size(dst_bo));
 	kgem_bo_destroy(kgem, dst_bo);
 	sna->blt_state.fill_bo = 0;
 }
@@ -841,7 +841,7 @@ tile:
 			box++;
 			offset += pitch * height;
 		} while (--nbox_this_time);
-		assert(offset == kgem_buffer_size(src_bo));
+		assert(offset == __kgem_buffer_size(src_bo));
 
 		if (nbox) {
 			_kgem_submit(kgem);
@@ -1079,7 +1079,7 @@ fallback:
 			box++;
 			offset += pitch * height;
 		} while (--nbox_this_time);
-		assert(offset == kgem_buffer_size(src_bo));
+		assert(offset == __kgem_buffer_size(src_bo));
 
 		if (nbox) {
 			_kgem_submit(kgem);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index fd105f4..e503586 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1604,7 +1604,7 @@ do_fixup:
 					       w, h);
 			pixman_image_unref(src);
 		} else {
-			memset(ptr, 0, kgem_buffer_size(channel->bo));
+			memset(ptr, 0, __kgem_buffer_size(channel->bo));
 			dst = src;
 		}
 	}
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index b76a3c4..b8690ec 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -102,7 +102,7 @@ sna_video_buffer(struct sna *sna,
 		 struct sna_video_frame *frame)
 {
 	/* Free the current buffer if we're going to have to reallocate */
-	if (video->buf && kgem_bo_size(video->buf) < frame->size)
+	if (video->buf && __kgem_bo_size(video->buf) < frame->size)
 		sna_video_free_buffers(sna, video);
 
 	if (video->buf == NULL)
commit e4fce3b7801038e4f64d848a0995f4b441b4d2aa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 10:50:09 2012 +0100

    sna/gen4: Hookup composite spans
    
    Due to the unresolved flushing bug it is no faster (so only enable when
    we definitely can't do the operation inplace), however it does eliminate
    a chunk of CPU overhead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 622ba1f..04c351c 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2192,6 +2192,28 @@ gen2_render_composite_spans_done(struct sna *sna,
 }
 
 static bool
+gen2_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width, int16_t height, unsigned flags)
+{
+	if (op >= ARRAY_SIZE(gen2_blend_op))
+		return false;
+
+	if (gen2_composite_fallback(sna, src, NULL, dst))
+		return false;
+
+	if (need_tiling(sna, width, height)) {
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
 gen2_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -2205,29 +2227,10 @@ gen2_render_composite_spans(struct sna *sna,
 	DBG(("%s(src=(%d, %d), dst=(%d, %d), size=(%d, %d))\n", __FUNCTION__,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
-#if NO_COMPOSITE_SPANS
-	return false;
-#endif
-
-	if (op >= ARRAY_SIZE(gen2_blend_op)) {
-		DBG(("%s: fallback due to unhandled blend op: %d\n",
-		     __FUNCTION__, op));
-		return false;
-	}
-
-	if (gen2_composite_fallback(sna, src, NULL, dst))
-		return false;
-
+	assert(gen2_check_composite_spans(sna, op, src, dst, width, height, flags));
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
 		     __FUNCTION__, width, height));
-
-		if (!is_gpu(dst->pDrawable)) {
-			DBG(("%s: fallback, tiled operation not on GPU\n",
-			     __FUNCTION__));
-			return false;
-		}
-
 		return sna_tiling_composite_spans(op, src, dst,
 						  src_x, src_y, dst_x, dst_y,
 						  width, height, flags, tmp);
@@ -3134,7 +3137,10 @@ bool gen2_render_init(struct sna *sna)
 	 * use the texture combiners.
 	 */
 	render->composite = gen2_render_composite;
+#if !NO_COMPOSITE_SPANS
+	render->check_composite_spans = gen2_check_composite_spans;
 	render->composite_spans = gen2_render_composite_spans;
+#endif
 	render->fill_boxes = gen2_render_fill_boxes;
 	render->fill = gen2_render_fill;
 	render->fill_one = gen2_render_fill_one;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index e02eb89..1f6c1aa 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3368,6 +3368,28 @@ gen3_render_composite_spans_done(struct sna *sna,
 }
 
 static bool
+gen3_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width, int16_t height, unsigned flags)
+{
+	if (op >= ARRAY_SIZE(gen3_blend_op))
+		return false;
+
+	if (gen3_composite_fallback(sna, op, src, NULL, dst))
+		return false;
+
+	if (need_tiling(sna, width, height)) {
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
 gen3_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3383,29 +3405,11 @@ gen3_render_composite_spans(struct sna *sna,
 	DBG(("%s(src=(%d, %d), dst=(%d, %d), size=(%d, %d))\n", __FUNCTION__,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
-#if NO_COMPOSITE_SPANS
-	return false;
-#endif
-
-	if (op >= ARRAY_SIZE(gen3_blend_op)) {
-		DBG(("%s: fallback due to unhandled blend op: %d\n",
-		     __FUNCTION__, op));
-		return false;
-	}
-
-	if (gen3_composite_fallback(sna, op, src, NULL, dst))
-		return false;
+	assert(gen3_check_composite_spans(sna, op, src, dst, width, height, flags));
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
 		     __FUNCTION__, width, height));
-
-		if (!is_gpu(dst->pDrawable)) {
-			DBG(("%s: fallback, tiled operation not on GPU\n",
-			     __FUNCTION__));
-			return false;
-		}
-
 		return sna_tiling_composite_spans(op, src, dst,
 						  src_x, src_y, dst_x, dst_y,
 						  width, height, flags, tmp);
@@ -4666,7 +4670,10 @@ bool gen3_render_init(struct sna *sna)
 	struct sna_render *render = &sna->render;
 
 	render->composite = gen3_render_composite;
+#if !NO_COMPOSITE_SPANS
+	render->check_composite_spans = gen3_check_composite_spans;
 	render->composite_spans = gen3_render_composite_spans;
+#endif
 
 	render->video = gen3_render_video;
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 4a67728..6fcce71 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -50,6 +50,7 @@
 #define FLUSH_EVERY_VERTEX 1
 
 #define NO_COMPOSITE 0
+#define NO_COMPOSITE_SPANS 0
 #define NO_COPY 0
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
@@ -61,8 +62,13 @@
 	gen4_magic_ca_pass(sna, OP); \
 	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
 } while (0)
+#define FLUSH_NOCA() do { \
+	gen4_vertex_flush(sna); \
+	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
+} while (0)
 #else
 #define FLUSH(OP)
+#define FLUSH_NOCA()
 #endif
 
 #define GEN4_GRF_BLOCKS(nreg)    ((nreg + 15) / 16 - 1)
@@ -2494,6 +2500,320 @@ cleanup_dst:
 	return false;
 }
 
+/* A poor man's span interface. But better than nothing? */
+#if !NO_COMPOSITE_SPANS
+static bool
+gen4_composite_alpha_gradient_init(struct sna *sna,
+				   struct sna_composite_channel *channel)
+{
+	DBG(("%s\n", __FUNCTION__));
+
+	channel->filter = PictFilterNearest;
+	channel->repeat = RepeatPad;
+	channel->is_affine = true;
+	channel->is_solid  = false;
+	channel->transform = NULL;
+	channel->width  = 256;
+	channel->height = 1;
+	channel->card_format = GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
+
+	channel->bo = sna_render_get_alpha_gradient(sna);
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+	return channel->bo != NULL;
+}
+
+inline static void
+gen4_emit_composite_texcoord(struct sna *sna,
+			     const struct sna_composite_channel *channel,
+			     int16_t x, int16_t y)
+{
+	float t[3];
+
+	if (channel->is_affine) {
+		sna_get_transformed_coordinates(x + channel->offset[0],
+						y + channel->offset[1],
+						channel->transform,
+						&t[0], &t[1]);
+		OUT_VERTEX_F(t[0] * channel->scale[0]);
+		OUT_VERTEX_F(t[1] * channel->scale[1]);
+	} else {
+		t[0] = t[1] = 0; t[2] = 1;
+		sna_get_transformed_coordinates_3d(x + channel->offset[0],
+						   y + channel->offset[1],
+						   channel->transform,
+						   &t[0], &t[1], &t[2]);
+		OUT_VERTEX_F(t[0] * channel->scale[0]);
+		OUT_VERTEX_F(t[1] * channel->scale[1]);
+		OUT_VERTEX_F(t[2]);
+	}
+}
+
+inline static void
+gen4_emit_composite_texcoord_affine(struct sna *sna,
+				    const struct sna_composite_channel *channel,
+				    int16_t x, int16_t y)
+{
+	float t[2];
+
+	sna_get_transformed_coordinates(x + channel->offset[0],
+					y + channel->offset[1],
+					channel->transform,
+					&t[0], &t[1]);
+	OUT_VERTEX_F(t[0] * channel->scale[0]);
+	OUT_VERTEX_F(t[1] * channel->scale[1]);
+}
+
+inline static void
+gen4_emit_composite_spans_vertex(struct sna *sna,
+				 const struct sna_composite_spans_op *op,
+				 int16_t x, int16_t y)
+{
+	OUT_VERTEX(x, y);
+	gen4_emit_composite_texcoord(sna, &op->base.src, x, y);
+}
+
+fastcall static void
+gen4_emit_composite_spans_primitive(struct sna *sna,
+				    const struct sna_composite_spans_op *op,
+				    const BoxRec *box,
+				    float opacity)
+{
+	gen4_emit_composite_spans_vertex(sna, op, box->x2, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+
+	gen4_emit_composite_spans_vertex(sna, op, box->x1, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+
+	gen4_emit_composite_spans_vertex(sna, op, box->x1, box->y1);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(0);
+	if (!op->base.is_affine)
+		OUT_VERTEX_F(1);
+}
+
+fastcall static void
+gen4_emit_composite_spans_solid(struct sna *sna,
+				const struct sna_composite_spans_op *op,
+				const BoxRec *box,
+				float opacity)
+{
+	OUT_VERTEX(box->x2, box->y2);
+	OUT_VERTEX_F(1); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y2);
+	OUT_VERTEX_F(0); OUT_VERTEX_F(1);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y1);
+	OUT_VERTEX_F(0); OUT_VERTEX_F(0);
+	OUT_VERTEX_F(opacity); OUT_VERTEX_F(0);
+}
+
+fastcall static void
+gen4_emit_composite_spans_affine(struct sna *sna,
+				 const struct sna_composite_spans_op *op,
+				 const BoxRec *box,
+				 float opacity)
+{
+	OUT_VERTEX(box->x2, box->y2);
+	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x2, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y2);
+	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x1, box->y2);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(1);
+
+	OUT_VERTEX(box->x1, box->y1);
+	gen4_emit_composite_texcoord_affine(sna, &op->base.src,
+					    box->x1, box->y1);
+	OUT_VERTEX_F(opacity);
+	OUT_VERTEX_F(0);
+}
+
+fastcall static void
+gen4_render_composite_spans_box(struct sna *sna,
+				const struct sna_composite_spans_op *op,
+				const BoxRec *box, float opacity)
+{
+	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
+	     __FUNCTION__,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y,
+	     box->x1, box->y1,
+	     box->x2 - box->x1,
+	     box->y2 - box->y1));
+
+	gen4_get_rectangles(sna, &op->base, 1, gen4_bind_surfaces);
+	op->prim_emit(sna, op, box, opacity);
+	FLUSH_NOCA();
+}
+
+static void
+gen4_render_composite_spans_boxes(struct sna *sna,
+				  const struct sna_composite_spans_op *op,
+				  const BoxRec *box, int nbox,
+				  float opacity)
+{
+	DBG(("%s: nbox=%d, src=+(%d, %d), opacity=%f, dst=+(%d, %d)\n",
+	     __FUNCTION__, nbox,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y));
+
+	do {
+		gen4_render_composite_spans_box(sna, op, box++, opacity);
+	} while (--nbox);
+}
+
+fastcall static void
+gen4_render_composite_spans_done(struct sna *sna,
+				 const struct sna_composite_spans_op *op)
+{
+	if (sna->render_state.gen4.vertex_offset)
+		gen4_vertex_flush(sna);
+
+	DBG(("%s()\n", __FUNCTION__));
+
+	if (op->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
+
+	sna_render_composite_redirect_done(sna, &op->base);
+}
+
+static bool
+gen4_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width,  int16_t height,
+			   unsigned flags)
+{
+	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
+		return false;
+
+	if (op >= ARRAY_SIZE(gen4_blend_op))
+		return false;
+
+	if (gen4_composite_fallback(sna, src, NULL, dst))
+		return false;
+
+	if (!is_gpu(dst->pDrawable))
+		return false;
+
+	return true;
+}
+
+static bool
+gen4_render_composite_spans(struct sna *sna,
+			    uint8_t op,
+			    PicturePtr src,
+			    PicturePtr dst,
+			    int16_t src_x,  int16_t src_y,
+			    int16_t dst_x,  int16_t dst_y,
+			    int16_t width,  int16_t height,
+			    unsigned flags,
+			    struct sna_composite_spans_op *tmp)
+{
+	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
+	     width, height, flags, sna->kgem.ring));
+
+	assert(gen4_check_composite_spans(sna, op, src, dst, width, height, flags));
+
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
+	}
+
+	tmp->base.op = op;
+	if (!gen4_composite_set_target(dst, &tmp->base))
+		return false;
+	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
+
+	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
+		if (!sna_render_composite_redirect(sna, &tmp->base,
+						   dst_x, dst_y, width, height))
+			return false;
+	}
+
+	switch (gen4_composite_picture(sna, src, &tmp->base.src,
+				       src_x, src_y,
+				       width, height,
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
+	case -1:
+		goto cleanup_dst;
+	case 0:
+		gen4_composite_solid_init(sna, &tmp->base.src, 0);
+		/* fall through to fixup */
+	case 1:
+		gen4_composite_channel_convert(&tmp->base.src);
+		break;
+	}
+
+	tmp->base.mask.bo = NULL;
+	tmp->base.is_affine = tmp->base.src.is_affine;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
+
+	gen4_composite_alpha_gradient_init(sna, &tmp->base.mask);
+
+	tmp->prim_emit = gen4_emit_composite_spans_primitive;
+	if (tmp->base.src.is_solid)
+		tmp->prim_emit = gen4_emit_composite_spans_solid;
+	else if (tmp->base.is_affine)
+		tmp->prim_emit = gen4_emit_composite_spans_affine;
+	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
+	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
+
+	tmp->base.u.gen4.wm_kernel =
+		gen4_choose_composite_kernel(tmp->base.op,
+					     true, false,
+					     tmp->base.is_affine);
+	tmp->base.u.gen4.ve_id = 1 << 1 | tmp->base.is_affine;
+
+	tmp->box   = gen4_render_composite_spans_box;
+	tmp->boxes = gen4_render_composite_spans_boxes;
+	tmp->done  = gen4_render_composite_spans_done;
+
+	if (!kgem_check_bo(&sna->kgem,
+			   tmp->base.dst.bo, tmp->base.src.bo,
+			   NULL))  {
+		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
+	}
+
+	gen4_bind_surfaces(sna, &tmp->base);
+	gen4_align_vertex(sna, &tmp->base);
+	return true;
+
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
+cleanup_dst:
+	if (tmp->base.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
+	return false;
+}
+#endif
+
 static void
 gen4_copy_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 {
@@ -3472,6 +3792,10 @@ bool gen4_render_init(struct sna *sna)
 		return false;
 
 	sna->render.composite = gen4_render_composite;
+#if !NO_COMPOSITE_SPANS
+	sna->render.check_composite_spans = gen4_check_composite_spans;
+	sna->render.composite_spans = gen4_render_composite_spans;
+#endif
 	sna->render.video = gen4_render_video;
 
 	sna->render.copy_boxes = gen4_render_copy_boxes;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 51614d4..d776e77 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2729,19 +2729,10 @@ gen5_render_composite_spans_done(struct sna *sna,
 }
 
 static bool
-gen5_render_composite_spans(struct sna *sna,
-			    uint8_t op,
-			    PicturePtr src,
-			    PicturePtr dst,
-			    int16_t src_x,  int16_t src_y,
-			    int16_t dst_x,  int16_t dst_y,
-			    int16_t width,  int16_t height,
-			    unsigned flags,
-			    struct sna_composite_spans_op *tmp)
+gen5_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width, int16_t height, unsigned flags)
 {
-	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
-	     width, height, flags, sna->kgem.ring));
-
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
 		return false;
 
@@ -2752,15 +2743,35 @@ gen5_render_composite_spans(struct sna *sna,
 		return false;
 
 	if (need_tiling(sna, width, height)) {
-		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
-		     __FUNCTION__, width, height));
-
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
 			return false;
 		}
+	}
+
+	return true;
+}
 
+static bool
+gen5_render_composite_spans(struct sna *sna,
+			    uint8_t op,
+			    PicturePtr src,
+			    PicturePtr dst,
+			    int16_t src_x,  int16_t src_y,
+			    int16_t dst_x,  int16_t dst_y,
+			    int16_t width,  int16_t height,
+			    unsigned flags,
+			    struct sna_composite_spans_op *tmp)
+{
+	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
+	     width, height, flags, sna->kgem.ring));
+
+	assert(gen5_check_composite_spans(sna, op, src, dst, width, height, flags));
+
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
 		return sna_tiling_composite_spans(op, src, dst,
 						  src_x, src_y, dst_x, dst_y,
 						  width, height, flags, tmp);
@@ -3924,6 +3935,7 @@ bool gen5_render_init(struct sna *sna)
 
 	sna->render.composite = gen5_render_composite;
 #if !NO_COMPOSITE_SPANS
+	sna->render.check_composite_spans = gen5_check_composite_spans;
 	sna->render.composite_spans = gen5_render_composite_spans;
 #endif
 	sna->render.video = gen5_render_video;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 834cab3..389d002 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3084,19 +3084,10 @@ gen6_render_composite_spans_done(struct sna *sna,
 }
 
 static bool
-gen6_render_composite_spans(struct sna *sna,
-			    uint8_t op,
-			    PicturePtr src,
-			    PicturePtr dst,
-			    int16_t src_x,  int16_t src_y,
-			    int16_t dst_x,  int16_t dst_y,
-			    int16_t width,  int16_t height,
-			    unsigned flags,
-			    struct sna_composite_spans_op *tmp)
+gen6_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width, int16_t height, unsigned flags)
 {
-	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
-	     width, height, flags, sna->kgem.ring));
-
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
 		return false;
 
@@ -3107,15 +3098,35 @@ gen6_render_composite_spans(struct sna *sna,
 		return false;
 
 	if (need_tiling(sna, width, height)) {
-		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
-		     __FUNCTION__, width, height));
-
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
 			return false;
 		}
+	}
+
+	return true;
+}
 
+static bool
+gen6_render_composite_spans(struct sna *sna,
+			    uint8_t op,
+			    PicturePtr src,
+			    PicturePtr dst,
+			    int16_t src_x,  int16_t src_y,
+			    int16_t dst_x,  int16_t dst_y,
+			    int16_t width,  int16_t height,
+			    unsigned flags,
+			    struct sna_composite_spans_op *tmp)
+{
+	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
+	     width, height, flags, sna->kgem.ring));
+
+	assert(gen6_check_composite_spans(sna, op, src, dst, width, height, flags));
+
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
 		return sna_tiling_composite_spans(op, src, dst,
 						  src_x, src_y, dst_x, dst_y,
 						  width, height, flags, tmp);
@@ -4232,6 +4243,7 @@ bool gen6_render_init(struct sna *sna)
 	sna->render.composite = gen6_render_composite;
 #endif
 #if !NO_COMPOSITE_SPANS
+	sna->render.check_composite_spans = gen6_check_composite_spans;
 	sna->render.composite_spans = gen6_render_composite_spans;
 #endif
 	sna->render.video = gen6_render_video;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index aedc934..18ba826 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3166,6 +3166,28 @@ gen7_render_composite_spans_done(struct sna *sna,
 }
 
 static bool
+gen7_check_composite_spans(struct sna *sna,
+			   uint8_t op, PicturePtr src, PicturePtr dst,
+			   int16_t width, int16_t height, unsigned flags)
+{
+	if (op >= ARRAY_SIZE(gen7_blend_op))
+		return false;
+
+	if (gen7_composite_fallback(sna, src, NULL, dst))
+		return false;
+
+	if (need_tiling(sna, width, height)) {
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static bool
 gen7_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3179,22 +3201,11 @@ gen7_render_composite_spans(struct sna *sna,
 	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
 	     width, height, flags, sna->kgem.ring));
 
-	if (op >= ARRAY_SIZE(gen7_blend_op))
-		return false;
-
-	if (gen7_composite_fallback(sna, src, NULL, dst))
-		return false;
+	assert(gen7_check_composite_spans(sna, op, src, dst, width, height, flags));
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
 		     __FUNCTION__, width, height));
-
-		if (!is_gpu(dst->pDrawable)) {
-			DBG(("%s: fallback, tiled operation not on GPU\n",
-			     __FUNCTION__));
-			return false;
-		}
-
 		return sna_tiling_composite_spans(op, src, dst,
 						  src_x, src_y, dst_x, dst_y,
 						  width, height, flags, tmp);
@@ -4313,6 +4324,7 @@ bool gen7_render_init(struct sna *sna)
 	sna->render.composite = gen7_render_composite;
 #endif
 #if !NO_COMPOSITE_SPANS
+	sna->render.check_composite_spans = gen7_check_composite_spans;
 	sna->render.composite_spans = gen7_render_composite_spans;
 #endif
 	sna->render.video = gen7_render_video;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index cb40cd3..fd105f4 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -91,14 +91,22 @@ no_render_composite(struct sna *sna,
 			      dst_x, dst_y,
 			      width, height,
 			      tmp))
-		return TRUE;
+		return true;
 
-	return FALSE;
+	return false;
 	(void)mask_x;
 	(void)mask_y;
 }
 
 static bool
+no_render_check_composite_spans(struct sna *sna,
+				uint8_t op, PicturePtr src, PicturePtr dst,
+				int16_t width,  int16_t height, unsigned flags)
+{
+	return false;
+}
+
+static bool
 no_render_copy_boxes(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		     PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -107,7 +115,7 @@ no_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (n=%d)\n", __FUNCTION__, n));
 
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -128,9 +136,9 @@ no_render_copy(struct sna *sna, uint8_t alu,
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo, dst->drawable.bitsPerPixel,
 			 tmp))
-		return TRUE;
+		return true;
 
-	return FALSE;
+	return false;
 }
 
 static bool
@@ -160,7 +168,7 @@ no_render_fill_boxes(struct sna *sna,
 	}
 
 	if (op != PictOpSrc)
-		return FALSE;
+		return false;
 
 	if (alu == GXcopy &&
 	    !sna_get_pixel_from_rgba(&pixel,
@@ -169,7 +177,7 @@ no_render_fill_boxes(struct sna *sna,
 				     color->blue,
 				     color->alpha,
 				     format))
-		return FALSE;
+		return false;
 
 	return sna_blt_fill_boxes(sna, alu,
 				  dst_bo, dst->drawable.bitsPerPixel,
@@ -267,6 +275,7 @@ void no_render_init(struct sna *sna)
 	render->vertex_size = ARRAY_SIZE(render->vertex_data);
 
 	render->composite = no_render_composite;
+	render->check_composite_spans = no_render_check_composite_spans;
 
 	render->copy_boxes = no_render_copy_boxes;
 	render->copy = no_render_copy;
@@ -1314,7 +1323,7 @@ sna_render_picture_convolve(struct sna *sna,
 	channel->width  = w;
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNone;
-	channel->is_affine = TRUE;
+	channel->is_affine = true;
 	channel->transform = NULL;
 	channel->scale[0] = 1.f / w;
 	channel->scale[1] = 1.f / h;
@@ -1381,7 +1390,7 @@ sna_render_picture_flatten(struct sna *sna,
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNone;
 	channel->pict_format = PIXMAN_a8r8g8b8;
-	channel->is_affine = TRUE;
+	channel->is_affine = true;
 	channel->transform = NULL;
 	channel->scale[0] = 1.f / w;
 	channel->scale[1] = 1.f / h;
@@ -1440,7 +1449,7 @@ sna_render_picture_approximate_gradient(struct sna *sna,
 		return 0;
 	}
 
-	src = image_from_pict(picture, FALSE, &dx, &dy);
+	src = image_from_pict(picture, false, &dx, &dy);
 	if (src == NULL) {
 		pixman_image_unref(dst);
 		kgem_bo_destroy(&sna->kgem, channel->bo);
@@ -1468,7 +1477,7 @@ sna_render_picture_approximate_gradient(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNone;
-	channel->is_affine = TRUE;
+	channel->is_affine = true;
 
 	channel->scale[0] = 1.f/w;
 	channel->scale[1] = 1.f/h;
@@ -1561,7 +1570,7 @@ do_fixup:
 		return 0;
 	}
 
-	src = image_from_pict(picture, FALSE, &dx, &dy);
+	src = image_from_pict(picture, false, &dx, &dy);
 	if (src == NULL) {
 		pixman_image_unref(dst);
 		kgem_bo_destroy(&sna->kgem, channel->bo);
@@ -1606,7 +1615,7 @@ do_fixup:
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNone;
-	channel->is_affine = TRUE;
+	channel->is_affine = true;
 
 	channel->scale[0] = 1.f/w;
 	channel->scale[1] = 1.f/h;
@@ -1750,7 +1759,7 @@ sna_render_composite_redirect(struct sna *sna,
 	struct kgem_bo *bo;
 
 #if NO_REDIRECT
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s: target too large (%dx%d), copying to temporary %dx%d, max %d\n",
@@ -1760,11 +1769,11 @@ sna_render_composite_redirect(struct sna *sna,
 	     sna->render.max_3d_size));
 
 	if (!width || !height)
-		return FALSE;
+		return false;
 
 	if (width  > sna->render.max_3d_size ||
 	    height > sna->render.max_3d_size)
-		return FALSE;
+		return false;
 
 	if (op->dst.bo->pitch <= sna->render.max_3d_pitch) {
 		BoxRec box;
@@ -1842,7 +1851,7 @@ sna_render_composite_redirect(struct sna *sna,
 				t->real_bo = NULL;
 				if (t->damage)
 					__sna_damage_destroy(t->damage);
-				return FALSE;
+				return false;
 			}
 
 			assert(op->dst.bo != t->real_bo);
@@ -1852,7 +1861,7 @@ sna_render_composite_redirect(struct sna *sna,
 			op->dst.y -= box.y1;
 			op->dst.width  = w;
 			op->dst.height = h;
-			return TRUE;
+			return true;
 		}
 	}
 
@@ -1866,7 +1875,7 @@ sna_render_composite_redirect(struct sna *sna,
 					       width, height, bpp),
 			    CREATE_TEMPORARY);
 	if (!bo)
-		return FALSE;
+		return false;
 
 	t->box.x1 = x + op->dst.x;
 	t->box.y1 = y + op->dst.y;
@@ -1881,7 +1890,7 @@ sna_render_composite_redirect(struct sna *sna,
 				bo, -t->box.x1, -t->box.y1,
 				bpp, &t->box, 1)) {
 		kgem_bo_destroy(&sna->kgem, bo);
-		return FALSE;
+		return false;
 	}
 
 	t->real_bo = op->dst.bo;
@@ -1897,7 +1906,7 @@ sna_render_composite_redirect(struct sna *sna,
 	op->dst.y = -y;
 	op->dst.width  = width;
 	op->dst.height = height;
-	return TRUE;
+	return true;
 }
 
 void
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 41cb02b..f0f4a2d 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -194,6 +194,9 @@ struct sna_render {
 			  int16_t w, int16_t h,
 			  struct sna_composite_op *tmp);
 
+	bool (*check_composite_spans)(struct sna *sna, uint8_t op,
+				      PicturePtr dst, PicturePtr src,
+				      int16_t w, int16_t h, unsigned flags);
 	bool (*composite_spans)(struct sna *sna, uint8_t op,
 				PicturePtr dst, PicturePtr src,
 				int16_t src_x, int16_t src_y,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c3a5447..efb53dd 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3673,7 +3673,9 @@ composite_unaligned_boxes(struct sna *sna,
 	if (ntrap > 1 && maskFormat)
 		return false;
 
-	if (force_fallback || !sna->render.composite_spans) {
+	if (force_fallback ||
+	    !sna->render.check_composite_spans(sna, op, src, dst, 0, 0,
+					       COMPOSITE_SPANS_RECTILINEAR)) {
 fallback:
 		return composite_unaligned_boxes_fallback(op, src, dst,
 							  src_x, src_y,
@@ -3721,6 +3723,15 @@ fallback:
 		return true;
 	}
 
+	if (!sna->render.check_composite_spans(sna, op, src, dst,
+					       clip.extents.x2 - clip.extents.x1,
+					       clip.extents.y2 - clip.extents.y1,
+					       COMPOSITE_SPANS_RECTILINEAR)) {
+		DBG(("%s: fallback -- composite spans not supported\n",
+		     __FUNCTION__));
+		goto fallback;
+	}
+
 	c = NULL;
 	if (extents.x2 - extents.x1 > clip.extents.x2 - clip.extents.x1 ||
 	    extents.y2 - extents.y1 > clip.extents.y2 - clip.extents.y1)
@@ -4016,7 +4027,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	sna = to_sna_from_drawable(dst->pDrawable);
-	if (!sna->render.composite_spans) {
+	if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, flags)) {
 		DBG(("%s: fallback -- composite spans not supported\n",
 		     __FUNCTION__));
 		return false;
@@ -4053,6 +4064,15 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		return true;
 	}
 
+	if (!sna->render.check_composite_spans(sna, op, src, dst,
+					       clip.extents.x2 - clip.extents.x1,
+					       clip.extents.y2 - clip.extents.y1,
+					       flags)) {
+		DBG(("%s: fallback -- composite spans not supported\n",
+		     __FUNCTION__));
+		return false;
+	}
+
 	extents = *RegionExtents(&clip);
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
@@ -5633,11 +5653,8 @@ trap_span_converter(PicturePtr dst,
 	struct sna_composite_spans_op tmp;
 	struct tor tor;
 	BoxRec extents;
-	PicturePtr src;
-	xRenderColor white;
 	pixman_region16_t *clip;
-	int dx, dy;
-	int n, error;
+	int dx, dy, n;
 
 	if (NO_SCAN_CONVERTER)
 		return false;
@@ -5649,15 +5666,15 @@ trap_span_converter(PicturePtr dst,
 		return mono_trap_span_converter(dst, src_x, src_y, ntrap, trap);
 
 	sna = to_sna_from_drawable(dst->pDrawable);
-	if (!sna->render.composite_spans) {
+	if (!sna->render.check_composite_spans(sna, PictOpAdd, sna->render.white_picture, dst,
+					       dst->pCompositeClip->extents.x2 - dst->pCompositeClip->extents.x1,
+					       dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1,
+					       0)) {
 		DBG(("%s: fallback -- composite spans not supported\n",
 		     __FUNCTION__));
 		return false;
 	}
 
-	DBG(("%s: extents (%d, %d), (%d, %d)\n",
-	     __FUNCTION__, extents.x1, extents.y1, extents.x2, extents.y2));
-
 	clip = dst->pCompositeClip;
 	extents = *RegionExtents(clip);
 	dx = dst->pDrawable->x;
@@ -5669,13 +5686,8 @@ trap_span_converter(PicturePtr dst,
 	     extents.x2, extents.y2,
 	     dx, dy));
 
-	white.red = white.green = white.blue = white.alpha = 0xffff;
-	src = CreateSolidPicture(0, &white, &error);
-	if (src == NULL)
-		return true;
-
 	memset(&tmp, 0, sizeof(tmp));
-	if (!sna->render.composite_spans(sna, PictOpAdd, src, dst,
+	if (!sna->render.composite_spans(sna, PictOpAdd, sna->render.white_picture, dst,
 					 0, 0,
 					 extents.x1,  extents.y1,
 					 extents.x2 - extents.x1,
@@ -5684,7 +5696,6 @@ trap_span_converter(PicturePtr dst,
 					 &tmp)) {
 		DBG(("%s: fallback -- composite spans render op not supported\n",
 		     __FUNCTION__));
-		FreePicture(src, 0);
 		return false;
 	}
 
@@ -5723,7 +5734,6 @@ trap_span_converter(PicturePtr dst,
 skip:
 	tor_fini(&tor);
 	tmp.done(sna, &tmp);
-	FreePicture(src, 0);
 	return true;
 }
 
@@ -6175,7 +6185,7 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	sna = to_sna_from_drawable(dst->pDrawable);
-	if (!sna->render.composite_spans) {
+	if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, 0)) {
 		DBG(("%s: fallback -- composite spans not supported\n",
 		     __FUNCTION__));
 		return false;
@@ -6212,6 +6222,15 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		return true;
 	}
 
+	if (!sna->render.check_composite_spans(sna, op, src, dst,
+					       clip.extents.x2 - clip.extents.x1,
+					       clip.extents.y2 - clip.extents.y1,
+					       0)) {
+		DBG(("%s: fallback -- composite spans not supported\n",
+		     __FUNCTION__));
+		return false;
+	}
+
 	extents = *RegionExtents(&clip);
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
@@ -6531,7 +6550,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	sna = to_sna_from_drawable(dst->pDrawable);
-	if (!sna->render.composite_spans) {
+	if (!sna->render.check_composite_spans(sna, op, src, dst, 0, 0, 0)) {
 		DBG(("%s: fallback -- composite spans not supported\n",
 		     __FUNCTION__));
 		return false;
@@ -6568,6 +6587,15 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		return true;
 	}
 
+	if (!sna->render.check_composite_spans(sna, op, src, dst,
+					       clip.extents.x2 - clip.extents.x1,
+					       clip.extents.y2 - clip.extents.y1,
+					       0)) {
+		DBG(("%s: fallback -- composite spans not supported\n",
+		     __FUNCTION__));
+		return false;
+	}
+
 	extents = *RegionExtents(&clip);
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
commit 5f138176bf15682324d2e8cfa9fac3e49604bf8f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 10:32:33 2012 +0100

    sna: Tweak order of screen re-initialisation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b8c8ac..d3dad62 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12990,6 +12990,11 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	assert(screen->SetWindowPixmap == NULL);
 	screen->SetWindowPixmap = sna_set_window_pixmap;
 
+	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+		ShmRegisterFuncs(screen, &shm_funcs);
+	else
+		ShmRegisterFbFuncs(screen);
+
 	if (!sna_picture_init(screen))
 		return false;
 
@@ -13033,11 +13038,6 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 
 bool sna_accel_create(ScreenPtr screen, struct sna *sna)
 {
-	if (USE_SHM_VMAP && sna->kgem.has_vmap)
-		ShmRegisterFuncs(screen, &shm_funcs);
-	else
-		ShmRegisterFbFuncs(screen);
-
 	if (!sna_glyphs_create(sna))
 		return false;
 
commit 9bd0f8f3e7783d7a6bab707fc08ec96830cd5809
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 09:40:07 2012 +0100

    i810: Correct the double negative and enable XAA when available
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 33b5aa4..b7212cf 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -1694,7 +1694,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
    if (pI810->LpRing->mem.Size != 0) {
       I810SetRingRegs(scrn);
 
-      if (pI810->noAccel && !I810AccelInit(screen)) {
+      if (!pI810->noAccel && !I810AccelInit(screen)) {
 	 xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		    "Hardware acceleration initialization failed\n");
       }
commit d145d0e1459f578eea621e6944814642e5dd431f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 09:23:10 2012 +0100

    i810: Handle initialisation without the XAA module present at runtime
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index 823631f..de250ab 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -267,7 +267,11 @@ extern Bool I810CleanupDma(ScrnInfoPtr pScrn);
 #define I810REGPTR(p) (&(I810PTR(p)->ModeReg))
 
 extern Bool I810CursorInit(ScreenPtr pScreen);
+#ifdef HAVE_XAA_H
 extern Bool I810AccelInit(ScreenPtr pScreen);
+#else
+static inline  Bool I810AccelInit(ScreenPtr pScreen) { return TRUE; }
+#endif
 extern void I810SetPIOAccess(I810Ptr pI810);
 extern void I810SetMMIOAccess(I810Ptr pI810);
 extern unsigned int I810CalcWatermark(ScrnInfoPtr pScrn, double freq,
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index bd80077..33b5aa4 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -343,12 +343,8 @@ I810PreInit(ScrnInfoPtr scrn, int flags)
    if (xf86ReturnOptValBool(pI810->Options, OPTION_NOACCEL, FALSE))
       pI810->noAccel = TRUE;
 
-   if (!pI810->noAccel) {
-      if (!xf86LoadSubModule(scrn, "xaa")) {
-	 I810FreeRec(scrn);
-	 return FALSE;
-      }
-   }
+   if (!pI810->noAccel && !xf86LoadSubModule(scrn, "xaa"))
+      pI810->noAccel = TRUE;
    
 #ifdef HAVE_DRI1
    pI810->directRenderingDisabled =
@@ -1089,7 +1085,6 @@ DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
    hwp->writeCrtc(hwp, IO_CTNL, temp);
 }
 
-#ifdef HAVE_XAA_H
 static void
 I810SetRingRegs(ScrnInfoPtr scrn)
 {
@@ -1109,7 +1104,6 @@ I810SetRingRegs(ScrnInfoPtr scrn)
    itemp |= ((pI810->LpRing->mem.Size - 4096) | RING_NO_REPORT | RING_VALID);
    OUTREG(LP_RING + RING_LEN, itemp);
 }
-#endif
 
 static void
 I810Restore(ScrnInfoPtr scrn)
@@ -1697,19 +1691,16 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
       return FALSE;
    }
 
-#ifdef HAVE_XAA_H
-   if (!xf86ReturnOptValBool(pI810->Options, OPTION_NOACCEL, FALSE)) {
-      if (pI810->LpRing->mem.Size != 0) {
-	 I810SetRingRegs(scrn);
-
-	 if (!I810AccelInit(screen)) {
-	    xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-		       "Hardware acceleration initialization failed\n");
-	 }  else /* PK added 16.02.2004 */
-	     I810EmitFlush(scrn);
+   if (pI810->LpRing->mem.Size != 0) {
+      I810SetRingRegs(scrn);
+
+      if (pI810->noAccel && !I810AccelInit(screen)) {
+	 xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+		    "Hardware acceleration initialization failed\n");
       }
+
+      I810EmitFlush(scrn);
    }
-#endif
 
    miInitializeBackingStore(screen);
    xf86SetBackingStore(screen);
commit 7a3b98e05b706548527e73b2008600391c601a62
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 19 08:42:05 2012 +0100

    sna: Re-register the SHM funcs every server generation
    
    As the SHM layer hooks into the CloseScreen chain to free its privates,
    we then need to call the registration function again on the next
    generation to ensure that the private is reallocated before use.
    
    Reported-by: Pawel Sikora <pluto at agmk.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52255
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 194c712..3ced2b4 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -569,7 +569,7 @@ void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
-bool sna_accel_create(struct sna *sna);
+bool sna_accel_create(ScreenPtr screen, struct sna *sna);
 void sna_copy_fbcon(struct sna *sna);
 
 bool sna_composite_create(struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d4b9f37..1b8c8ac 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12990,11 +12990,6 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	assert(screen->SetWindowPixmap == NULL);
 	screen->SetWindowPixmap = sna_set_window_pixmap;
 
-	if (USE_SHM_VMAP && sna->kgem.has_vmap)
-		ShmRegisterFuncs(screen, &shm_funcs);
-	else
-		ShmRegisterFbFuncs(screen);
-
 	if (!sna_picture_init(screen))
 		return false;
 
@@ -13036,8 +13031,13 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	return true;
 }
 
-bool sna_accel_create(struct sna *sna)
+bool sna_accel_create(ScreenPtr screen, struct sna *sna)
 {
+	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+		ShmRegisterFuncs(screen, &shm_funcs);
+	else
+		ShmRegisterFbFuncs(screen);
+
 	if (!sna_glyphs_create(sna))
 		return false;
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 3871ab6..21e967a 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -168,6 +168,12 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 	free(screen->devPrivate);
 	screen->devPrivate = NULL;
 
+	if (!sna_accel_create(screen, sna)) {
+		xf86DrvMsg(screen->myNum, X_ERROR,
+			   "[intel] Failed to initialise acceleration routines\n");
+		goto cleanup_front;
+	}
+
 	sna->front = screen->CreatePixmap(screen,
 					  screen->width,
 					  screen->height,
@@ -194,12 +200,6 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 
 	screen->SetScreenPixmap(sna->front);
 
-	if (!sna_accel_create(sna)) {
-		xf86DrvMsg(screen->myNum, X_ERROR,
-			   "[intel] Failed to initialise acceleration routines\n");
-		goto cleanup_front;
-	}
-
 	sna_copy_fbcon(sna);
 
 	if (!sna_enter_vt(VT_FUNC_ARGS(0))) {
commit 4bcab83bbddf8a698aa83f5038f9ab019a404bd5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 22:19:45 2012 +0100

    i810: DRI is not dependent upon XAA
    
    The blit routines it uses are independent of the XAA driver interface
    and can be used separately.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index f2643f5..3bdb2ce 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -1424,5 +1424,7 @@ I810DRIEnter(ScrnInfoPtr pScrn)
 			pI810->CursorARGBStart) != 0)
 	    return FALSE;
    }
+
+   I810SelectBuffer(pScrn, I810_SELECT_FRONT);
    return TRUE;
 }
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 9821612..bd80077 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -355,11 +355,7 @@ I810PreInit(ScrnInfoPtr scrn, int flags)
      !xf86ReturnOptValBool(pI810->Options, OPTION_DRI, TRUE);
 
    if (!pI810->directRenderingDisabled) {
-     if (pI810->noAccel) {
-       xf86DrvMsg(scrn->scrnIndex, X_WARNING, "DRI is disabled because it "
-		  "needs 2D acceleration.\n");
-       pI810->directRenderingDisabled=TRUE;
-     } else if (scrn->depth!=16) {
+     if (scrn->depth!=16) {
        xf86DrvMsg(scrn->scrnIndex, X_WARNING, "DRI is disabled because it "
 		  "runs only at 16-bit depth.\n");
        pI810->directRenderingDisabled=TRUE;
commit 558c8251299b786cab1ac83dbd35f077224b5950
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 22:00:30 2012 +0100

    sna/gen4+: Drop unsupported source formats
    
    Once again I've confused existence of the enum with the ability of the
    sampler to read that format.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c985c8d..4a67728 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -527,14 +527,10 @@ static uint32_t gen4_get_card_format(PictFormat format)
 		return GEN4_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_x2r10g10b10:
 		return GEN4_SURFACEFORMAT_B10G10R10X2_UNORM;
-	case PICT_a2b10g10r10:
-		return GEN4_SURFACEFORMAT_R10G10B10A2_UNORM;
 	case PICT_r8g8b8:
 		return GEN4_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN4_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-		return GEN4_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN4_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index f611a51..51614d4 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -521,14 +521,10 @@ static uint32_t gen5_get_card_format(PictFormat format)
 		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_x2r10g10b10:
 		return GEN5_SURFACEFORMAT_B10G10R10X2_UNORM;
-	case PICT_a2b10g10r10:
-		return GEN5_SURFACEFORMAT_R10G10B10A2_UNORM;
 	case PICT_r8g8b8:
 		return GEN5_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-		return GEN5_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1ff54ac..834cab3 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -284,14 +284,10 @@ static uint32_t gen6_get_card_format(PictFormat format)
 		return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_x2r10g10b10:
 		return GEN6_SURFACEFORMAT_B10G10R10X2_UNORM;
-	case PICT_a2b10g10r10:
-		return GEN6_SURFACEFORMAT_R10G10B10A2_UNORM;
 	case PICT_r8g8b8:
 		return GEN6_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN6_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-		return GEN6_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index cb497b9..aedc934 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -316,14 +316,10 @@ static uint32_t gen7_get_card_format(PictFormat format)
 		return GEN7_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_x2r10g10b10:
 		return GEN7_SURFACEFORMAT_B10G10R10X2_UNORM;
-	case PICT_a2b10g10r10:
-		return GEN7_SURFACEFORMAT_R10G10B10A2_UNORM;
 	case PICT_r8g8b8:
 		return GEN7_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN7_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-		return GEN7_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN7_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
commit 9f3b3098c9f870d303a9de2b9c0db119eff5a865
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 21:39:47 2012 +0100

    sna/dri: Allow DRI2 to be loaded even if we are wedged
    
    Just because the GPU is spitting EIO at us does not necessarily imply
    that a DRI client will also suffer. Spit out a warning for later bug
    reporting and let them find out for themselves!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 65c3550..4ced0eb 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -2146,8 +2146,7 @@ bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 
 	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-			   "cannot enable DRI2 whilst the GPU is wedged\n");
-		return false;
+			   "loading DRI2 whilst the GPU is wedged.\n");
 	}
 
 	if (xf86LoaderCheckSymbol("DRI2Version"))
commit 15b7191fd363e9e6083844a218e25419695d55f1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 12:59:41 2012 +0100

    sna/gen6: Micro-optimise render copy emission
    
    Backport of the changes made for IVB.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c635f4d..1ff54ac 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -48,7 +48,8 @@
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
 #define NO_FILL_BOXES 0
-#define NO_CLEAR 0
+#define NO_FILL_ONE 0
+#define NO_FILL_CLEAR 0
 
 #define NO_RING_SWITCH 0
 
@@ -134,28 +135,29 @@ static const uint32_t ps_kernel_planar[][4] = {
 #include "exa_wm_write.g6b"
 };
 
-#define KERNEL(kernel_enum, kernel, masked) \
-    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), masked}
+#define KERNEL(kernel_enum, kernel, ns, ni) \
+    [GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), ns, ni}
 static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	bool has_mask;
+	unsigned int num_surfaces;
+	unsigned int num_inputs;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, 2, 1),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, 2, 1),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, true),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
+	KERNEL(MASK, ps_kernel_masknoca_affine, 3, 2),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, 3, 2),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, 3, 2),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3, 2),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, 3, 2),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3, 2),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7, 1),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2, 1),
 };
 #undef KERNEL
 
@@ -193,8 +195,32 @@ static const struct blendinfo {
 #define BLEND_OFFSET(s, d) \
 	(((s) * GEN6_BLENDFACTOR_COUNT + (d)) * GEN6_BLEND_STATE_PADDED_SIZE)
 
+#define NO_BLEND BLEND_OFFSET(GEN6_BLENDFACTOR_ONE, GEN6_BLENDFACTOR_ZERO)
+#define CLEAR BLEND_OFFSET(GEN6_BLENDFACTOR_ZERO, GEN6_BLENDFACTOR_ZERO)
+
 #define SAMPLER_OFFSET(sf, se, mf, me) \
-	(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * 2 * sizeof(struct gen6_sampler_state))
+	(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me) + 2) * 2 * sizeof(struct gen6_sampler_state))
+
+#define VERTEX_2s2s 4
+
+#define COPY_SAMPLER 0
+#define COPY_VERTEX VERTEX_2s2s
+#define COPY_FLAGS(a) GEN6_SET_FLAGS(COPY_SAMPLER, (a) == GXcopy ? NO_BLEND : CLEAR, GEN6_WM_KERNEL_NOMASK, COPY_VERTEX)
+
+#define FILL_SAMPLER (2 * sizeof(struct gen6_sampler_state))
+#define FILL_VERTEX VERTEX_2s2s
+#define FILL_FLAGS(op, format) GEN6_SET_FLAGS(FILL_SAMPLER, gen6_get_blend((op), false, (format)), GEN6_WM_KERNEL_NOMASK, FILL_VERTEX)
+#define FILL_FLAGS_NOBLEND GEN6_SET_FLAGS(FILL_SAMPLER, NO_BLEND, GEN6_WM_KERNEL_NOMASK, FILL_VERTEX)
+
+#define VIDEO_SAMPLER \
+	SAMPLER_OFFSET(SAMPLER_FILTER_BILINEAR, SAMPLER_EXTEND_PAD, \
+		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+
+#define GEN6_SAMPLER(f) (((f) >> 16) & 0xfff0)
+#define GEN6_BLEND(f) (((f) >> 0) & 0xfff0)
+#define GEN6_KERNEL(f) (((f) >> 16) & 0xf)
+#define GEN6_VERTEX(f) (((f) >> 0) & 0xf)
+#define GEN6_SET_FLAGS(S, B, K, V)  (((S) | (K)) << 16 | ((B) | (V)))
 
 #define OUT_BATCH(v) batch_emit(sna, v)
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
@@ -545,20 +571,14 @@ gen6_emit_invariant(struct sna *sna)
 }
 
 static bool
-gen6_emit_cc(struct sna *sna,
-	     int op, bool has_component_alpha, uint32_t dst_format)
+gen6_emit_cc(struct sna *sna, int blend)
 {
 	struct gen6_render_state *render = &sna->render_state.gen6;
-	uint32_t blend;
-
-	blend = gen6_get_blend(op, has_component_alpha, dst_format);
 
-	DBG(("%s(op=%d, ca=%d, format=%x): new=%x, current=%x\n",
-	     __FUNCTION__,
-	     op, has_component_alpha, dst_format,
-	     blend, render->blend));
 	if (render->blend == blend)
-		return op <= PictOpSrc;
+		return blend != NO_BLEND;
+
+	DBG(("%s: blend = %x\n", __FUNCTION__, blend));
 
 	OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
 	OUT_BATCH((render->cc_blend + blend) | 1);
@@ -571,22 +591,19 @@ gen6_emit_cc(struct sna *sna,
 	}
 
 	render->blend = blend;
-	return op <= PictOpSrc;
+	return blend != NO_BLEND;
 }
 
 static void
 gen6_emit_sampler(struct sna *sna, uint32_t state)
 {
-	assert(state <
-	       2 * sizeof(struct gen6_sampler_state) *
-	       FILTER_COUNT * EXTEND_COUNT *
-	       FILTER_COUNT * EXTEND_COUNT);
-
 	if (sna->render_state.gen6.samplers == state)
 		return;
 
 	sna->render_state.gen6.samplers = state;
 
+	DBG(("%s: sampler = %x\n", __FUNCTION__, state));
+
 	OUT_BATCH(GEN6_3DSTATE_SAMPLER_STATE_POINTERS |
 		  GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS |
 		  (4 - 2));
@@ -633,25 +650,28 @@ gen6_emit_sf(struct sna *sna, bool has_mask)
 }
 
 static void
-gen6_emit_wm(struct sna *sna, unsigned int kernel, int nr_surfaces, int nr_inputs)
+gen6_emit_wm(struct sna *sna, unsigned int kernel)
 {
 	if (sna->render_state.gen6.kernel == kernel)
 		return;
 
 	sna->render_state.gen6.kernel = kernel;
 
-	DBG(("%s: switching to %s\n", __FUNCTION__, wm_kernels[kernel].name));
+	DBG(("%s: switching to %s, num_surfaces=%d\n",
+	     __FUNCTION__,
+	     wm_kernels[kernel].name,
+	     wm_kernels[kernel].num_surfaces));
 
 	OUT_BATCH(GEN6_3DSTATE_WM | (9 - 2));
 	OUT_BATCH(sna->render_state.gen6.wm_kernel[kernel]);
 	OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT |
-		  nr_surfaces << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
+		  wm_kernels[kernel].num_surfaces << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0);
 	OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT); /* DW4 */
 	OUT_BATCH((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT |
 		  GEN6_3DSTATE_WM_DISPATCH_ENABLE |
 		  GEN6_3DSTATE_WM_16_DISPATCH_ENABLE);
-	OUT_BATCH(nr_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
+	OUT_BATCH(wm_kernels[kernel].num_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT |
 		  GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
 	OUT_BATCH(0);
 	OUT_BATCH(0);
@@ -737,22 +757,59 @@ gen6_emit_vertex_elements(struct sna *sna,
 	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen6_render_state *render = &sna->render_state.gen6;
-	int nelem = op->mask.bo ? 2 : 1;
-	int selem = op->is_affine ? 2 : 3;
+	int nelem, selem;
 	uint32_t w_component;
 	uint32_t src_format;
-	int id = op->u.gen6.ve_id;
+	int id = GEN6_VERTEX(op->u.gen6.flags);
 
 	if (render->ve_id == id)
 		return;
 	render->ve_id = id;
 
+	switch (id) {
+	case VERTEX_2s2s:
+		DBG(("%s: setup COPY\n", __FUNCTION__));
+
+		OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS |
+			  ((2 * (1 + 2)) + 1 - 2));
+
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT);
+		OUT_BATCH(GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);
+
+		/* x,y */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
+		OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+
+		/* u0, v0, w0 */
+		OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID |
+			  GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
+			  4 << VE0_OFFSET_SHIFT);	/* offset vb in bytes */
+		OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
+			  GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
+		return;
+	}
+
+	nelem = op->mask.bo ? 2 : 1;
 	if (op->is_affine) {
 		src_format = GEN6_SURFACEFORMAT_R32G32_FLOAT;
 		w_component = GEN6_VFCOMPONENT_STORE_1_FLT;
+		selem = 2;
 	} else {
 		src_format = GEN6_SURFACEFORMAT_R32G32B32_FLOAT;
 		w_component = GEN6_VFCOMPONENT_STORE_SRC;
+		selem = 3;
 	}
 
 	/* The VUE layout
@@ -822,19 +879,13 @@ gen6_emit_state(struct sna *sna,
 {
 	bool need_stall = wm_binding_table & 1;
 
-	if (gen6_emit_cc(sna, op->op, op->has_component_alpha, op->dst.format))
+	if (gen6_emit_cc(sna, GEN6_BLEND(op->u.gen6.flags)))
 		need_stall = false;
-	gen6_emit_sampler(sna,
-			  SAMPLER_OFFSET(op->src.filter,
-					 op->src.repeat,
-					 op->mask.filter,
-					 op->mask.repeat));
+	gen6_emit_sampler(sna, GEN6_SAMPLER(op->u.gen6.flags));
 	gen6_emit_sf(sna, op->mask.bo != NULL);
-	gen6_emit_wm(sna,
-		     op->u.gen6.wm_kernel,
-		     op->u.gen6.nr_surfaces,
-		     op->u.gen6.nr_inputs);
+	gen6_emit_wm(sna, GEN6_KERNEL(op->u.gen6.flags));
 	gen6_emit_vertex_elements(sna, op);
+
 	need_stall |= gen6_emit_binding_table(sna, wm_binding_table & ~1);
 	if (gen6_emit_drawing_rectangle(sna, op))
 		need_stall = false;
@@ -867,12 +918,11 @@ static void gen6_magic_ca_pass(struct sna *sna,
 
 	gen6_emit_flush(sna);
 
-	gen6_emit_cc(sna, PictOpAdd, true, op->dst.format);
+	gen6_emit_cc(sna, gen6_get_blend(PictOpAdd, true, op->dst.format));
 	gen6_emit_wm(sna,
 		     gen6_choose_composite_kernel(PictOpAdd,
 						  true, true,
-						  op->is_affine),
-		     3, 2);
+						  op->is_affine));
 
 	OUT_BATCH(GEN6_3DPRIMITIVE |
 		  GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -932,7 +982,7 @@ static int gen6_vertex_finish(struct sna *sna)
 						       sna->render.vertex_reloc[i]+1,
 						       bo,
 						       I915_GEM_DOMAIN_VERTEX << 16,
-						       0 + sna->render.vertex_used * 4 - 1);
+						       sna->render.vertex_used * 4 - 1);
 				sna->render.vertex_reloc[i] = 0;
 			}
 		}
@@ -1116,6 +1166,24 @@ sampler_state_init(struct gen6_sampler_state *sampler_state,
 	}
 }
 
+static void
+sampler_copy_init(struct gen6_sampler_state *ss)
+{
+	sampler_state_init(ss, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+	ss->ss3.non_normalized_coord = 1;
+
+	sampler_state_init(ss+1, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+}
+
+static void
+sampler_fill_init(struct gen6_sampler_state *ss)
+{
+	sampler_state_init(ss, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_REPEAT);
+	ss->ss3.non_normalized_coord = 1;
+
+	sampler_state_init(ss+1, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+}
+
 static uint32_t gen6_create_cc_viewport(struct sna_static_stream *stream)
 {
 	struct gen6_cc_viewport vp;
@@ -1291,7 +1359,6 @@ gen6_emit_composite_primitive_simple_source(struct sna *sna,
 	v[8] = ((r->src.y + ty) * yy + y0) * sy;
 }
 
-
 fastcall static void
 gen6_emit_composite_primitive_affine_source(struct sna *sna,
 					    const struct sna_composite_op *op,
@@ -1444,7 +1511,7 @@ gen6_emit_composite_primitive(struct sna *sna,
 static void gen6_emit_vertex_buffer(struct sna *sna,
 				    const struct sna_composite_op *op)
 {
-	int id = op->u.gen6.ve_id;
+	int id = GEN6_VERTEX(op->u.gen6.flags);
 
 	OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA |
@@ -1489,7 +1556,7 @@ static void gen6_emit_primitive(struct sna *sna)
 static bool gen6_rectangle_begin(struct sna *sna,
 				 const struct sna_composite_op *op)
 {
-	int id = 1 << op->u.gen6.ve_id;
+	int id = 1 << GEN6_VERTEX(op->u.gen6.flags);
 	int ndwords;
 
 	ndwords = op->need_magic_ca_pass ? 60 : 6;
@@ -1892,7 +1959,6 @@ gen6_render_video(struct sna *sna,
 
 	memset(&tmp, 0, sizeof(tmp));
 
-	tmp.op = PictOpSrc;
 	tmp.dst.pixmap = pixmap;
 	tmp.dst.width  = pixmap->drawable.width;
 	tmp.dst.height = pixmap->drawable.height;
@@ -1900,24 +1966,17 @@ gen6_render_video(struct sna *sna,
 	tmp.dst.bo = priv->gpu_bo;
 
 	tmp.src.bo = frame->bo;
-	tmp.src.filter = SAMPLER_FILTER_BILINEAR;
-	tmp.src.repeat = SAMPLER_EXTEND_PAD;
-
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
-	if (is_planar_fourcc(frame->id)) {
-		tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_VIDEO_PLANAR;
-		tmp.u.gen6.nr_surfaces = 7;
-	} else {
-		tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_VIDEO_PACKED;
-		tmp.u.gen6.nr_surfaces = 2;
-	}
-	tmp.u.gen6.nr_inputs = 1;
-	tmp.u.gen6.ve_id = 1;
+	tmp.u.gen6.flags =
+		GEN6_SET_FLAGS(VIDEO_SAMPLER, NO_BLEND,
+			       is_planar_fourcc(frame->id) ?
+			       GEN6_WM_KERNEL_VIDEO_PLANAR :
+			       GEN6_WM_KERNEL_VIDEO_PACKED,
+			       1);
 	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
@@ -2568,17 +2627,6 @@ gen6_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen6_blend_op))
 		return false;
 
-#if NO_COMPOSITE
-	if (mask)
-		return false;
-
-	return sna_blt_composite(sna, op,
-				 src, dst,
-				 src_x, src_y,
-				 dst_x, dst_y,
-				 width, height, tmp);
-#endif
-
 	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
 	     width, height, sna->kgem.ring));
 
@@ -2734,14 +2782,19 @@ gen6_render_composite(struct sna *sna,
 	}
 	tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
 
-	tmp->u.gen6.wm_kernel =
-		gen6_choose_composite_kernel(tmp->op,
-					     tmp->mask.bo != NULL,
-					     tmp->has_component_alpha,
-					     tmp->is_affine);
-	tmp->u.gen6.nr_surfaces = 2 + (tmp->mask.bo != NULL);
-	tmp->u.gen6.nr_inputs = 1 + (tmp->mask.bo != NULL);
-	tmp->u.gen6.ve_id = gen6_choose_composite_vertex_buffer(tmp);
+	tmp->u.gen6.flags =
+		GEN6_SET_FLAGS(SAMPLER_OFFSET(tmp->src.filter,
+					      tmp->src.repeat,
+					      tmp->mask.filter,
+					      tmp->mask.repeat),
+			       gen6_get_blend(tmp->op,
+					      tmp->has_component_alpha,
+					      tmp->dst.format),
+			       gen6_choose_composite_kernel(tmp->op,
+							    tmp->mask.bo != NULL,
+							    tmp->has_component_alpha,
+							    tmp->is_affine),
+			       gen6_choose_composite_vertex_buffer(tmp));
 
 	tmp->blt   = gen6_render_composite_blt;
 	tmp->box   = gen6_render_composite_box;
@@ -2784,8 +2837,6 @@ gen6_composite_alpha_gradient_init(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	channel->filter = PictFilterNearest;
-	channel->repeat = RepeatPad;
 	channel->is_affine = true;
 	channel->is_solid  = false;
 	channel->transform = NULL;
@@ -3100,13 +3151,11 @@ gen6_render_composite_spans(struct sna *sna,
 		break;
 	}
 
-	tmp->base.mask.bo = NULL;
-
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = false;
 	tmp->base.need_magic_ca_pass = false;
 
-	gen6_composite_alpha_gradient_init(sna, &tmp->base.mask);
+	if (!gen6_composite_alpha_gradient_init(sna, &tmp->base.mask))
+		goto cleanup_src;
 
 	tmp->prim_emit = gen6_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid) {
@@ -3125,13 +3174,16 @@ gen6_render_composite_spans(struct sna *sna,
 	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
-	tmp->base.u.gen6.wm_kernel =
-		gen6_choose_composite_kernel(tmp->base.op,
-					     true, false,
-					     tmp->base.is_affine);
-	tmp->base.u.gen6.nr_surfaces = 3;
-	tmp->base.u.gen6.nr_inputs = 2;
-	tmp->base.u.gen6.ve_id = 1 << 1 | tmp->base.is_affine;
+	tmp->base.u.gen6.flags =
+		GEN6_SET_FLAGS(SAMPLER_OFFSET(tmp->base.src.filter,
+					      tmp->base.src.repeat,
+					      SAMPLER_FILTER_NEAREST,
+					      SAMPLER_EXTEND_PAD),
+			       gen6_get_blend(tmp->base.op, false, tmp->base.dst.format),
+			       gen6_choose_composite_kernel(tmp->base.op,
+							    true, false,
+							    tmp->base.is_affine),
+			       1 << 1 | tmp->base.is_affine);
 
 	tmp->box   = gen6_render_composite_spans_box;
 	tmp->boxes = gen6_render_composite_spans_boxes;
@@ -3258,17 +3310,6 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	struct sna_composite_op tmp;
 
-#if NO_COPY_BOXES
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy_boxes(sna, alu,
-				  src_bo, src_dx, src_dy,
-				  dst_bo, dst_dx, dst_dy,
-				  dst->drawable.bitsPerPixel,
-				  box, n);
-#endif
-
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
@@ -3309,7 +3350,6 @@ fallback_blt:
 	if (!gen6_check_format(tmp.src.pict_format))
 		goto fallback_blt;
 
-	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3333,16 +3373,20 @@ fallback_blt:
 			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
+
 		if (!sna_render_composite_redirect(sna, &tmp,
 						   extents.x1 + dst_dx,
 						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
+
+		dst_dx += tmp.dst.x;
+		dst_dy += tmp.dst.y;
+
+		tmp.dst.x = tmp.dst.y = 0;
 	}
 
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_NONE;
 	tmp.src.card_format = gen6_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
 		BoxRec extents = box[0];
@@ -3368,34 +3412,30 @@ fallback_blt:
 			DBG(("%s: unable to extract partial pixmap\n", __FUNCTION__));
 			goto fallback_tiled_dst;
 		}
+
+		src_dx += tmp.src.offset[0];
+		src_dy += tmp.src.offset[1];
 	} else {
-		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.bo = src_bo;
 		tmp.src.width  = src->drawable.width;
 		tmp.src.height = src->drawable.height;
-		tmp.src.offset[0] = tmp.src.offset[1] = 0;
-		tmp.src.scale[0] = 1.f/src->drawable.width;
-		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = 0;
 
-	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	tmp.u.gen6.nr_surfaces = 2;
-	tmp.u.gen6.nr_inputs = 1;
-	tmp.u.gen6.ve_id = 1;
+	tmp.u.gen6.flags = COPY_FLAGS(alu);
+	assert(GEN6_KERNEL(tmp.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == COPY_SAMPLER);
+	assert(GEN6_VERTEX(tmp.u.gen6.flags) == COPY_VERTEX);
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
 		kgem_submit(&sna->kgem);
-		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
+		if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
 			DBG(("%s: too large for a single operation\n",
 			     __FUNCTION__));
 			goto fallback_tiled_src;
@@ -3403,52 +3443,47 @@ fallback_blt:
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	dst_dx += tmp.dst.x;
-	dst_dy += tmp.dst.y;
-	tmp.dst.x = tmp.dst.y = 0;
-
-	src_dx += tmp.src.offset[0];
-	src_dy += tmp.src.offset[1];
-
 	gen6_emit_copy_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
 	do {
-		float *v;
-		int n_this_time = gen6_get_rectangles(sna, &tmp, n,
-						      gen6_emit_copy_state);
+		int16_t *v;
+		int n_this_time;
+
+		n_this_time = gen6_get_rectangles(sna, &tmp, n,
+						  gen6_emit_copy_state);
 		n -= n_this_time;
 
-		v = sna->render.vertices + sna->render.vertex_used;
-		sna->render.vertex_used += 9 * n_this_time;
+		v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
+		sna->render.vertex_used += 6 * n_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 		do {
 
 			DBG(("	(%d, %d) -> (%d, %d) + (%d, %d)\n",
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			v[0] = pack_2s(box->x2 + dst_dx, box->y2 + dst_dy);
-			v[3] = pack_2s(box->x1 + dst_dx, box->y2 + dst_dy);
-			v[6] = pack_2s(box->x1 + dst_dx, box->y1 + dst_dy);
-
-			v[1] = (box->x2 + src_dx) * tmp.src.scale[0];
-			v[7] = v[4] = (box->x1 + src_dx) * tmp.src.scale[0];
-
-			v[5] = v[2] = (box->y2 + src_dy) * tmp.src.scale[1];
-			v[8] = (box->y1 + src_dy) * tmp.src.scale[1];
-
-			v += 9;
-			box++;
+			v[0] = box->x2 + dst_dx;
+			v[2] = box->x2 + src_dx;
+			v[1]  = v[5] = box->y2 + dst_dy;
+			v[3]  = v[7] = box->y2 + src_dy;
+			v[8]  = v[4] = box->x1 + dst_dx;
+			v[10] = v[6] = box->x1 + src_dx;
+			v[9]  = box->y1 + dst_dy;
+			v[11] = box->y1 + src_dy;
+			v += 12; box++;
 		} while (--n_this_time);
 	} while (n);
 
 	gen6_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
-	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	if (tmp.src.bo != src_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return true;
 
 fallback_tiled_src:
-	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	if (tmp.src.bo != src_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 fallback_tiled_dst:
 	if (tmp.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
@@ -3466,19 +3501,20 @@ gen6_render_copy_blt(struct sna *sna,
 		     int16_t w,  int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_copy_state);
+	int16_t *v;
 
-	OUT_VERTEX(dx+w, dy+h);
-	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_copy_state);
 
-	OUT_VERTEX(dx, dy+h);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(dx, dy);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F(sy*op->base.src.scale[1]);
+	v[0]  = dx+w; v[1]  = dy+h;
+	v[2]  = sx+w; v[3]  = sy+h;
+	v[4]  = dx;   v[5]  = dy+h;
+	v[6]  = sx;   v[7]  = sy+h;
+	v[8]  = dx;   v[9]  = dy;
+	v[10] = sx;   v[11] = sy;
 }
 
 static void
@@ -3496,16 +3532,6 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 struct sna_copy_op *op)
 {
-#if NO_COPY
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy(sna, alu,
-			    src_bo, dst_bo,
-			    dst->drawable.bitsPerPixel,
-			    op);
-#endif
-
 	DBG(("%s (alu=%d, src=(%dx%d), dst=(%dx%d))\n",
 	     __FUNCTION__, alu,
 	     src->drawable.width, src->drawable.height,
@@ -3541,8 +3567,6 @@ fallback:
 	if (!gen6_check_format(op->base.src.pict_format))
 		goto fallback;
 
-	op->base.op = PictOpSrc;
-
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
 	op->base.dst.height = dst->drawable.height;
@@ -3553,21 +3577,16 @@ fallback:
 		gen6_get_card_format(op->base.src.pict_format);
 	op->base.src.width  = src->drawable.width;
 	op->base.src.height = src->drawable.height;
-	op->base.src.scale[0] = 1.f/src->drawable.width;
-	op->base.src.scale[1] = 1.f/src->drawable.height;
-	op->base.src.filter = SAMPLER_FILTER_NEAREST;
-	op->base.src.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.mask.bo = NULL;
 
-	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 
-	op->base.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	op->base.u.gen6.nr_surfaces = 2;
-	op->base.u.gen6.nr_inputs = 1;
-	op->base.u.gen6.ve_id = 1;
+	op->base.u.gen6.flags = COPY_FLAGS(alu);
+	assert(GEN6_KERNEL(op->base.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(op->base.u.gen6.flags) == COPY_SAMPLER);
+	assert(GEN6_VERTEX(op->base.u.gen6.flags) == COPY_VERTEX);
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -3678,27 +3697,21 @@ gen6_render_fill_boxes(struct sna *sna,
 						     dst, dst_bo, box, n);
 	}
 
-#if NO_FILL_BOXES
-	return false;
-#endif
-
 	if (op == PictOpClear) {
 		pixel = 0;
 		op = PictOpSrc;
 	} else if (!sna_get_pixel_from_rgba(&pixel,
-				     color->red,
-				     color->green,
-				     color->blue,
-				     color->alpha,
-				     PICT_a8r8g8b8))
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    PICT_a8r8g8b8))
 		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
 	     box[0].x1, box[0].y1, box[0].x2, box[0].y2));
 
-	tmp.op = op;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3707,23 +3720,16 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = false;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	tmp.u.gen6.nr_surfaces = 2;
-	tmp.u.gen6.nr_inputs = 1;
-	tmp.u.gen6.ve_id = 1;
+	tmp.u.gen6.flags = FILL_FLAGS(op, format);
+	assert(GEN6_KERNEL(tmp.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
+	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -3734,25 +3740,27 @@ gen6_render_fill_boxes(struct sna *sna,
 	gen6_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen6_get_rectangles(sna, &tmp, n,
-						      gen6_emit_fill_state);
+		int n_this_time;
+		int16_t *v;
+
+		n_this_time = gen6_get_rectangles(sna, &tmp, n,
+						  gen6_emit_fill_state);
 		n -= n_this_time;
+
+		v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
+		sna->render.vertex_used += 6 * n_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 		do {
 			DBG(("	(%d, %d), (%d, %d)\n",
 			     box->x1, box->y1, box->x2, box->y2));
-			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
-
-			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
 
-			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
-
-			box++;
+			v[0] = box->x2;
+			v[5] = v[1] = box->y2;
+			v[8] = v[4] = box->x1;
+			v[9] = box->y1;
+			v[2] = v[3]  = v[7]  = 1;
+			v[6] = v[10] = v[11] = 0;
+			v += 12; box++;
 		} while (--n_this_time);
 	} while (n);
 
@@ -3766,21 +3774,23 @@ gen6_render_op_fill_blt(struct sna *sna,
 			const struct sna_fill_op *op,
 			int16_t x, int16_t y, int16_t w, int16_t h)
 {
+	int16_t *v;
+
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_fill_state);
 
-	OUT_VERTEX(x+w, y+h);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(x, y+h);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = x+w;
+	v[4] = v[8] = x;
+	v[1] = v[5] = y+h;
+	v[9] = y;
 
-	OUT_VERTEX(x, y);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[2] = v[3]  = v[7]  = 1;
+	v[6] = v[10] = v[11] = 0;
 }
 
 fastcall static void
@@ -3788,22 +3798,24 @@ gen6_render_op_fill_box(struct sna *sna,
 			const struct sna_fill_op *op,
 			const BoxRec *box)
 {
+	int16_t *v;
+
 	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_fill_state);
 
-	OUT_VERTEX(box->x2, box->y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(box->x1, box->y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = box->x2;
+	v[8] = v[4] = box->x1;
+	v[5] = v[1] = box->y2;
+	v[9] = box->y1;
 
-	OUT_VERTEX(box->x1, box->y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 }
 
 fastcall static void
@@ -3817,24 +3829,24 @@ gen6_render_op_fill_boxes(struct sna *sna,
 
 	do {
 		int nbox_this_time;
+		int16_t *v;
 
 		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox,
 						     gen6_emit_fill_state);
 		nbox -= nbox_this_time;
 
-		do {
-			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
-
-			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
+		v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+		sna->render.vertex_used += 6 * nbox_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
-			box++;
+		do {
+			v[0] = box->x2;
+			v[8] = v[4] = box->x1;
+			v[5] = v[1] = box->y2;
+			v[9] = box->y1;
+			v[7] = v[2]  = v[3]  = 1;
+			v[6] = v[10] = v[11] = 0;
+			box++; v += 12;
 		} while (--nbox_this_time);
 	} while (nbox);
 }
@@ -3857,13 +3869,6 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 {
 	DBG(("%s: (alu=%d, color=%x)\n", __FUNCTION__, alu, color));
 
-#if NO_FILL
-	return sna_blt_fill(sna, alu,
-			    dst_bo, dst->drawable.bitsPerPixel,
-			    color,
-			    op);
-#endif
-
 	if (prefer_blt_fill(sna, dst_bo) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
@@ -3881,8 +3886,6 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	if (alu == GXclear)
 		color = 0;
 
-	op->base.op = PictOpSrc;
-
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
 	op->base.dst.height = dst->drawable.height;
@@ -3894,23 +3897,16 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 		sna_render_get_solid(sna,
 				     sna_rgba_for_color(color,
 							dst->drawable.depth));
-	op->base.src.filter = SAMPLER_FILTER_NEAREST;
-	op->base.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	op->base.mask.bo = NULL;
-	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
-	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = true;
-	op->base.has_component_alpha = false;
 	op->base.need_magic_ca_pass = false;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 
-	op->base.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	op->base.u.gen6.nr_surfaces = 2;
-	op->base.u.gen6.nr_inputs = 1;
-	op->base.u.gen6.ve_id = 1;
+	op->base.u.gen6.flags = FILL_FLAGS_NOBLEND;
+	assert(GEN6_KERNEL(op->base.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(op->base.u.gen6.flags) == FILL_SAMPLER);
+	assert(GEN6_VERTEX(op->base.u.gen6.flags) == FILL_VERTEX);
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -3953,11 +3949,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint8_t alu)
 {
 	struct sna_composite_op tmp;
-
-#if NO_FILL_BOXES
-	return gen6_render_fill_one_try_blt(sna, dst, bo, color,
-					    x1, y1, x2, y2, alu);
-#endif
+	int16_t *v;
 
 	/* Prefer to use the BLT if already engaged */
 	if (prefer_blt_fill(sna, bo) &&
@@ -3974,8 +3966,6 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (alu == GXclear)
 		color = 0;
 
-	tmp.op = PictOpSrc;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3987,23 +3977,16 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		sna_render_get_solid(sna,
 				     sna_rgba_for_color(color,
 							dst->drawable.depth));
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	tmp.u.gen6.nr_surfaces = 2;
-	tmp.u.gen6.nr_inputs = 1;
-	tmp.u.gen6.ve_id = 1;
+	tmp.u.gen6.flags = FILL_FLAGS_NOBLEND;
+	assert(GEN6_KERNEL(tmp.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
+	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -4016,17 +3999,17 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
-	OUT_VERTEX(x2, y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
 
-	OUT_VERTEX(x1, y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(x1, y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[0] = x2;
+	v[8] = v[4] = x1;
+	v[5] = v[1] = y2;
+	v[9] = y1;
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -4053,10 +4036,7 @@ static bool
 gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
-
-#if NO_CLEAR
-	return gen6_render_clear_try_blt(sna, dst, bo);
-#endif
+	int16_t *v;
 
 	DBG(("%s: %dx%d\n",
 	     __FUNCTION__,
@@ -4072,8 +4052,6 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	if (too_large(dst->drawable.width, dst->drawable.height))
 		return gen6_render_clear_try_blt(sna, dst, bo);
 
-	tmp.op = PictOpSrc;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -4082,23 +4060,16 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, 0);
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
-	tmp.u.gen6.nr_surfaces = 2;
-	tmp.u.gen6.nr_inputs = 1;
-	tmp.u.gen6.ve_id = 1;
+	tmp.u.gen6.flags = FILL_FLAGS_NOBLEND;
+	assert(GEN6_KERNEL(tmp.u.gen6.flags) == GEN6_WM_KERNEL_NOMASK);
+	assert(GEN6_SAMPLER(tmp.u.gen6.flags) == FILL_SAMPLER);
+	assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -4110,17 +4081,17 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 	gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
 
-	OUT_VERTEX(dst->drawable.width, dst->drawable.height);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(0, dst->drawable.height);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = dst->drawable.width;
+	v[5] = v[1] = dst->drawable.height;
+	v[8] = v[4] = 0;
+	v[9] = 0;
 
-	OUT_VERTEX(0, 0);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -4227,10 +4198,13 @@ static bool gen6_render_setup(struct sna *sna)
 
 	ss = sna_static_stream_map(&general,
 				   2 * sizeof(*ss) *
-				   FILTER_COUNT * EXTEND_COUNT *
-				   FILTER_COUNT * EXTEND_COUNT,
+				   (2 +
+				    FILTER_COUNT * EXTEND_COUNT *
+				    FILTER_COUNT * EXTEND_COUNT),
 				   32);
 	state->wm_state = sna_static_stream_offsetof(&general, ss);
+	sampler_copy_init(ss); ss += 2;
+	sampler_fill_init(ss); ss += 2;
 	for (i = 0; i < FILTER_COUNT; i++) {
 		for (j = 0; j < EXTEND_COUNT; j++) {
 			for (k = 0; k < FILTER_COUNT; k++) {
@@ -4258,19 +4232,33 @@ bool gen6_render_init(struct sna *sna)
 	sna->kgem.retire = gen6_render_retire;
 	sna->kgem.expire = gen6_render_expire;
 
+#if !NO_COMPOSITE
 	sna->render.composite = gen6_render_composite;
+#endif
 #if !NO_COMPOSITE_SPANS
 	sna->render.composite_spans = gen6_render_composite_spans;
 #endif
 	sna->render.video = gen6_render_video;
 
+#if !NO_COPY_BOXES
 	sna->render.copy_boxes = gen6_render_copy_boxes;
+#endif
+#if !NO_COPY
 	sna->render.copy = gen6_render_copy;
+#endif
 
+#if !NO_FILL_BOXES
 	sna->render.fill_boxes = gen6_render_fill_boxes;
+#endif
+#if !NO_FILL
 	sna->render.fill = gen6_render_fill;
+#endif
+#if !NO_FILL_ONE
 	sna->render.fill_one = gen6_render_fill_one;
+#endif
+#if !NO_FILL_CLEAR
 	sna->render.clear = gen6_render_clear;
+#endif
 
 	sna->render.flush = gen6_render_flush;
 	sna->render.reset = gen6_render_reset;
diff --git a/src/sna/gen6_render.h b/src/sna/gen6_render.h
index eded2b7..b0331ec 100644
--- a/src/sna/gen6_render.h
+++ b/src/sna/gen6_render.h
@@ -1350,7 +1350,9 @@ struct gen6_sampler_state {
    } ss2;
 
    struct {
-      uint32_t pad:19;
+      uint32_t non_normalized_coord:1;
+      uint32_t pad:12;
+      uint32_t address_round:6;
       uint32_t max_aniso:3;
       uint32_t chroma_key_mode:1;
       uint32_t chroma_key_index:2;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 15d882f..41cb02b 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -126,10 +126,7 @@ struct sna_composite_op {
 		} gen5;
 
 		struct {
-			int wm_kernel;
-			int nr_surfaces;
-			int nr_inputs;
-			int ve_id;
+			uint32_t flags;
 		} gen6;
 
 		struct {
commit 4eea9ac0035dd72f3c637adc39eeaeda46472e9e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 12:59:41 2012 +0100

    sna/gen7: Micro-optimise render copy emission
    
    The goal is bring the overhead down to that of using the blitter. Tricky
    given the number of steps to using the 3D pipeline compared to the
    BLT...
    
    A stretch goal would be to make IVB GPU bound for -copywinpix10!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index f8036fb..cb497b9 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -48,7 +48,8 @@
 #define NO_COPY_BOXES 0
 #define NO_FILL 0
 #define NO_FILL_BOXES 0
-#define NO_CLEAR 0
+#define NO_FILL_ONE 0
+#define NO_FILL_CLEAR 0
 
 #define NO_RING_SWITCH 0
 
@@ -167,28 +168,28 @@ static const uint32_t ps_kernel_planar[][4] = {
 #include "exa_wm_write.g7b"
 };
 
-#define KERNEL(kernel_enum, kernel, masked) \
-    [GEN7_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), masked}
+#define KERNEL(kernel_enum, kernel, num_surfaces) \
+    [GEN7_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), num_surfaces}
 static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	bool has_mask;
+	int num_surfaces;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, 2),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, 2),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, true),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
+	KERNEL(MASK, ps_kernel_masknoca_affine, 3),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, 3),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, 3),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, 3),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, 3),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, 3),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, 7),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, 2),
 };
 #undef KERNEL
 
@@ -226,21 +227,32 @@ static const struct blendinfo {
 #define BLEND_OFFSET(s, d) \
 	(((s) * GEN7_BLENDFACTOR_COUNT + (d)) * GEN7_BLEND_STATE_PADDED_SIZE)
 
+#define NO_BLEND BLEND_OFFSET(GEN7_BLENDFACTOR_ONE, GEN7_BLENDFACTOR_ZERO)
+#define CLEAR BLEND_OFFSET(GEN7_BLENDFACTOR_ZERO, GEN7_BLENDFACTOR_ZERO)
+
 #define SAMPLER_OFFSET(sf, se, mf, me) \
-	(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * 2 * sizeof(struct gen7_sampler_state))
+	((((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) + 2) * 2 * sizeof(struct gen7_sampler_state))
 
-#define FILL_SAMPLER \
-	SAMPLER_OFFSET(SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_REPEAT, \
-		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+#define VERTEX_2s2s 4
 
-#define COPY_SAMPLER \
-	SAMPLER_OFFSET(SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE, \
-		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+#define COPY_SAMPLER 0
+#define COPY_VERTEX VERTEX_2s2s
+#define COPY_FLAGS(a) GEN7_SET_FLAGS(COPY_SAMPLER, (a) == GXcopy ? NO_BLEND : CLEAR, GEN7_WM_KERNEL_NOMASK, COPY_VERTEX)
+
+#define FILL_SAMPLER (2 * sizeof(struct gen7_sampler_state))
+#define FILL_VERTEX VERTEX_2s2s
+#define FILL_FLAGS(op, format) GEN7_SET_FLAGS(FILL_SAMPLER, gen7_get_blend((op), false, (format)), GEN7_WM_KERNEL_NOMASK, FILL_VERTEX)
+#define FILL_FLAGS_NOBLEND GEN7_SET_FLAGS(FILL_SAMPLER, NO_BLEND, GEN7_WM_KERNEL_NOMASK, FILL_VERTEX)
 
 #define VIDEO_SAMPLER \
 	SAMPLER_OFFSET(SAMPLER_FILTER_BILINEAR, SAMPLER_EXTEND_PAD, \
 		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
 
+#define GEN7_SAMPLER(f) (((f) >> 16) & 0xfff0)
+#define GEN7_BLEND(f) (((f) >> 0) & 0xfff0)
+#define GEN7_KERNEL(f) (((f) >> 16) & 0xf)
+#define GEN7_VERTEX(f) (((f) >> 0) & 0xf)
+#define GEN7_SET_FLAGS(S, B, K, V)  (((S) | (K)) << 16 | ((B) | (V)))
 
 #define OUT_BATCH(v) batch_emit(sna, v)
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
@@ -748,6 +760,8 @@ gen7_emit_cc(struct sna *sna, uint32_t blend_offset)
 	if (render->blend == blend_offset)
 		return;
 
+	DBG(("%s: blend = %x\n", __FUNCTION__, blend_offset));
+
 	/* XXX can have upto 8 blend states preload, selectable via
 	 * Render Target Index. What other side-effects of Render Target Index?
 	 */
@@ -762,16 +776,13 @@ gen7_emit_cc(struct sna *sna, uint32_t blend_offset)
 static void
 gen7_emit_sampler(struct sna *sna, uint32_t state)
 {
-	assert(state <
-	       2 * sizeof(struct gen7_sampler_state) *
-	       FILTER_COUNT * EXTEND_COUNT *
-	       FILTER_COUNT * EXTEND_COUNT);
-
 	if (sna->render_state.gen7.samplers == state)
 		return;
 
 	sna->render_state.gen7.samplers = state;
 
+	DBG(("%s: sampler = %x\n", __FUNCTION__, state));
+
 	assert (is_aligned(sna->render_state.gen7.wm_state + state, 32));
 	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
 	OUT_BATCH(sna->render_state.gen7.wm_state + state);
@@ -809,19 +820,22 @@ gen7_emit_sf(struct sna *sna, bool has_mask)
 }
 
 static void
-gen7_emit_wm(struct sna *sna, unsigned int kernel, int nr_surfaces, int nr_inputs)
+gen7_emit_wm(struct sna *sna, int kernel)
 {
 	if (sna->render_state.gen7.kernel == kernel)
 		return;
 
 	sna->render_state.gen7.kernel = kernel;
 
-	DBG(("%s: switching to %s\n", __FUNCTION__, wm_kernels[kernel].name));
+	DBG(("%s: switching to %s, num_surfaces=%d\n",
+	     __FUNCTION__,
+	     wm_kernels[kernel].name,
+	     wm_kernels[kernel].num_surfaces));
 
 	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
 	OUT_BATCH(sna->render_state.gen7.wm_kernel[kernel]);
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
-		  nr_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
+		  wm_kernels[kernel].num_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
 	OUT_BATCH((sna->render_state.gen7.info->max_wm_threads - 1) << GEN7_PS_MAX_THREADS_SHIFT |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
@@ -881,22 +895,58 @@ gen7_emit_vertex_elements(struct sna *sna,
 	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen7_render_state *render = &sna->render_state.gen7;
-	int nelem = op->mask.bo ? 2 : 1;
-	int selem = op->is_affine ? 2 : 3;
+	int nelem, selem;
 	uint32_t w_component;
 	uint32_t src_format;
-	int id = op->u.gen7.ve_id;
+	int id = GEN7_VERTEX(op->u.gen7.flags);
 
 	if (render->ve_id == id)
 		return;
 	render->ve_id = id;
 
+	switch (id) {
+	case VERTEX_2s2s:
+		DBG(("%s: setup COPY\n", __FUNCTION__));
+
+		OUT_BATCH(GEN7_3DSTATE_VERTEX_ELEMENTS |
+			  ((2 * (1 + 2)) + 1 - 2));
+
+		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+			  GEN7_SURFACEFORMAT_R32G32B32A32_FLOAT << GEN7_VE0_FORMAT_SHIFT |
+			  0 << GEN7_VE0_OFFSET_SHIFT);
+		OUT_BATCH(GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_0_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_1_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+
+		/* x,y */
+		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+			  GEN7_SURFACEFORMAT_R16G16_SSCALED << GEN7_VE0_FORMAT_SHIFT |
+			  0 << GEN7_VE0_OFFSET_SHIFT); /* offsets vb in bytes */
+		OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+
+		OUT_BATCH(id << GEN7_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN7_VE0_VALID |
+			  GEN7_SURFACEFORMAT_R16G16_SSCALED << GEN7_VE0_FORMAT_SHIFT |
+			  4 << GEN7_VE0_OFFSET_SHIFT);	/* offset vb in bytes */
+		OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
+			  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
+		return;
+	}
+
+	nelem = op->mask.bo ? 2 : 1;
 	if (op->is_affine) {
 		src_format = GEN7_SURFACEFORMAT_R32G32_FLOAT;
 		w_component = GEN7_VFCOMPONENT_STORE_0;
+		selem = 2;
 	} else {
 		src_format = GEN7_SURFACEFORMAT_R32G32B32_FLOAT;
 		w_component = GEN7_VFCOMPONENT_STORE_SRC;
+		selem = 3;
 	}
 
 	/* The VUE layout
@@ -990,17 +1040,10 @@ gen7_emit_state(struct sna *sna,
 	if (sna->render_state.gen7.emit_flush)
 		gen7_emit_pipe_flush(sna);
 
-	gen7_emit_cc(sna,
-		     gen7_get_blend(op->op,
-				    op->has_component_alpha,
-				    op->dst.format));
-
-	gen7_emit_sampler(sna, op->u.gen7.sampler);
+	gen7_emit_cc(sna, GEN7_BLEND(op->u.gen7.flags));
+	gen7_emit_sampler(sna, GEN7_SAMPLER(op->u.gen7.flags));
 	gen7_emit_sf(sna, op->mask.bo != NULL);
-	gen7_emit_wm(sna,
-		     op->u.gen7.wm_kernel,
-		     op->u.gen7.nr_surfaces,
-		     op->u.gen7.nr_inputs);
+	gen7_emit_wm(sna, GEN7_KERNEL(op->u.gen7.flags));
 	gen7_emit_vertex_elements(sna, op);
 
 	need_stall |= gen7_emit_binding_table(sna, wm_binding_table);
@@ -1015,7 +1058,8 @@ gen7_emit_state(struct sna *sna,
 	if (need_stall)
 		gen7_emit_pipe_stall(sna);
 
-	sna->render_state.gen7.emit_flush = op->op > PictOpSrc;
+	sna->render_state.gen7.emit_flush =
+		GEN7_BLEND(op->u.gen7.flags) != NO_BLEND;
 }
 
 static void gen7_magic_ca_pass(struct sna *sna,
@@ -1035,8 +1079,7 @@ static void gen7_magic_ca_pass(struct sna *sna,
 	gen7_emit_wm(sna,
 		     gen7_choose_composite_kernel(PictOpAdd,
 						  true, true,
-						  op->is_affine),
-		     3, 2);
+						  op->is_affine));
 
 	OUT_BATCH(GEN7_3DPRIMITIVE | (7- 2));
 	OUT_BATCH(GEN7_3DPRIMITIVE_VERTEX_SEQUENTIAL | _3DPRIM_RECTLIST);
@@ -1256,6 +1299,24 @@ sampler_state_init(struct gen7_sampler_state *sampler_state,
 	}
 }
 
+static void
+sampler_copy_init(struct gen7_sampler_state *ss)
+{
+	sampler_state_init(ss, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+	ss->ss3.non_normalized_coord = 1;
+
+	sampler_state_init(ss+1, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+}
+
+static void
+sampler_fill_init(struct gen7_sampler_state *ss)
+{
+	sampler_state_init(ss, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_REPEAT);
+	ss->ss3.non_normalized_coord = 1;
+
+	sampler_state_init(ss+1, SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE);
+}
+
 static uint32_t gen7_create_cc_viewport(struct sna_static_stream *stream)
 {
 	struct gen7_cc_viewport vp;
@@ -1576,7 +1637,7 @@ gen7_emit_composite_primitive(struct sna *sna,
 static void gen7_emit_vertex_buffer(struct sna *sna,
 				    const struct sna_composite_op *op)
 {
-	int id = op->u.gen7.ve_id;
+	int id = GEN7_VERTEX(op->u.gen7.flags);
 
 	OUT_BATCH(GEN7_3DSTATE_VERTEX_BUFFERS | (5 - 2));
 	OUT_BATCH(id << GEN7_VB0_BUFFER_INDEX_SHIFT |
@@ -1614,7 +1675,7 @@ static void gen7_emit_primitive(struct sna *sna)
 static bool gen7_rectangle_begin(struct sna *sna,
 				 const struct sna_composite_op *op)
 {
-	int id = 1 << op->u.gen7.ve_id;
+	int id = 1 << GEN7_VERTEX(op->u.gen7.flags);
 	int ndwords;
 
 	ndwords = op->need_magic_ca_pass ? 60 : 6;
@@ -2010,7 +2071,6 @@ gen7_render_video(struct sna *sna,
 
 	memset(&tmp, 0, sizeof(tmp));
 
-	tmp.op = PictOpSrc;
 	tmp.dst.pixmap = pixmap;
 	tmp.dst.width  = pixmap->drawable.width;
 	tmp.dst.height = pixmap->drawable.height;
@@ -2020,20 +2080,15 @@ gen7_render_video(struct sna *sna,
 	tmp.src.bo = frame->bo;
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
-	if (is_planar_fourcc(frame->id)) {
-		tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_VIDEO_PLANAR;
-		tmp.u.gen7.nr_surfaces = 7;
-	} else {
-		tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_VIDEO_PACKED;
-		tmp.u.gen7.nr_surfaces = 2;
-	}
-	tmp.u.gen7.nr_inputs = 1;
-	tmp.u.gen7.ve_id = 1;
-	tmp.u.gen7.sampler = VIDEO_SAMPLER;
+	tmp.u.gen7.flags =
+		GEN7_SET_FLAGS(VIDEO_SAMPLER, NO_BLEND,
+			       is_planar_fourcc(frame->id) ?
+			       GEN7_WM_KERNEL_VIDEO_PLANAR :
+			       GEN7_WM_KERNEL_VIDEO_PACKED,
+			       1);
 	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
@@ -2663,17 +2718,6 @@ gen7_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen7_blend_op))
 		return false;
 
-#if NO_COMPOSITE
-	if (mask)
-		return false;
-
-	return sna_blt_composite(sna, op,
-				 src, dst,
-				 src_x, src_y,
-				 dst_x, dst_y,
-				 width, height, tmp);
-#endif
-
 	DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__,
 	     width, height, sna->kgem.ring));
 
@@ -2820,18 +2864,19 @@ gen7_render_composite(struct sna *sna,
 	}
 	tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
 
-	tmp->u.gen7.wm_kernel =
-		gen7_choose_composite_kernel(tmp->op,
-					     tmp->mask.bo != NULL,
-					     tmp->has_component_alpha,
-					     tmp->is_affine);
-	tmp->u.gen7.nr_surfaces = 2 + (tmp->mask.bo != NULL);
-	tmp->u.gen7.nr_inputs = 1 + (tmp->mask.bo != NULL);
-	tmp->u.gen7.ve_id = gen7_choose_composite_vertex_buffer(tmp);
-	tmp->u.gen7.sampler = SAMPLER_OFFSET(tmp->src.filter,
-					     tmp->src.repeat,
-					     tmp->mask.filter,
-					     tmp->mask.repeat);
+	tmp->u.gen7.flags =
+		GEN7_SET_FLAGS(SAMPLER_OFFSET(tmp->src.filter,
+					      tmp->src.repeat,
+					      tmp->mask.filter,
+					      tmp->mask.repeat),
+			       gen7_get_blend(tmp->op,
+					      tmp->has_component_alpha,
+					      tmp->dst.format),
+			       gen7_choose_composite_kernel(tmp->op,
+							    tmp->mask.bo != NULL,
+							    tmp->has_component_alpha,
+							    tmp->is_affine),
+			       gen7_choose_composite_vertex_buffer(tmp));
 
 	tmp->blt   = gen7_render_composite_blt;
 	tmp->box   = gen7_render_composite_box;
@@ -3186,7 +3231,6 @@ gen7_render_composite_spans(struct sna *sna,
 	}
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = false;
 	tmp->base.need_magic_ca_pass = false;
 
 	if (!gen7_composite_alpha_gradient_init(sna, &tmp->base.mask))
@@ -3209,17 +3253,16 @@ gen7_render_composite_spans(struct sna *sna,
 	tmp->base.floats_per_vertex = 5 + 2*!tmp->base.is_affine;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
-	tmp->base.u.gen7.wm_kernel =
-		gen7_choose_composite_kernel(tmp->base.op,
-					     true, false,
-					     tmp->base.is_affine);
-	tmp->base.u.gen7.nr_surfaces = 3;
-	tmp->base.u.gen7.nr_inputs = 2;
-	tmp->base.u.gen7.ve_id = 1 << 1 | tmp->base.is_affine;
-	tmp->base.u.gen7.sampler = SAMPLER_OFFSET(tmp->base.src.filter,
-						  tmp->base.src.repeat,
-						  SAMPLER_FILTER_NEAREST,
-						  SAMPLER_EXTEND_PAD);
+	tmp->base.u.gen7.flags =
+		GEN7_SET_FLAGS(SAMPLER_OFFSET(tmp->base.src.filter,
+					      tmp->base.src.repeat,
+					      SAMPLER_FILTER_NEAREST,
+					      SAMPLER_EXTEND_PAD),
+			       gen7_get_blend(tmp->base.op, false, tmp->base.dst.format),
+			       gen7_choose_composite_kernel(tmp->base.op,
+							    true, false,
+							    tmp->base.is_affine),
+			       1 << 1 | tmp->base.is_affine);
 
 	tmp->box   = gen7_render_composite_spans_box;
 	tmp->boxes = gen7_render_composite_spans_boxes;
@@ -3344,17 +3387,6 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	struct sna_composite_op tmp;
 
-#if NO_COPY_BOXES
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy_boxes(sna, alu,
-				  src_bo, src_dx, src_dy,
-				  dst_bo, dst_dx, dst_dy,
-				  dst->drawable.bitsPerPixel,
-				  box, n);
-#endif
-
 	DBG(("%s (%d, %d)->(%d, %d) x %d, alu=%x, self-copy=%d, overlaps? %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n, alu,
 	     src_bo == dst_bo,
@@ -3420,8 +3452,6 @@ fallback_blt:
 	if (!gen7_check_format(tmp.src.pict_format))
 		goto fallback_blt;
 
-	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3452,6 +3482,11 @@ fallback_blt:
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
+
+		dst_dx += tmp.dst.x;
+		dst_dy += tmp.dst.y;
+
+		tmp.dst.x = tmp.dst.y = 0;
 	}
 
 	tmp.src.card_format = gen7_get_card_format(tmp.src.pict_format);
@@ -3477,85 +3512,72 @@ fallback_blt:
 					       extents.x2 - extents.x1,
 					       extents.y2 - extents.y1))
 			goto fallback_tiled_dst;
+
+		src_dx += tmp.src.offset[0];
+		src_dy += tmp.src.offset[1];
 	} else {
-		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.bo = src_bo;
 		tmp.src.width  = src->drawable.width;
 		tmp.src.height = src->drawable.height;
-		tmp.src.offset[0] = tmp.src.offset[1] = 0;
-		tmp.src.scale[0] = 1.f/src->drawable.width;
-		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = 0;
 
-	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	tmp.u.gen7.nr_surfaces = 2;
-	tmp.u.gen7.nr_inputs = 1;
-	tmp.u.gen7.ve_id = 1;
-	tmp.u.gen7.sampler = COPY_SAMPLER;
+	tmp.u.gen7.flags = COPY_FLAGS(alu);
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL)) {
 		kgem_submit(&sna->kgem);
-		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+		if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, tmp.src.bo, NULL))
 			goto fallback_tiled_src;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	dst_dx += tmp.dst.x;
-	dst_dy += tmp.dst.y;
-	tmp.dst.x = tmp.dst.y = 0;
-
-	src_dx += tmp.src.offset[0];
-	src_dy += tmp.src.offset[1];
-
 	gen7_emit_copy_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
 	do {
-		float *v;
+		int16_t *v;
 		int n_this_time;
 
 		n_this_time = gen7_get_rectangles(sna, &tmp, n,
 						  gen7_emit_copy_state);
 		n -= n_this_time;
 
-		v = sna->render.vertices + sna->render.vertex_used;
-		sna->render.vertex_used += 9 * n_this_time;
+		v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
+		sna->render.vertex_used += 6 * n_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 		do {
 
 			DBG(("	(%d, %d) -> (%d, %d) + (%d, %d)\n",
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			v[0] = pack_2s(box->x2 + dst_dx, box->y2 + dst_dy);
-			v[3] = pack_2s(box->x1 + dst_dx, box->y2 + dst_dy);
-			v[6] = pack_2s(box->x1 + dst_dx, box->y1 + dst_dy);
-
-			v[1] = (box->x2 + src_dx) * tmp.src.scale[0];
-			v[7] = v[4] = (box->x1 + src_dx) * tmp.src.scale[0];
-
-			v[5] = v[2] = (box->y2 + src_dy) * tmp.src.scale[1];
-			v[8] = (box->y1 + src_dy) * tmp.src.scale[1];
-
-			v += 9;
-			box++;
+			v[0] = box->x2 + dst_dx;
+			v[2] = box->x2 + src_dx;
+			v[1]  = v[5] = box->y2 + dst_dy;
+			v[3]  = v[7] = box->y2 + src_dy;
+			v[8]  = v[4] = box->x1 + dst_dx;
+			v[10] = v[6] = box->x1 + src_dx;
+			v[9]  = box->y1 + dst_dy;
+			v[11] = box->y1 + src_dy;
+			v += 12; box++;
 		} while (--n_this_time);
 	} while (n);
 
 	gen7_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
-	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	if (tmp.src.bo != src_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return true;
 
 fallback_tiled_src:
-	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+	if (tmp.src.bo != src_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 fallback_tiled_dst:
 	if (tmp.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
@@ -3573,19 +3595,20 @@ gen7_render_copy_blt(struct sna *sna,
 		     int16_t w,  int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_copy_state);
+	int16_t *v;
 
-	OUT_VERTEX(dx+w, dy+h);
-	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_copy_state);
 
-	OUT_VERTEX(dx, dy+h);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F((sy+h)*op->base.src.scale[1]);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(dx, dy);
-	OUT_VERTEX_F(sx*op->base.src.scale[0]);
-	OUT_VERTEX_F(sy*op->base.src.scale[1]);
+	v[0]  = dx+w; v[1]  = dy+h;
+	v[2]  = sx+w; v[3]  = sy+h;
+	v[4]  = dx;   v[5]  = dy+h;
+	v[6]  = sx;   v[7]  = sy+h;
+	v[8]  = dx;   v[9]  = dy;
+	v[10] = sx;   v[11] = sy;
 }
 
 static void
@@ -3601,16 +3624,6 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 struct sna_copy_op *op)
 {
-#if NO_COPY
-	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return false;
-
-	return sna_blt_copy(sna, alu,
-			    src_bo, dst_bo,
-			    dst->drawable.bitsPerPixel,
-			    op);
-#endif
-
 	DBG(("%s (alu=%d, src=(%dx%d), dst=(%dx%d))\n",
 	     __FUNCTION__, alu,
 	     src->drawable.width, src->drawable.height,
@@ -3646,8 +3659,6 @@ fallback:
 	if (!gen7_check_format(op->base.src.pict_format))
 		goto fallback;
 
-	op->base.op = PictOpSrc;
-
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
 	op->base.dst.height = dst->drawable.height;
@@ -3658,20 +3669,13 @@ fallback:
 		gen7_get_card_format(op->base.src.pict_format);
 	op->base.src.width  = src->drawable.width;
 	op->base.src.height = src->drawable.height;
-	op->base.src.scale[0] = 1.f/src->drawable.width;
-	op->base.src.scale[1] = 1.f/src->drawable.height;
 
 	op->base.mask.bo = NULL;
 
-	op->base.is_affine = true;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 
-	op->base.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	op->base.u.gen7.nr_surfaces = 2;
-	op->base.u.gen7.nr_inputs = 1;
-	op->base.u.gen7.ve_id = 1;
-	op->base.u.gen7.sampler = COPY_SAMPLER;
+	op->base.u.gen7.flags = COPY_FLAGS(alu);
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -3784,27 +3788,21 @@ gen7_render_fill_boxes(struct sna *sna,
 						     dst, dst_bo, box, n);
 	}
 
-#if NO_FILL_BOXES
-	return false;
-#endif
-
 	if (op == PictOpClear) {
 		pixel = 0;
 		op = PictOpSrc;
 	} else if (!sna_get_pixel_from_rgba(&pixel,
-				     color->red,
-				     color->green,
-				     color->blue,
-				     color->alpha,
-				     PICT_a8r8g8b8))
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    PICT_a8r8g8b8))
 		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
 	     box[0].x1, box[0].y1, box[0].x2, box[0].y2));
 
-	tmp.op = op;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3815,17 +3813,11 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = false;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	tmp.u.gen7.nr_surfaces = 2;
-	tmp.u.gen7.nr_inputs = 1;
-	tmp.u.gen7.ve_id = 1;
-	tmp.u.gen7.sampler = FILL_SAMPLER;
+	tmp.u.gen7.flags = FILL_FLAGS(op, format);
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -3837,27 +3829,26 @@ gen7_render_fill_boxes(struct sna *sna,
 
 	do {
 		int n_this_time;
+		int16_t *v;
 
 		n_this_time = gen7_get_rectangles(sna, &tmp, n,
 						  gen7_emit_fill_state);
 		n -= n_this_time;
 
+		v = (int16_t *)(sna->render.vertices + sna->render.vertex_used);
+		sna->render.vertex_used += 6 * n_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 		do {
 			DBG(("	(%d, %d), (%d, %d)\n",
 			     box->x1, box->y1, box->x2, box->y2));
-			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
-
-			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
 
-			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
-
-			box++;
+			v[0] = box->x2;
+			v[5] = v[1] = box->y2;
+			v[8] = v[4] = box->x1;
+			v[9] = box->y1;
+			v[2] = v[3]  = v[7]  = 1;
+			v[6] = v[10] = v[11] = 0;
+			v += 12; box++;
 		} while (--n_this_time);
 	} while (n);
 
@@ -3871,21 +3862,23 @@ gen7_render_fill_op_blt(struct sna *sna,
 			const struct sna_fill_op *op,
 			int16_t x, int16_t y, int16_t w, int16_t h)
 {
+	int16_t *v;
+
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
 
-	OUT_VERTEX(x+w, y+h);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(x, y+h);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = x+w;
+	v[4] = v[8] = x;
+	v[1] = v[5] = y+h;
+	v[9] = y;
 
-	OUT_VERTEX(x, y);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[2] = v[3]  = v[7]  = 1;
+	v[6] = v[10] = v[11] = 0;
 }
 
 fastcall static void
@@ -3893,22 +3886,24 @@ gen7_render_fill_op_box(struct sna *sna,
 			const struct sna_fill_op *op,
 			const BoxRec *box)
 {
+	int16_t *v;
+
 	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
 
-	OUT_VERTEX(box->x2, box->y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(box->x1, box->y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = box->x2;
+	v[8] = v[4] = box->x1;
+	v[5] = v[1] = box->y2;
+	v[9] = box->y1;
 
-	OUT_VERTEX(box->x1, box->y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 }
 
 fastcall static void
@@ -3922,24 +3917,24 @@ gen7_render_fill_op_boxes(struct sna *sna,
 
 	do {
 		int nbox_this_time;
+		int16_t *v;
 
 		nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
 						     gen7_emit_fill_state);
 		nbox -= nbox_this_time;
 
-		do {
-			OUT_VERTEX(box->x2, box->y2);
-			OUT_VERTEX_F(1);
-			OUT_VERTEX_F(1);
-
-			OUT_VERTEX(box->x1, box->y2);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(1);
+		v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+		sna->render.vertex_used += 6 * nbox_this_time;
+		assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-			OUT_VERTEX(box->x1, box->y1);
-			OUT_VERTEX_F(0);
-			OUT_VERTEX_F(0);
-			box++;
+		do {
+			v[0] = box->x2;
+			v[8] = v[4] = box->x1;
+			v[5] = v[1] = box->y2;
+			v[9] = box->y1;
+			v[7] = v[2]  = v[3]  = 1;
+			v[6] = v[10] = v[11] = 0;
+			box++; v += 12;
 		} while (--nbox_this_time);
 	} while (nbox);
 }
@@ -3960,13 +3955,6 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 {
 	DBG(("%s: (alu=%d, color=%x)\n", __FUNCTION__, alu, color));
 
-#if NO_FILL
-	return sna_blt_fill(sna, alu,
-			    dst_bo, dst->drawable.bitsPerPixel,
-			    color,
-			    op);
-#endif
-
 	if (prefer_blt_fill(sna, dst_bo) &&
 	    sna_blt_fill(sna, alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
@@ -3984,8 +3972,6 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	if (alu == GXclear)
 		color = 0;
 
-	op->base.op = PictOpSrc;
-
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
 	op->base.dst.height = dst->drawable.height;
@@ -3999,17 +3985,11 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 							dst->drawable.depth));
 	op->base.mask.bo = NULL;
 
-	op->base.is_affine = true;
-	op->base.has_component_alpha = false;
 	op->base.need_magic_ca_pass = false;
-	op->base.floats_per_vertex = 3;
-	op->base.floats_per_rect = 9;
+	op->base.floats_per_vertex = 2;
+	op->base.floats_per_rect = 6;
 
-	op->base.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	op->base.u.gen7.nr_surfaces = 2;
-	op->base.u.gen7.nr_inputs = 1;
-	op->base.u.gen7.ve_id = 1;
-	op->base.u.gen7.sampler = FILL_SAMPLER;
+	op->base.u.gen7.flags = FILL_FLAGS_NOBLEND;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -4052,11 +4032,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint8_t alu)
 {
 	struct sna_composite_op tmp;
-
-#if NO_FILL_BOXES
-	return gen7_render_fill_one_try_blt(sna, dst, bo, color,
-					    x1, y1, x2, y2, alu);
-#endif
+	int16_t *v;
 
 	/* Prefer to use the BLT if already engaged */
 	if (prefer_blt_fill(sna, bo) &&
@@ -4073,8 +4049,6 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (alu == GXclear)
 		color = 0;
 
-	tmp.op = PictOpSrc;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -4088,17 +4062,11 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 							dst->drawable.depth));
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	tmp.u.gen7.nr_surfaces = 2;
-	tmp.u.gen7.nr_inputs = 1;
-	tmp.u.gen7.ve_id = 1;
-	tmp.u.gen7.sampler = FILL_SAMPLER;
+	tmp.u.gen7.flags = FILL_FLAGS_NOBLEND;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -4111,17 +4079,17 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
-	OUT_VERTEX(x2, y2);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
 
-	OUT_VERTEX(x1, y2);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(x1, y1);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[0] = x2;
+	v[8] = v[4] = x1;
+	v[5] = v[1] = y2;
+	v[9] = y1;
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -4148,10 +4116,7 @@ static bool
 gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
-
-#if NO_CLEAR
-	return gen7_render_clear_try_blt(sna, dst, bo);
-#endif
+	int16_t *v;
 
 	DBG(("%s: %dx%d\n",
 	     __FUNCTION__,
@@ -4167,8 +4132,6 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	if (too_large(dst->drawable.width, dst->drawable.height))
 		return gen7_render_clear_try_blt(sna, dst, bo);
 
-	tmp.op = PictOpSrc;
-
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -4179,17 +4142,11 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.src.bo = sna_render_get_solid(sna, 0);
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = true;
-	tmp.floats_per_vertex = 3;
-	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = 0;
+	tmp.floats_per_vertex = 2;
+	tmp.floats_per_rect = 6;
 	tmp.need_magic_ca_pass = false;
 
-	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
-	tmp.u.gen7.nr_surfaces = 2;
-	tmp.u.gen7.nr_inputs = 1;
-	tmp.u.gen7.ve_id = 1;
-	tmp.u.gen7.sampler = FILL_SAMPLER;
+	tmp.u.gen7.flags = FILL_FLAGS_NOBLEND;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -4201,17 +4158,17 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 
 	gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
 
-	OUT_VERTEX(dst->drawable.width, dst->drawable.height);
-	OUT_VERTEX_F(1);
-	OUT_VERTEX_F(1);
+	v = (int16_t *)&sna->render.vertices[sna->render.vertex_used];
+	sna->render.vertex_used += 6;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
-	OUT_VERTEX(0, dst->drawable.height);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(1);
+	v[0] = dst->drawable.width;
+	v[5] = v[1] = dst->drawable.height;
+	v[8] = v[4] = 0;
+	v[9] = 0;
 
-	OUT_VERTEX(0, 0);
-	OUT_VERTEX_F(0);
-	OUT_VERTEX_F(0);
+	v[7] = v[2]  = v[3]  = 1;
+	v[6] = v[10] = v[11] = 0;
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -4322,10 +4279,13 @@ static bool gen7_render_setup(struct sna *sna)
 
 	ss = sna_static_stream_map(&general,
 				   2 * sizeof(*ss) *
-				   FILTER_COUNT * EXTEND_COUNT *
-				   FILTER_COUNT * EXTEND_COUNT,
+				   (2 +
+				    FILTER_COUNT * EXTEND_COUNT *
+				    FILTER_COUNT * EXTEND_COUNT),
 				   32);
 	state->wm_state = sna_static_stream_offsetof(&general, ss);
+	sampler_copy_init(ss); ss += 2;
+	sampler_fill_init(ss); ss += 2;
 	for (i = 0; i < FILTER_COUNT; i++) {
 		for (j = 0; j < EXTEND_COUNT; j++) {
 			for (k = 0; k < FILTER_COUNT; k++) {
@@ -4353,19 +4313,33 @@ bool gen7_render_init(struct sna *sna)
 	sna->kgem.retire = gen7_render_retire;
 	sna->kgem.expire = gen7_render_expire;
 
+#if !NO_COMPOSITE
 	sna->render.composite = gen7_render_composite;
+#endif
 #if !NO_COMPOSITE_SPANS
 	sna->render.composite_spans = gen7_render_composite_spans;
 #endif
 	sna->render.video = gen7_render_video;
 
+#if !NO_COPY_BOXES
 	sna->render.copy_boxes = gen7_render_copy_boxes;
+#endif
+#if !NO_COPY
 	sna->render.copy = gen7_render_copy;
+#endif
 
+#if !NO_FILL_BOXES
 	sna->render.fill_boxes = gen7_render_fill_boxes;
+#endif
+#if !NO_FILL
 	sna->render.fill = gen7_render_fill;
+#endif
+#if !NO_FILL_ONE
 	sna->render.fill_one = gen7_render_fill_one;
+#endif
+#if !NO_FILL_CLEAR
 	sna->render.clear = gen7_render_clear;
+#endif
 
 	sna->render.flush = gen7_render_flush;
 	sna->render.reset = gen7_render_reset;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 0f96ace..15d882f 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -133,11 +133,7 @@ struct sna_composite_op {
 		} gen6;
 
 		struct {
-			int wm_kernel;
-			int nr_surfaces;
-			int nr_inputs;
-			int ve_id;
-			int sampler;
+			uint32_t flags;
 		} gen7;
 	} u;
 
commit 267429bbb146449ee4d3b88fa8e23c5b1d53470a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 19:24:44 2012 +0100

    sna: Enable runtime detection of set-cacheing ioctl
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 90dae7e..b3a786d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -247,16 +247,6 @@ if test "x$accel" = xno; then
 	AC_MSG_ERROR([No default acceleration option])
 fi
 
-AC_ARG_ENABLE(cache-level,
-	      AS_HELP_STRING([--enable-cache-level],
-			     [Enable use of cache level ioctl (experimental) [default=no]]),
-	      [CACHE_LEVEL="$enableval"],
-	      [CACHE_LEVEL=no])
-AM_CONDITIONAL(USE_CACHE_LEVEL, test x$CACHE_LEVEL = xyes)
-if test "x$CACHE_LEVEL" = xyes; then
-	AC_DEFINE(USE_CACHE_LEVEL,1,[Assume DRM_I915_GEM_SET_CACHE_LEVEL_IOCTL support])
-fi
-
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
 			     [Enable use of vmap (experimental) [default=no]]),
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6327e49..e5c97f6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -104,22 +104,16 @@ struct drm_i915_gem_vmap {
 };
 #endif
 
-#if !defined(DRM_I915_GEM_SET_CACHE_LEVEL)
-#define I915_CACHE_NONE		0
-#define I915_CACHE_LLC		1
-#define I915_CACHE_LLC_MLC	2 /* gen6+ */
+#define UNCACHED	0
+#define SNOOPED		1
 
-struct drm_i915_gem_cache_level {
-	/** Handle of the buffer to check for busy */
-	__u32 handle;
-
-	/** Cache level to apply or return value */
-	__u32 cache_level;
+struct local_i915_gem_cacheing {
+	uint32_t handle;
+	uint32_t cacheing;
 };
 
-#define DRM_I915_GEM_SET_CACHE_LEVEL	0x2f
-#define DRM_IOCTL_I915_GEM_SET_CACHE_LEVEL		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHE_LEVEL, struct drm_i915_gem_cache_level)
-#endif
+#define LOCAL_I915_GEM_SET_CACHEING	0x2f
+#define LOCAL_IOCTL_I915_GEM_SET_CACHEING DRM_IOW(DRM_COMMAND_BASE + LOCAL_I915_GEM_SET_CACHEING, struct local_i915_gem_cacheing)
 
 struct kgem_partial_bo {
 	struct kgem_bo base;
@@ -193,14 +187,14 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
-static bool gem_set_cache_level(int fd, uint32_t handle, int cache_level)
+static bool gem_set_cacheing(int fd, uint32_t handle, int cacheing)
 {
-	struct drm_i915_gem_cache_level arg;
+	struct local_i915_gem_cacheing arg;
 
 	VG_CLEAR(arg);
 	arg.handle = handle;
-	arg.cache_level = cache_level;
-	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHE_LEVEL, &arg) == 0;
+	arg.cacheing = cacheing;
+	return drmIoctl(fd, LOCAL_IOCTL_I915_GEM_SET_CACHEING, &arg) == 0;
 }
 
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
@@ -681,9 +675,8 @@ static bool test_has_llc(struct kgem *kgem)
 	return has_llc;
 }
 
-static bool test_has_cache_level(struct kgem *kgem)
+static bool test_has_cacheing(struct kgem *kgem)
 {
-#if defined(USE_CACHE_LEVEL)
 	uint32_t handle;
 	bool ret;
 
@@ -698,12 +691,9 @@ static bool test_has_cache_level(struct kgem *kgem)
 	if (handle == 0)
 		return false;
 
-	ret = gem_set_cache_level(kgem->fd, handle, I915_CACHE_NONE);
+	ret = gem_set_cacheing(kgem->fd, handle, UNCACHED);
 	gem_close(kgem->fd, handle);
 	return ret;
-#else
-	return false;
-#endif
 }
 
 static bool test_has_vmap(struct kgem *kgem)
@@ -759,9 +749,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
 	     kgem->has_llc));
 
-	kgem->has_cache_level = test_has_cache_level(kgem);
+	kgem->has_cacheing = test_has_cacheing(kgem);
 	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
-	     kgem->has_cache_level));
+	     kgem->has_cacheing));
 
 	kgem->has_vmap = test_has_vmap(kgem);
 	DBG(("%s: has vmap? %d\n", __FUNCTION__,
@@ -830,8 +820,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->next_request = __kgem_request_alloc();
 
 	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_vmap | kgem->has_cache_level,
-	     kgem->has_llc, kgem->has_cache_level, kgem->has_vmap));
+	     kgem->has_llc | kgem->has_vmap | kgem->has_cacheing,
+	     kgem->has_llc, kgem->has_cacheing, kgem->has_vmap));
 
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
@@ -902,7 +892,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->large_object_size = MAX_CACHE_SIZE;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
-	if (kgem->has_llc | kgem->has_cache_level | kgem->has_vmap) {
+	if (kgem->has_llc | kgem->has_cacheing | kgem->has_vmap) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
 	} else
@@ -3249,7 +3239,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		return bo;
 	}
 
-	if (kgem->has_cache_level) {
+	if (kgem->has_cacheing) {
 		bo = kgem_create_linear(kgem, size, flags);
 		if (bo == NULL)
 			return NULL;
@@ -3258,7 +3248,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 		bo->reusable = false;
 		bo->vmap = true;
-		if (!gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC) ||
+		if (!gem_set_cacheing(kgem->fd, bo->handle, SNOOPED) ||
 		    kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
 			return NULL;
@@ -4004,14 +3994,14 @@ create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
 {
 	struct kgem_partial_bo *bo;
 
-	if (kgem->has_cache_level) {
+	if (kgem->has_cacheing) {
 		uint32_t handle;
 
 		handle = gem_create(kgem->fd, alloc);
 		if (handle == 0)
 			return NULL;
 
-		if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
+		if (!gem_set_cacheing(kgem->fd, handle, SNOOPED)) {
 			gem_close(kgem->fd, handle);
 			return NULL;
 		}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index b8d755c..b038bb1 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -158,7 +158,7 @@ struct kgem {
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_relaxed_delta :1;
 	uint32_t has_semaphores :1;
-	uint32_t has_cache_level :1;
+	uint32_t has_cacheing :1;
 	uint32_t has_llc :1;
 
 	uint32_t can_blt_cpu :1;
commit c0b3674d042ff55d64ad1fd0d64926e1967be323
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 20:48:27 2012 +0100

    sna/trapezoids: Only reduce bounded operators to a single pass
    
    Only for a few operators can we replace the opacity mask by
    premultiplying into the source.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 7124baf..c3a5447 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4873,11 +4873,36 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 {
 	struct tor tor;
 	span_func_t span;
+	uint32_t color;
+	bool lerp;
 	RegionRec region;
 	int16_t dst_x, dst_y;
 	int dx, dy;
 	int n;
 
+	lerp = false;
+	if (sna_picture_is_solid(src, &color)) {
+		if (op == PictOpOver && (color >> 24) == 0xff)
+			op = PictOpSrc;
+		if (op == PictOpOver && sna_drawable_is_clear(dst->pDrawable))
+			op = PictOpSrc;
+		lerp = op == PictOpSrc;
+	}
+	if (!lerp) {
+		switch (op) {
+		case PictOpOver:
+		case PictOpAdd:
+		case PictOpOutReverse:
+			break;
+		case PictOpSrc:
+			if (!sna_drawable_is_clear(dst->pDrawable))
+				return false;
+			break;
+		default:
+			return false;
+		}
+	}
+
 	if (maskFormat == NULL && ntrap > 1) {
 		DBG(("%s: individual rasterisation requested\n",
 		     __FUNCTION__));
@@ -4902,8 +4927,8 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 	     region.extents.x2, region.extents.y2));
 
 	if (!sna_compute_composite_extents(&region.extents,
-					   NULL, NULL, dst,
-					   0, 0,
+					   src, NULL, dst,
+					   src_x, src_y,
 					   0, 0,
 					   region.extents.x1, region.extents.y1,
 					   region.extents.x2 - region.extents.x1,
@@ -4940,26 +4965,14 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 	if (sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
 					    MOVE_WRITE | MOVE_READ)) {
 		PixmapPtr pixmap;
-		uint32_t color;
 
 		pixmap = get_drawable_pixmap(dst->pDrawable);
 		get_drawable_deltas(dst->pDrawable, pixmap, &dst_x, &dst_y);
 
-		if (!sna_picture_is_solid(src, &color))
-			goto pixman;
-
-		if (op == PictOpOver && (color >> 24) == 0xff)
-			op = PictOpSrc;
-		if (op == PictOpOver) {
-			struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
-			if (priv && priv->clear && priv->clear_color == 0)
-				op = PictOpSrc;
-		}
-
 		DBG(("%s: format=%x, op=%d, color=%x\n",
 		     __FUNCTION__, dst->format, op, color));
 
-		if (op == PictOpSrc) {
+		if (lerp) {
 			struct inplace inplace;
 
 			inplace.ptr = pixmap->devPrivate.ptr;
@@ -4981,11 +4994,10 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 		} else {
 			struct pixman_inplace pi;
 
-pixman:
 			pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
 			pi.source = image_from_pict(src, false, &pi.sx, &pi.sy);
-			pi.sx += src_x;
-			pi.sy += src_y;
+			pi.sx += src_x - pixman_fixed_to_int(traps[0].left.p1.x);
+			pi.sy += src_y - pixman_fixed_to_int(traps[0].left.p1.y);
 			pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
 			pixman_image_set_repeat(pi.mask, PIXMAN_REPEAT_NORMAL);
 			pi.bits = pixman_image_get_data(pi.mask);
@@ -4998,7 +5010,7 @@ pixman:
 
 			tor_render(NULL, &tor, (void*)&pi,
 				   dst->pCompositeClip, span,
-				   operator_is_bounded(op));
+				   false);
 			tor_fini(&tor);
 
 			pixman_image_unref(pi.mask);
commit bb0303677c38076db14dfbceec3636197a971e8c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 10:40:50 2012 +0100

    sna/trapezoids: Use pixman from within the spans to reduce two-pass operations
    
    Reduce the two pass CompositeTrapezoids if we can perform the operation
    inplace by calling pixman_image_composite from the span. This step
    enables this for xrgb32.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 634423e..7124baf 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4824,10 +4824,50 @@ unbounded_pass:
 	return true;
 }
 
+static void
+pixmask_span(struct sna *sna,
+	     struct sna_composite_spans_op *op,
+	     pixman_region16_t *clip,
+	     const BoxRec *box,
+	     int coverage)
+{
+	struct pixman_inplace *pi = (struct pixman_inplace *)op;
+	pixman_image_t *mask = NULL;
+	if (coverage != FAST_SAMPLES_XY) {
+		coverage = coverage * 256 / FAST_SAMPLES_XY;
+		coverage -= coverage >> 8;
+		*pi->bits = coverage;
+		mask = pi->mask;
+	}
+	pixman_image_composite(pi->op, pi->source, mask, pi->image,
+			       pi->sx + box->x1, pi->sy + box->y1,
+			       0, 0,
+			       pi->dx + box->x1, pi->dy + box->y1,
+			       box->x2 - box->x1, box->y2 - box->y1);
+}
+static void
+pixmask_span__clipped(struct sna *sna,
+		      struct sna_composite_spans_op *op,
+		      pixman_region16_t *clip,
+		      const BoxRec *box,
+		      int coverage)
+{
+	pixman_region16_t region;
+	int n;
+
+	pixman_region_init_rects(&region, box, 1);
+	RegionIntersect(&region, &region, clip);
+	n = REGION_NUM_RECTS(&region);
+	box = REGION_RECTS(&region);
+	while (n--)
+		pixmask_span(sna, op, NULL, box++, coverage);
+	pixman_region_fini(&region);
+}
+
 static bool
 trapezoid_span_inplace__x8r8g8b8(CARD8 op,
-				 uint32_t color,
 				 PicturePtr dst,
+				 PicturePtr src, int16_t src_x, int16_t src_y,
 				 PictFormatPtr maskFormat,
 				 int ntrap, xTrapezoid *traps)
 {
@@ -4838,34 +4878,14 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 	int dx, dy;
 	int n;
 
-	if (op == PictOpOver && (color >> 24) == 0xff)
-		op = PictOpSrc;
-	if (op == PictOpOver) {
-		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
-		if (priv && priv->clear && priv->clear_color == 0)
-			op = PictOpSrc;
-	}
-
-	switch (op) {
-	case PictOpSrc:
-		break;
-	default:
-		DBG(("%s: fallback -- can not perform op [%d] in place\n",
-		     __FUNCTION__, op));
-		return false;
-	}
-
-	DBG(("%s: format=%x, op=%d, color=%x\n",
-	     __FUNCTION__, dst->format, op, color));
-
 	if (maskFormat == NULL && ntrap > 1) {
 		DBG(("%s: individual rasterisation requested\n",
 		     __FUNCTION__));
 		do {
 			/* XXX unwind errors? */
-			if (!trapezoid_span_inplace__x8r8g8b8(op, color,
-							      dst, NULL,
-							      1, traps++))
+			if (!trapezoid_span_inplace__x8r8g8b8(op, dst,
+							      src, src_x, src_y,
+							      NULL, 1, traps++))
 				return false;
 		} while (--ntrap);
 		return true;
@@ -4915,37 +4935,76 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 		tor_add_edge(&tor, &t, &t.right, -1);
 	}
 
-	switch (op) {
-	case PictOpSrc:
-		if (dst->pCompositeClip->data)
-			span = tor_blt_lerp32_clipped;
-		else
-			span = tor_blt_lerp32;
-		break;
-	}
-
 	DBG(("%s: move-to-cpu\n", __FUNCTION__));
 	region.data = NULL;
 	if (sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
 					    MOVE_WRITE | MOVE_READ)) {
 		PixmapPtr pixmap;
-		struct inplace inplace;
+		uint32_t color;
 
 		pixmap = get_drawable_pixmap(dst->pDrawable);
-
 		get_drawable_deltas(dst->pDrawable, pixmap, &dst_x, &dst_y);
 
-		inplace.ptr = pixmap->devPrivate.ptr;
-		inplace.ptr += dst_y * pixmap->devKind + dst_x;
-		inplace.stride = pixmap->devKind;
-		inplace.color = color;
+		if (!sna_picture_is_solid(src, &color))
+			goto pixman;
+
+		if (op == PictOpOver && (color >> 24) == 0xff)
+			op = PictOpSrc;
+		if (op == PictOpOver) {
+			struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
+			if (priv && priv->clear && priv->clear_color == 0)
+				op = PictOpSrc;
+		}
+
+		DBG(("%s: format=%x, op=%d, color=%x\n",
+		     __FUNCTION__, dst->format, op, color));
+
+		if (op == PictOpSrc) {
+			struct inplace inplace;
+
+			inplace.ptr = pixmap->devPrivate.ptr;
+			inplace.ptr += dst_y * pixmap->devKind + dst_x;
+			inplace.stride = pixmap->devKind;
+			inplace.color = color;
+
+			if (dst->pCompositeClip->data)
+				span = tor_blt_lerp32_clipped;
+			else
+				span = tor_blt_lerp32;
+
+			DBG(("%s: render inplace op=%d, color=%08x\n",
+			     __FUNCTION__, op, color));
+
+			tor_render(NULL, &tor, (void*)&inplace,
+				   dst->pCompositeClip, span, false);
+			tor_fini(&tor);
+		} else {
+			struct pixman_inplace pi;
+
+pixman:
+			pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
+			pi.source = image_from_pict(src, false, &pi.sx, &pi.sy);
+			pi.sx += src_x;
+			pi.sy += src_y;
+			pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
+			pixman_image_set_repeat(pi.mask, PIXMAN_REPEAT_NORMAL);
+			pi.bits = pixman_image_get_data(pi.mask);
+			pi.op = op;
+
+			if (dst->pCompositeClip->data)
+				span = pixmask_span__clipped;
+			else
+				span = pixmask_span;
 
-		DBG(("%s: render inplace op=%d, color=%08x\n",
-		     __FUNCTION__, op, color));
-		tor_render(NULL, &tor, (void*)&inplace,
-			   dst->pCompositeClip, span, false);
+			tor_render(NULL, &tor, (void*)&pi,
+				   dst->pCompositeClip, span,
+				   operator_is_bounded(op));
+			tor_fini(&tor);
 
-		tor_fini(&tor);
+			pixman_image_unref(pi.mask);
+			pixman_image_unref(pi.source);
+			pixman_image_unref(pi.image);
+		}
 	}
 
 	return true;
@@ -4994,17 +5053,18 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return trapezoid_span_mono_inplace(op, src, dst,
 						   src_x, src_y, ntrap, traps);
 
+	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)
+		return trapezoid_span_inplace__x8r8g8b8(op, dst,
+							src, src_x, src_y,
+							maskFormat,
+							ntrap, traps);
+
 	if (!sna_picture_is_solid(src, &color)) {
 		DBG(("%s: fallback -- can not perform operation in place, requires solid source\n",
 		     __FUNCTION__));
 		return false;
 	}
 
-	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)
-		return trapezoid_span_inplace__x8r8g8b8(op, color,
-							dst, maskFormat,
-							ntrap, traps);
-
 	if (dst->format != PICT_a8) {
 		DBG(("%s: fallback -- can not perform operation in place, format=%x\n",
 		     __FUNCTION__, dst->format));
commit bee1a14618797b3d3a1c1a20eb72644fa907c048
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 09:38:32 2012 +0100

    sna: Fix processing of the last fallback box
    
    The evil typo caused us to misalign the clip boxes and run over a
    garbage array on 64-bit builds.
    
    Reported-by: Edward Sheldrake <ejsheldrake at gmail.com>
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52163
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbclip.c b/src/sna/fb/fbclip.c
index 5a71b0c..37011a7 100644
--- a/src/sna/fb/fbclip.c
+++ b/src/sna/fb/fbclip.c
@@ -73,7 +73,7 @@ fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end)
 		return &region->extents;
 	}
 
-	c0 = (const BoxRec *)region->data + 1;
+	c0 = (const BoxRec *)(region->data + 1);
 	c1 = c0 + region->data->numRects;
 
 	if (c0->y2 <= box->y1) {
commit 88cb1968b6dbf3edfa885da9503e91124af46007
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 09:38:03 2012 +0100

    sna: Add more DBG for fallback processing
    
    Hunting the lost box...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index fa5d032..0758728 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -37,6 +37,11 @@ static inline void add(RegionPtr region,
 	r->x1 = x1; r->y1 = y1;
 	r->x2 = x2; r->y2 = y2;
 
+	DBG(("%s[%d/%d]: (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region->data->numRects, region->data->size,
+	     x1, y1, x2, y2));
+
 	if (x1 < region->extents.x1)
 		region->extents.x1 = x1;
 	if (x2 > region->extents.x2)
@@ -138,5 +143,11 @@ fbBitmapToRegion(PixmapPtr pixmap)
 	} else
 		region->extents.x1 = region->extents.x2 = 0;
 
+	DBG(("%s: region extents=(%d, %d), (%d, %d) x %d\n",
+	     __FUNCTION__,
+	     region->extents.x1, region->extents.y1,
+	     region->extents.x2, region->extents.y2,
+	     RegionNumRects(region)));
+
 	return region;
 }
diff --git a/src/sna/fb/fbblt.c b/src/sna/fb/fbblt.c
index fd55c85..3b3fa48 100644
--- a/src/sna/fb/fbblt.c
+++ b/src/sna/fb/fbblt.c
@@ -285,7 +285,10 @@ fbBlt(FbBits *srcLine, FbStride srcStride, int srcX,
 		s += srcX >> 3;
 		d += dstX >> 3;
 
-		DBG(("%s fast blt\n", __FUNCTION__));
+		DBG(("%s fast blt, src_stride=%d, dst_stride=%d, width=%d (offset=%d)\n",
+		     __FUNCTION__,
+		     srcStride, dstStride, width,
+		     srcLine - dstLine));
 
 		if ((srcLine < dstLine && srcLine + width > dstLine) ||
 		    (dstLine < srcLine && dstLine + width > srcLine))
diff --git a/src/sna/fb/fbclip.c b/src/sna/fb/fbclip.c
index 8d9c4db..5a71b0c 100644
--- a/src/sna/fb/fbclip.c
+++ b/src/sna/fb/fbclip.c
@@ -76,11 +76,16 @@ fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end)
 	c0 = (const BoxRec *)region->data + 1;
 	c1 = c0 + region->data->numRects;
 
-	if (c0->y2 <= box->y1)
+	if (c0->y2 <= box->y1) {
+		DBG(("%s: first clip (%d, %d), (%d, %d) before box (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     c0->x1, c0->y1, c0->x2, c0->y2,
+		     box->x1, box->y1, box->x2, box->y2));
 		c0 = find_clip_row_for_y(c0, c1, box->y1);
+	}
 
-	DBG(("%s: c0=(%d, %d),(%d, %d)\n",
-	     __FUNCTION__, c0->x1, c0->y1, c0->x2, c0->y2));
+	DBG(("%s: c0=(%d, %d),(%d, %d) x %ld\n",
+	     __FUNCTION__, c0->x1, c0->y1, c0->x2, c0->y2, c1 - c0));
 
 	*end = c1;
 	return c0;
diff --git a/src/sna/fb/fbclip.h b/src/sna/fb/fbclip.h
index feb2d2c..f07e63c 100644
--- a/src/sna/fb/fbclip.h
+++ b/src/sna/fb/fbclip.h
@@ -46,6 +46,13 @@ box_intersect(BoxPtr a, const BoxRec *b)
 	return a->x1 < a->x2 && a->y1 < a->y2;
 }
 
+#define run_box(b, c) \
+	DBG(("%s: box=(%d, %d), (%d, %d), clip=(%d, %d), (%d, %d)\n", \
+	     __FUNCTION__, (b)->x1, (b)->y1, (b)->x2, (b)->y2, (c)->x1, (c)->y1, (c)->x2, (c)->y2)); \
+	if ((b)->y2 <= (c)->y1) break; \
+	if ((b)->x1 >= (c)->x2) continue; \
+	if ((b)->x2 <= (c)->x1) { if ((b)->y2 <= (c)->y2) break; continue; }
+
 static inline void
 fbDrawableRun(DrawablePtr d, GCPtr gc, const BoxRec *box,
 	      void (*func)(DrawablePtr, GCPtr, const BoxRec *b, void *data),
@@ -55,14 +62,7 @@ fbDrawableRun(DrawablePtr d, GCPtr gc, const BoxRec *box,
 	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
 		BoxRec b;
 
-		if (box->x1 >= c->x2)
-			continue;
-		if (box->x2 <= c->x1) {
-			if (box->y2 <= c->y2)
-				break;
-			else
-				continue;
-		}
+		run_box(box, c);
 
 		b = *box;
 		if (box_intersect(&b, c))
@@ -77,14 +77,7 @@ fbDrawableRunUnclipped(DrawablePtr d, GCPtr gc, const BoxRec *box,
 {
 	const BoxRec *c, *end;
 	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
-		if (box->x1 >= c->x2)
-			continue;
-		if (box->x2 <= c->x1) {
-			if (box->y2 <= c->y2)
-				break;
-			else
-				continue;
-		}
+		run_box(box, c);
 		func(d, gc, c, data);
 	}
 }
diff --git a/src/sna/fb/fbfill.c b/src/sna/fb/fbfill.c
index 3df1f9c..a9ae2bc 100644
--- a/src/sna/fb/fbfill.c
+++ b/src/sna/fb/fbfill.c
@@ -141,28 +141,14 @@ fbFill(DrawablePtr drawable, GCPtr gc, int x, int y, int width, int height)
 
 	case FillTiled:
 		{
-			PixmapPtr pTile = gc->tile.pixmap;
-			FbBits *tile;
-			FbStride tileStride;
-			int tileBpp;
-			int tileWidth;
-			int tileHeight;
-			_X_UNUSED int tileXoff, tileYoff;
-
-			fbGetDrawable(&pTile->drawable, tile,
-				      tileStride, tileBpp, tileXoff, tileYoff);
-			tileWidth = pTile->drawable.width;
-			tileHeight = pTile->drawable.height;
-			fbTile(dst + (y + dstYoff) * dstStride,
-			       dstStride,
-			       (x + dstXoff) * dstBpp,
-			       width * dstBpp, height,
-			       tile,
-			       tileStride,
-			       tileWidth * tileBpp,
-			       tileHeight,
-			       gc->alu, pgc->pm,
-			       dstBpp,
+			PixmapPtr tile = gc->tile.pixmap;
+
+			fbTile(dst + (y + dstYoff) * dstStride, dstStride,
+			       (x + dstXoff) * dstBpp, width * dstBpp, height,
+			       tile->devPrivate.ptr, tile->devKind / sizeof(FbBits),
+			       tile->drawable.width * tile->drawable.bitsPerPixel,
+			       tile->drawable.height,
+			       gc->alu, pgc->pm, dstBpp,
 			       (gc->patOrg.x + drawable->x + dstXoff) * dstBpp,
 			       gc->patOrg.y + drawable->y - y);
 			break;
diff --git a/src/sna/fb/fbtile.c b/src/sna/fb/fbtile.c
index 5586553..c350671 100644
--- a/src/sna/fb/fbtile.c
+++ b/src/sna/fb/fbtile.c
@@ -99,30 +99,29 @@ fbOddTile(FbBits *dst, FbStride dstStride, int dstX,
 	  int xRot, int yRot)
 {
 	int tileX, tileY;
-	int widthTmp;
-	int h, w;
 	int x, y;
 
+	DBG(("%s tile=%dx%d, size=%dx%d\n", __FUNCTION__,
+	     tileWidth, tileHeight, width, height));
+
 	modulus(-yRot, tileHeight, tileY);
 	y = 0;
 	while (height) {
-		h = tileHeight - tileY;
+		int ww = width;
+		int h = tileHeight - tileY;
 		if (h > height)
 			h = height;
 		height -= h;
-		widthTmp = width;
 		x = dstX;
 		modulus(dstX - xRot, tileWidth, tileX);
-		while (widthTmp) {
-			w = tileWidth - tileX;
-			if (w > widthTmp)
-				w = widthTmp;
-			widthTmp -= w;
-			fbBlt(tile + tileY * tileStride,
-			      tileStride,
-			      tileX,
-			      dst + y * dstStride,
-			      dstStride, x, w, h, alu, pm, bpp, FALSE, FALSE);
+		while (ww) {
+			int w = tileWidth - tileX;
+			if (w > ww)
+				w = ww;
+			ww -= w;
+			fbBlt(tile + tileY * tileStride, tileStride, tileX,
+			      dst + y * dstStride, dstStride,
+			      x, w, h, alu, pm, bpp, FALSE, FALSE);
 			x += w;
 			tileX = 0;
 		}
commit 36f2e46619598e9bca4fe1207aa2f157bfa1ecf4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 00:45:54 2012 +0100

    sna: Reuse the snoopable cache more frequently for upload buffers
    
    Now that we are keeping a small cache of snoopable buffers, experiment
    with using them for uploads more frequently.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d9e27e2..6327e49 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3950,15 +3950,122 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 static inline bool
 use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 {
-	assert(kgem->gen != 40);
-
-	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE)
-		return true;
-
 	if ((flags & KGEM_BUFFER_WRITE) == 0)
 		return kgem->gen >= 30;
 
-	return false;
+	return true;
+}
+
+static struct kgem_partial_bo *
+search_snoopable_buffer(struct kgem *kgem, unsigned alloc)
+{
+	struct kgem_partial_bo *bo;
+	struct kgem_bo *old;
+
+	old = search_vmap_cache(kgem, alloc, 0);
+	if (old) {
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL)
+			return NULL;
+
+		memcpy(&bo->base, old, sizeof(*old));
+		if (old->rq)
+			list_replace(&old->request, &bo->base.request);
+		else
+			list_init(&bo->base.request);
+		list_replace(&old->vma, &bo->base.vma);
+		list_init(&bo->base.list);
+		free(old);
+
+		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
+		     __FUNCTION__, bo->base.handle, num_pages(&bo->base)));
+
+		assert(bo->base.vmap);
+		assert(bo->base.tiling == I915_TILING_NONE);
+		assert(num_pages(&bo->base) >= alloc);
+
+		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+		if (bo->mem) {
+			bo->mmapped = true;
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->base.refcnt = 1;
+
+			return bo;
+		} else
+			kgem_bo_free(kgem, &bo->base);
+	}
+
+	return NULL;
+}
+
+static struct kgem_partial_bo *
+create_snoopable_buffer(struct kgem *kgem, unsigned alloc)
+{
+	struct kgem_partial_bo *bo;
+
+	if (kgem->has_cache_level) {
+		uint32_t handle;
+
+		handle = gem_create(kgem->fd, alloc);
+		if (handle == 0)
+			return NULL;
+
+		if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
+			gem_close(kgem->fd, handle);
+			return NULL;
+		}
+
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL) {
+			gem_close(kgem->fd, handle);
+			return NULL;
+		}
+
+		debug_alloc(kgem, alloc);
+		__kgem_bo_init(&bo->base, handle, alloc);
+		DBG(("%s: created CPU handle=%d for buffer, size %d\n",
+		     __FUNCTION__, bo->base.handle, alloc));
+
+		bo->base.reusable = false;
+		bo->base.vmap = true;
+
+		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+		if (bo->mem) {
+			bo->mmapped = true;
+			bo->need_io = false;
+			bo->base.io = true;
+			return bo;
+		} else {
+			bo->base.refcnt = 0; /* for valgrind */
+			kgem_bo_free(kgem, &bo->base);
+		}
+	}
+
+	if (kgem->has_vmap) {
+		bo = partial_bo_alloc(alloc);
+		if (bo) {
+			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
+						   alloc * PAGE_SIZE, false);
+			if (handle == 0 ||
+			    !__kgem_bo_init(&bo->base, handle, alloc)) {
+				free(bo);
+			} else {
+				DBG(("%s: created vmap handle=%d for buffer\n",
+				     __FUNCTION__, bo->base.handle));
+
+				bo->base.io = true;
+				bo->base.vmap = true;
+				bo->base.map = MAKE_VMAP_MAP(bo);
+				bo->mmapped = true;
+				bo->need_io = false;
+
+				return bo;
+			}
+		}
+	}
+
+	return NULL;
 }
 
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
@@ -4199,105 +4306,23 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 #endif
-	/* Be more parsimonious with pwrite/pread buffers */
+	/* Be more parsimonious with pwrite/pread/cacheable buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
 		alloc = NUM_PAGES(size);
 
 	if (use_snoopable_buffer(kgem, flags)) {
-		old = search_vmap_cache(kgem, NUM_PAGES(size), 0);
-		if (old) {
-			bo = malloc(sizeof(*bo));
-			if (bo == NULL)
-				return NULL;
-
-			memcpy(&bo->base, old, sizeof(*old));
-			if (old->rq)
-				list_replace(&old->request, &bo->base.request);
-			else
-				list_init(&bo->base.request);
-			list_replace(&old->vma, &bo->base.vma);
-			list_init(&bo->base.list);
-			free(old);
-
-			assert(bo->base.vmap);
-			assert(bo->base.tiling == I915_TILING_NONE);
-			assert(num_pages(&bo->base) >= NUM_PAGES(size));
-
-			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-			if (bo->mem) {
-				bo->mmapped = true;
-				bo->need_io = false;
-				bo->base.io = true;
-				bo->base.refcnt = 1;
-
-				alloc = num_pages(&bo->base);
-				goto init;
-			} else {
-				kgem_bo_free(kgem, &bo->base);
-				bo = NULL;
-			}
-		}
-
-		if (kgem->has_cache_level) {
-			uint32_t handle;
-
-			handle = gem_create(kgem->fd, alloc);
-			if (handle == 0)
-				return NULL;
-
-			if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
-				gem_close(kgem->fd, handle);
-				return NULL;
-			}
-
-			bo = malloc(sizeof(*bo));
-			if (bo == NULL) {
-				gem_close(kgem->fd, handle);
-				return NULL;
-			}
-
-			debug_alloc(kgem, alloc);
-			__kgem_bo_init(&bo->base, handle, alloc);
-			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
-			     __FUNCTION__, bo->base.handle, alloc));
-
-			bo->base.reusable = false;
-			bo->base.vmap = true;
-
-			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-			if (bo->mem) {
-				bo->mmapped = true;
-				bo->need_io = false;
-				bo->base.io = true;
-				goto init;
-			} else {
-				bo->base.refcnt = 0; /* for valgrind */
-				kgem_bo_free(kgem, &bo->base);
-				bo = NULL;
-			}
+		bo = search_snoopable_buffer(kgem, alloc);
+		if (bo) {
+			flags &= ~KGEM_BUFFER_INPLACE;
+			alloc = num_pages(&bo->base);
+			goto init;
 		}
 
-		if (kgem->has_vmap) {
-			bo = partial_bo_alloc(alloc);
+		if ((flags & KGEM_BUFFER_WRITE_INPLACE) != KGEM_BUFFER_WRITE_INPLACE) {
+			bo = create_snoopable_buffer(kgem, alloc);
 			if (bo) {
-				uint32_t handle = gem_vmap(kgem->fd, bo->mem,
-							   alloc * PAGE_SIZE, false);
-				if (handle == 0 ||
-				    !__kgem_bo_init(&bo->base, handle, alloc)) {
-					free(bo);
-					bo = NULL;
-				} else {
-					DBG(("%s: created vmap handle=%d for buffer\n",
-					     __FUNCTION__, bo->base.handle));
-
-					bo->base.io = true;
-					bo->base.vmap = true;
-					bo->base.map = MAKE_VMAP_MAP(bo);
-					bo->mmapped = true;
-					bo->need_io = false;
-
-					goto init;
-				}
+				flags &= ~KGEM_BUFFER_INPLACE;
+				goto init;
 			}
 		}
 	}
@@ -4331,6 +4356,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		bo->need_io = flags & KGEM_BUFFER_WRITE;
 		bo->base.io = true;
 	} else {
+		if (use_snoopable_buffer(kgem, flags)) {
+			bo = create_snoopable_buffer(kgem, alloc);
+			if (bo)
+				goto init;
+		}
+
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
 			return NULL;
commit 73f07abbd2d78418e5a66262f293b5ed80b7ccb4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 18 00:19:49 2012 +0100

    sna: Maintain a short-lived cache of snoopable CPU bo for older gen
    
    Once again, we find that frequent buffer creation and manipulation of the
    GTT is a painful experience leading to noticeable and frequent application
    stalls. So mitigate the need for fresh pages by keeping a small stash of
    recently freed and inactive bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4c6ca57..d9e27e2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -53,6 +53,9 @@
 static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
+static struct kgem_bo *
+search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
+
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
 #define DBG_NO_CACHE 0
@@ -810,6 +813,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
 	list_init(&kgem->large);
+	list_init(&kgem->vmap);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
 	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
@@ -1212,6 +1216,9 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 					    struct kgem_bo *bo)
 {
+	DBG(("%s: moving %d from flush to inactive\n",
+	     __FUNCTION__, bo->handle));
+
 	assert(bo->reusable);
 	assert(bo->rq == NULL);
 	assert(bo->domain != DOMAIN_GPU);
@@ -1295,6 +1302,73 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 		io->used = bo->delta;
 }
 
+static void kgem_bo_move_to_vmap(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (num_pages(bo) > kgem->max_cpu_size >> 13) {
+		kgem_bo_free(kgem, bo);
+		return;
+	}
+
+	assert(bo->tiling == I915_TILING_NONE);
+	assert(bo->rq == NULL);
+	assert(!bo->io);
+
+	DBG(("%s: moving %d to vmap\n", __FUNCTION__, bo->handle));
+	list_add(&bo->list, &kgem->vmap);
+}
+
+static struct kgem_bo *
+search_vmap_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
+{
+	struct kgem_bo *bo, *first = NULL;
+
+	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));
+
+	if (list_is_empty(&kgem->vmap)) {
+		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
+		if (!__kgem_throttle_retire(kgem, flags)) {
+			DBG(("%s: nothing retired\n", __FUNCTION__));
+			return NULL;
+		}
+	}
+
+	list_for_each_entry(bo, &kgem->vmap, list) {
+		assert(bo->refcnt == 0);
+		assert(bo->vmap);
+		assert(bo->tiling == I915_TILING_NONE);
+		assert(bo->rq == NULL);
+
+		if (num_pages > num_pages(bo))
+			continue;
+
+		if (num_pages(bo) > 2*num_pages) {
+			if (first == NULL)
+				first = bo;
+			continue;
+		}
+
+		list_del(&bo->list);
+		bo->pitch = 0;
+		bo->delta = 0;
+
+		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		     __FUNCTION__, bo->handle, num_pages(bo)));
+		return bo;
+	}
+
+	if (first) {
+		list_del(&first->list);
+		first->pitch = 0;
+		first->delta = 0;
+
+		DBG(("  %s: found handle=%d (num_pages=%d) in vmap cache\n",
+		     __FUNCTION__, first->handle, num_pages(first)));
+		return first;
+	}
+
+	return NULL;
+}
+
 static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
@@ -1309,22 +1383,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (DBG_NO_CACHE)
 		goto destroy;
 
-	if (bo->vmap) {
-		assert(!bo->flush);
-		DBG(("%s: handle=%d is vmapped, tracking until free\n",
-		     __FUNCTION__, bo->handle));
-		if (bo->rq == NULL) {
-			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
-				list_add(&bo->request, &kgem->flushing);
-				bo->rq = &_kgem_static_request;
-			} else
-				kgem_bo_free(kgem, bo);
-		} else {
-			assert(!bo->sync);
-		}
-		return;
-	}
-
 	if (bo->io) {
 		struct kgem_bo *base;
 
@@ -1344,6 +1402,21 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		}
 	}
 
+	if (bo->vmap) {
+		assert(!bo->flush);
+		DBG(("%s: handle=%d is vmapped, tracking until free\n",
+		     __FUNCTION__, bo->handle));
+		if (bo->rq == NULL) {
+			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			}
+		}
+		if (bo->rq == NULL)
+			kgem_bo_move_to_vmap(kgem, bo);
+		return;
+	}
+
 	if (!bo->reusable) {
 		DBG(("%s: handle=%d, not reusable\n",
 		     __FUNCTION__, bo->handle));
@@ -1406,7 +1479,6 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		     __FUNCTION__, bo->handle));
 	}
 
-	DBG(("%s: handle=%d -> inactive\n", __FUNCTION__, bo->handle));
 	kgem_bo_move_to_inactive(kgem, bo);
 	return;
 
@@ -1475,16 +1547,16 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 		if (kgem_busy(kgem, bo->handle))
 			break;
 
-		DBG(("%s: moving %d from flush to inactive\n",
-		     __FUNCTION__, bo->handle));
 		bo->needs_flush = false;
 		bo->domain = DOMAIN_NONE;
 		bo->rq = NULL;
 		list_del(&bo->request);
 
 		if (!bo->refcnt) {
-			assert(bo->reusable);
-			if (kgem_bo_set_purgeable(kgem, bo)) {
+			if (bo->vmap) {
+				kgem_bo_move_to_vmap(kgem, bo);
+			} else if (kgem_bo_set_purgeable(kgem, bo)) {
+				assert(bo->reusable);
 				kgem_bo_move_to_inactive(kgem, bo);
 				retired = true;
 			} else
@@ -1546,6 +1618,16 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			if (bo->refcnt)
 				continue;
 
+			if (bo->vmap) {
+				if (bo->needs_flush) {
+					list_add(&bo->request, &kgem->flushing);
+					bo->rq = &_kgem_static_request;
+				} else {
+					kgem_bo_move_to_vmap(kgem, bo);
+				}
+				continue;
+			}
+
 			if (!bo->reusable) {
 				DBG(("%s: closing %d\n",
 				     __FUNCTION__, bo->handle));
@@ -1555,8 +1637,6 @@ static bool kgem_retire__requests(struct kgem *kgem)
 
 			if (!bo->needs_flush) {
 				if (kgem_bo_set_purgeable(kgem, bo)) {
-					DBG(("%s: moving %d to inactive\n",
-					     __FUNCTION__, bo->handle));
 					kgem_bo_move_to_inactive(kgem, bo);
 					retired = true;
 				} else {
@@ -2177,12 +2257,35 @@ bool kgem_expire_cache(struct kgem *kgem)
 	bool idle;
 	unsigned int i;
 
+	time(&now);
+
 	while (__kgem_freed_bo) {
 		bo = __kgem_freed_bo;
 		__kgem_freed_bo = *(struct kgem_bo **)bo;
 		free(bo);
 	}
 
+
+	expire = 0;
+	list_for_each_entry(bo, &kgem->vmap, list) {
+		if (bo->delta) {
+			expire = now - MAX_INACTIVE_TIME/2;
+			break;
+		}
+
+		bo->delta = now;
+	}
+	if (expire) {
+		while (!list_is_empty(&kgem->vmap)) {
+			bo = list_last_entry(&kgem->vmap, struct kgem_bo, list);
+
+			if (bo->delta > expire)
+				break;
+
+			kgem_bo_free(kgem, bo);
+		}
+	}
+
 	kgem_retire(kgem);
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
@@ -2192,7 +2295,6 @@ bool kgem_expire_cache(struct kgem *kgem)
 	if (kgem->need_purge)
 		kgem_purge_cache(kgem);
 
-	time(&now);
 	expire = 0;
 
 	idle = !kgem->need_retire;
@@ -2291,6 +2393,17 @@ void kgem_cleanup_cache(struct kgem *kgem)
 						     struct kgem_bo, list));
 	}
 
+	while (!list_is_empty(&kgem->vmap))
+		kgem_bo_free(kgem,
+			     list_last_entry(&kgem->vmap,
+					     struct kgem_bo, list));
+
+	while (__kgem_freed_bo) {
+		struct kgem_bo *bo = __kgem_freed_bo;
+		__kgem_freed_bo = *(struct kgem_bo **)bo;
+		free(bo);
+	}
+
 	kgem->need_purge = false;
 	kgem->need_expire = false;
 }
@@ -2743,9 +2856,6 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	size /= PAGE_SIZE;
 	bucket = cache_bucket(size);
 
-	if (flags & CREATE_FORCE)
-		goto create;
-
 	if (bucket >= NUM_CACHE_BUCKETS) {
 		DBG(("%s: large bo num pages=%d, bucket=%d\n",
 		     __FUNCTION__, size, bucket));
@@ -3101,6 +3211,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 				   uint32_t flags)
 {
 	struct kgem_bo *bo;
+	int stride, size;
 
 	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
 
@@ -3120,9 +3231,26 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		return bo;
 	}
 
+	assert(width > 0 && height > 0);
+	stride = ALIGN(width, 2) * bpp >> 3;
+	stride = ALIGN(stride, 4);
+	size = stride * ALIGN(height, 2);
+	assert(size >= PAGE_SIZE);
+
+	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
+	     __FUNCTION__, width, height, bpp, stride));
+
+	bo = search_vmap_cache(kgem, NUM_PAGES(size), 0);
+	if (bo) {
+		assert(bo->tiling == I915_TILING_NONE);
+		assert(bo->vmap);
+		bo->refcnt = 1;
+		bo->pitch = stride;
+		return bo;
+	}
+
 	if (kgem->has_cache_level) {
-		bo = kgem_create_2d(kgem, width, height, bpp,
-				    I915_TILING_NONE, flags | CREATE_FORCE);
+		bo = kgem_create_linear(kgem, size, flags);
 		if (bo == NULL)
 			return NULL;
 
@@ -3136,19 +3264,13 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return NULL;
 		}
 
+		bo->pitch = stride;
 		return bo;
 	}
 
 	if (kgem->has_vmap) {
-		int stride, size;
 		void *ptr;
 
-		stride = ALIGN(width, 2) * bpp >> 3;
-		stride = ALIGN(stride, 4);
-		size = ALIGN(height, 2) * stride;
-
-		assert(size >= PAGE_SIZE);
-
 		/* XXX */
 		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
 		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
@@ -4082,6 +4204,40 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = NUM_PAGES(size);
 
 	if (use_snoopable_buffer(kgem, flags)) {
+		old = search_vmap_cache(kgem, NUM_PAGES(size), 0);
+		if (old) {
+			bo = malloc(sizeof(*bo));
+			if (bo == NULL)
+				return NULL;
+
+			memcpy(&bo->base, old, sizeof(*old));
+			if (old->rq)
+				list_replace(&old->request, &bo->base.request);
+			else
+				list_init(&bo->base.request);
+			list_replace(&old->vma, &bo->base.vma);
+			list_init(&bo->base.list);
+			free(old);
+
+			assert(bo->base.vmap);
+			assert(bo->base.tiling == I915_TILING_NONE);
+			assert(num_pages(&bo->base) >= NUM_PAGES(size));
+
+			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+			if (bo->mem) {
+				bo->mmapped = true;
+				bo->need_io = false;
+				bo->base.io = true;
+				bo->base.refcnt = 1;
+
+				alloc = num_pages(&bo->base);
+				goto init;
+			} else {
+				kgem_bo_free(kgem, &bo->base);
+				bo = NULL;
+			}
+		}
+
 		if (kgem->has_cache_level) {
 			uint32_t handle;
 
@@ -4102,8 +4258,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 			debug_alloc(kgem, alloc);
 			__kgem_bo_init(&bo->base, handle, alloc);
-			DBG(("%s: created handle=%d for buffer\n",
-			     __FUNCTION__, bo->base.handle));
+			DBG(("%s: created CPU handle=%d for buffer, size %d\n",
+			     __FUNCTION__, bo->base.handle, alloc));
 
 			bo->base.reusable = false;
 			bo->base.vmap = true;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 63be218..b8d755c 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,6 +125,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
+	struct list vmap;
 	struct list batch_partials, active_partials;
 	struct list requests;
 	struct list sync_list;
@@ -243,7 +244,6 @@ enum {
 	CREATE_TEMPORARY = 0x20,
 	CREATE_NO_RETIRE = 0x40,
 	CREATE_NO_THROTTLE = 0x40,
-	CREATE_FORCE = 0x80,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
commit 77520641a332a622c0b5378bd254ed5cb46a5f0a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 22:09:33 2012 +0100

    i810: Replace XAAGet.*ROP() with local tables
    
    The XAAGetPatternROP() and XAAGetCopyROP() functions were removed along
    with the rest of XAA so we need to implement those tables locally.
    
    Reported-by: Knut Petersen <Knut_Petersen at t-online.de>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index a96c504..823631f 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -318,4 +318,7 @@ extern void I810InitMC(ScreenPtr pScreen);
 
 extern const OptionInfoRec *I810AvailableOptions(int chipid, int busid);
 
+extern const int I810CopyROP[16];
+const int I810PatternROP[16];
+
 #endif /* _I810_H_ */
diff --git a/src/legacy/i810/i810_accel.c b/src/legacy/i810/i810_accel.c
index c079bfd..aa2c4df 100644
--- a/src/legacy/i810/i810_accel.c
+++ b/src/legacy/i810/i810_accel.c
@@ -40,6 +40,44 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xaarop.h"
 #include "i810.h"
 
+const int I810CopyROP[16] = {
+	ROP_0,			/* GXclear */
+	ROP_DSa,		/* GXand */
+	ROP_SDna,		/* GXandReverse */
+	ROP_S,			/* GXcopy */
+	ROP_DSna,		/* GXandInverted */
+	ROP_D,			/* GXnoop */
+	ROP_DSx,		/* GXxor */
+	ROP_DSo,		/* GXor */
+	ROP_DSon,		/* GXnor */
+	ROP_DSxn,		/* GXequiv */
+	ROP_Dn,			/* GXinvert */
+	ROP_SDno,		/* GXorReverse */
+	ROP_Sn,			/* GXcopyInverted */
+	ROP_DSno,		/* GXorInverted */
+	ROP_DSan,		/* GXnand */
+	ROP_1			/* GXset */
+};
+
+const int I810PatternROP[16] = {
+	ROP_0,
+	ROP_DPa,
+	ROP_PDna,
+	ROP_P,
+	ROP_DPna,
+	ROP_D,
+	ROP_DPx,
+	ROP_DPo,
+	ROP_DPon,
+	ROP_PDxn,
+	ROP_Dn,
+	ROP_PDno,
+	ROP_Pn,
+	ROP_DPno,
+	ROP_DPan,
+	ROP_1
+};
+
 int
 I810WaitLpRing(ScrnInfoPtr pScrn, int n, int timeout_millis)
 {
@@ -153,7 +191,7 @@ I810SetupForSolidFill(ScrnInfoPtr pScrn, int color, int rop,
 
    /* Color blit, p166 */
    pI810->BR[13] = (BR13_SOLID_PATTERN |
-		    (XAAGetPatternROP(rop) << 16) |
+		    (I810PatternROP[rop] << 16) |
 		    (pScrn->displayWidth * pI810->cpp));
    pI810->BR[16] = color;
 }
@@ -199,7 +237,7 @@ I810SetupForScreenToScreenCopy(ScrnInfoPtr pScrn, int xdir, int ydir, int rop,
    if (xdir == -1)
       pI810->BR[13] |= BR13_RIGHT_TO_LEFT;
 
-   pI810->BR[13] |= XAAGetCopyROP(rop) << 16;
+   pI810->BR[13] |= I810CopyROP[rop] << 16;
 
    pI810->BR[18] = 0;
 }
diff --git a/src/legacy/i810/i810_xaa.c b/src/legacy/i810/i810_xaa.c
index 600b631..13f0dc2 100644
--- a/src/legacy/i810/i810_xaa.c
+++ b/src/legacy/i810/i810_xaa.c
@@ -55,7 +55,7 @@ I810SetupForMono8x8PatternFill(ScrnInfoPtr pScrn, int pattx, int patty,
    pI810->BR[18] = bg;
    pI810->BR[19] = fg;
    pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
-   pI810->BR[13] |= XAAGetPatternROP(rop) << 16;
+   pI810->BR[13] |= I810PatternROP[rop] << 16;
    if (bg == -1)
       pI810->BR[13] |= BR13_MONO_PATN_TRANS;
 }
@@ -119,7 +119,7 @@ I810SetupForScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
 	     fg, bg, rop, planemask);
 
    pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
-   pI810->BR[13] |= XAAGetCopyROP(rop) << 16;
+   pI810->BR[13] |= I810CopyROP[rop] << 16;
    pI810->BR[13] |= (1 << 27);
    if (bg == -1)
       pI810->BR[13] |= BR13_MONO_TRANSPCY;
commit caef63e0268e59e439b030a9a338e81d5cf8e311
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 21:22:57 2012 +0100

    i810: Split xaa routines from common acceleration methods
    
    Some of the routines in i810_accel.c are specific to XAA whilst others
    are used elsewhere, for example in i810_dri.c. Therefore we have to be
    selective over which ones we compile out without xaa.
    
    Reported-by: Knut Petersen <Knut_Petersen at t-online.de>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/Makefile.am b/src/legacy/i810/Makefile.am
index 07a384f..51489b8 100644
--- a/src/legacy/i810/Makefile.am
+++ b/src/legacy/i810/Makefile.am
@@ -8,6 +8,7 @@ AM_CFLAGS = @CWARNFLAGS@ @XORG_CFLAGS@ @DRM_CFLAGS@ @DRI_CFLAGS@ @PCIACCESS_CFLA
 	    $(NULL)
 
 liblegacy_i810_la_SOURCES = \
+         i810_accel.c \
          i810_common.h \
          i810_cursor.c \
          i810_driver.c \
@@ -20,7 +21,7 @@ liblegacy_i810_la_SOURCES = \
 
 if XAA
 liblegacy_i810_la_SOURCES += \
-         i810_accel.c
+         i810_xaa.c
 endif
 
 if DGA
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index a07fb69..a96c504 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -310,7 +310,6 @@ extern void I810SelectBuffer(ScrnInfoPtr pScrn, int buffer);
 
 extern void I810RefreshRing(ScrnInfoPtr pScrn);
 extern void I810EmitFlush(ScrnInfoPtr pScrn);
-extern void I810EmitInvarientState(ScrnInfoPtr pScrn);
 
 extern Bool I810DGAInit(ScreenPtr pScreen);
 
diff --git a/src/legacy/i810/i810_accel.c b/src/legacy/i810/i810_accel.c
index 7120b4b..c079bfd 100644
--- a/src/legacy/i810/i810_accel.c
+++ b/src/legacy/i810/i810_accel.c
@@ -40,137 +40,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xaarop.h"
 #include "i810.h"
 
-static void I810SetupForMono8x8PatternFill(ScrnInfoPtr pScrn,
-					   int pattx, int patty,
-					   int fg, int bg, int rop,
-					   unsigned int planemask);
-static void I810SubsequentMono8x8PatternFillRect(ScrnInfoPtr pScrn,
-						 int pattx, int patty,
-						 int x, int y, int w, int h);
-
-static void I810SetupForScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
-							   int fg, int bg,
-							   int rop,
-							   unsigned int mask);
-
-static void I810SubsequentScanlineCPUToScreenColorExpandFill(ScrnInfoPtr
-							     pScrn, int x,
-							     int y, int w,
-							     int h,
-							     int skipleft);
-
-static void I810SubsequentColorExpandScanline(ScrnInfoPtr pScrn, int bufno);
-
-/* The following function sets up the supported acceleration. Call it
- * from the FbInit() function in the SVGA driver, or before ScreenInit
- * in a monolithic server.
- */
-Bool
-I810AccelInit(ScreenPtr pScreen)
-{
-   XAAInfoRecPtr infoPtr;
-   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810AccelInit\n");
-
-   pI810->AccelInfoRec = infoPtr = XAACreateInfoRec();
-   if (!infoPtr)
-      return FALSE;
-
-   pI810->bufferOffset = 0;
-   infoPtr->Flags = LINEAR_FRAMEBUFFER | OFFSCREEN_PIXMAPS;
-   infoPtr->Flags |= PIXMAP_CACHE;
-
-   /* Sync
-    */
-   infoPtr->Sync = I810Sync;
-
-   /* Solid filled rectangles 
-    */
-   {
-      infoPtr->SolidFillFlags = NO_PLANEMASK;
-      infoPtr->SetupForSolidFill = I810SetupForSolidFill;
-      infoPtr->SubsequentSolidFillRect = I810SubsequentSolidFillRect;
-   }
-
-   /* Screen to screen copy 
-    *   - the transparency op hangs the blit engine, disable for now.
-    */
-   {
-      infoPtr->ScreenToScreenCopyFlags = (0
-					  | NO_PLANEMASK
-					  | NO_TRANSPARENCY | 0);
-
-      infoPtr->SetupForScreenToScreenCopy = I810SetupForScreenToScreenCopy;
-      infoPtr->SubsequentScreenToScreenCopy =
-	    I810SubsequentScreenToScreenCopy;
-   }
-
-   /* 8x8 pattern fills 
-    */
-   {
-      infoPtr->SetupForMono8x8PatternFill = I810SetupForMono8x8PatternFill;
-      infoPtr->SubsequentMono8x8PatternFillRect =
-	    I810SubsequentMono8x8PatternFillRect;
-
-      infoPtr->Mono8x8PatternFillFlags = (HARDWARE_PATTERN_PROGRAMMED_BITS |
-					  HARDWARE_PATTERN_SCREEN_ORIGIN |
-					  BIT_ORDER_IN_BYTE_MSBFIRST |
-					  NO_PLANEMASK | 0);
-   }
-
-   /* 8x8 color fills - not considered useful for XAA.
-    */
-
-   /* Scanline color expansion - Use the same scheme as the 3.3 driver.  
-    *
-    */
-   if (pI810->Scratch.Size != 0) {
-      int i;
-      int width = ALIGN(pScrn->displayWidth, 32) / 8;
-      int nr_buffers = pI810->Scratch.Size / width;
-      unsigned char *ptr = pI810->FbBase + pI810->Scratch.Start;
-
-      pI810->NumScanlineColorExpandBuffers = nr_buffers;
-      pI810->ScanlineColorExpandBuffers = (unsigned char **)
-	    xnfcalloc(nr_buffers, sizeof(unsigned char *));
-
-      for (i = 0; i < nr_buffers; i++, ptr += width)
-	 pI810->ScanlineColorExpandBuffers[i] = ptr;
-
-      infoPtr->ScanlineCPUToScreenColorExpandFillFlags = (NO_PLANEMASK |
-							  ROP_NEEDS_SOURCE |
-							  BIT_ORDER_IN_BYTE_MSBFIRST
-							  | 0);
-
-      infoPtr->ScanlineColorExpandBuffers = (unsigned char **)
-	    xnfcalloc(1, sizeof(unsigned char *));
-      infoPtr->NumScanlineColorExpandBuffers = 1;
-
-      infoPtr->ScanlineColorExpandBuffers[0] =
-	    pI810->ScanlineColorExpandBuffers[0];
-      pI810->nextColorExpandBuf = 0;
-
-      infoPtr->SetupForScanlineCPUToScreenColorExpandFill =
-	    I810SetupForScanlineCPUToScreenColorExpandFill;
-
-      infoPtr->SubsequentScanlineCPUToScreenColorExpandFill =
-	    I810SubsequentScanlineCPUToScreenColorExpandFill;
-
-      infoPtr->SubsequentColorExpandScanline =
-	    I810SubsequentColorExpandScanline;
-   }
-
-   /* Possible todo: Image writes w/ non-GXCOPY rop.
-    */
-
-   I810SelectBuffer(pScrn, I810_SELECT_FRONT);
-
-   return XAAInit(pScreen, infoPtr);
-}
-
 int
 I810WaitLpRing(ScrnInfoPtr pScrn, int n, int timeout_millis)
 {
@@ -219,7 +88,9 @@ I810WaitLpRing(ScrnInfoPtr pScrn, int n, int timeout_millis)
 	    DRICloseScreen(xf86ScrnToScreen(pScrn));
 	 }
 #endif
+#if HAVE_XAA_H
 	 pI810->AccelInfoRec = NULL;	/* Stops recursive behavior */
+#endif
 	 FatalError("lockup\n");
       }
 
@@ -399,145 +270,6 @@ I810SubsequentScreenToScreenCopy(ScrnInfoPtr pScrn, int x1, int y1,
     }  while (1);
 }
 
-static void
-I810SetupForMono8x8PatternFill(ScrnInfoPtr pScrn, int pattx, int patty,
-			       int fg, int bg, int rop,
-			       unsigned int planemask)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810SetupFor8x8PatternColorExpand\n");
-
-   /* FULL_MONO_PAT_BLT, p176 */
-   pI810->BR[0] = (BR00_BITBLT_CLIENT | BR00_OP_MONO_PAT_BLT | 0x9);
-   pI810->BR[18] = bg;
-   pI810->BR[19] = fg;
-   pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
-   pI810->BR[13] |= XAAGetPatternROP(rop) << 16;
-   if (bg == -1)
-      pI810->BR[13] |= BR13_MONO_PATN_TRANS;
-}
-
-static void
-I810SubsequentMono8x8PatternFillRect(ScrnInfoPtr pScrn, int pattx, int patty,
-				     int x, int y, int w, int h)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-   int addr =
-	 pI810->bufferOffset + (y * pScrn->displayWidth + x) * pI810->cpp;
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810Subsequent8x8PatternColorExpand\n");
-
-   {
-      BEGIN_LP_RING(12);
-      OUT_RING(pI810->BR[0] | ((y << 5) & BR00_PAT_VERT_ALIGN));
-      OUT_RING(pI810->BR[13]);
-      OUT_RING((h << 16) | (w * pI810->cpp));
-      OUT_RING(addr);
-      OUT_RING(pI810->BR[13] & 0xFFFF);	/* src pitch */
-      OUT_RING(addr);			/* src addr */
-      OUT_RING(0);			/* transparency color */
-      OUT_RING(pI810->BR[18]);		/* bg */
-      OUT_RING(pI810->BR[19]);		/* fg */
-      OUT_RING(pattx);			/* pattern data */
-      OUT_RING(patty);
-      OUT_RING(0);
-      ADVANCE_LP_RING();
-   }
-}
-
-static void
-I810GetNextScanlineColorExpandBuffer(ScrnInfoPtr pScrn)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-   XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
-
-   if (pI810->nextColorExpandBuf == pI810->NumScanlineColorExpandBuffers)
-      I810Sync(pScrn);
-
-   infoPtr->ScanlineColorExpandBuffers[0] =
-	 pI810->ScanlineColorExpandBuffers[pI810->nextColorExpandBuf];
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("using color expand buffer %d\n", pI810->nextColorExpandBuf);
-
-   pI810->nextColorExpandBuf++;
-}
-
-static void
-I810SetupForScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
-					       int fg, int bg, int rop,
-					       unsigned int planemask)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810SetupForScanlineScreenToScreenColorExpand %d %d %x %x\n",
-	     fg, bg, rop, planemask);
-
-   pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
-   pI810->BR[13] |= XAAGetCopyROP(rop) << 16;
-   pI810->BR[13] |= (1 << 27);
-   if (bg == -1)
-      pI810->BR[13] |= BR13_MONO_TRANSPCY;
-
-   pI810->BR[18] = bg;
-   pI810->BR[19] = fg;
-
-   I810GetNextScanlineColorExpandBuffer(pScrn);
-}
-
-static void
-I810SubsequentScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
-						 int x, int y,
-						 int w, int h, int skipleft)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810SubsequentScanlineCPUToScreenColorExpandFill "
-	     "%d,%d %dx%x %d\n", x, y, w, h, skipleft);
-
-   pI810->BR[0] = BR00_BITBLT_CLIENT | BR00_OP_MONO_SRC_COPY_BLT | 0x06;
-   pI810->BR[9] = (pI810->bufferOffset +
-		   (y * pScrn->displayWidth + x) * pI810->cpp);
-   pI810->BR[14] = ((1 << 16) | (w * pI810->cpp));
-   pI810->BR[11] = ((w + 31) / 32) - 1;
-}
-
-static void
-I810SubsequentColorExpandScanline(ScrnInfoPtr pScrn, int bufno)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   pI810->BR[12] = (pI810->AccelInfoRec->ScanlineColorExpandBuffers[0] -
-		    pI810->FbBase);
-
-   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
-      ErrorF("I810SubsequentColorExpandScanline %d (addr %x)\n",
-	     bufno, pI810->BR[12]);
-
-   {
-      BEGIN_LP_RING(8);
-      OUT_RING(pI810->BR[0]);
-      OUT_RING(pI810->BR[13]);
-      OUT_RING(pI810->BR[14]);
-      OUT_RING(pI810->BR[9]);
-      OUT_RING(pI810->BR[11]);
-      OUT_RING(pI810->BR[12]);		/* srcaddr */
-      OUT_RING(pI810->BR[18]);
-      OUT_RING(pI810->BR[19]);
-      ADVANCE_LP_RING();
-   }
-
-   /* Advance to next scanline.
-    */
-   pI810->BR[9] += pScrn->displayWidth * pI810->cpp;
-   I810GetNextScanlineColorExpandBuffer(pScrn);
-}
-
 void
 I810EmitFlush(ScrnInfoPtr pScrn)
 {
@@ -583,35 +315,8 @@ I810RefreshRing(ScrnInfoPtr pScrn)
    if (pI810->LpRing->space < 0)
       pI810->LpRing->space += pI810->LpRing->mem.Size;
 
+#if HAVE_XAA_H
    if (pI810->AccelInfoRec)
       pI810->AccelInfoRec->NeedToSync = TRUE;
+#endif
 }
-
-/* Emit on gaining VT?
- */
-void
-I810EmitInvarientState(ScrnInfoPtr pScrn)
-{
-   I810Ptr pI810 = I810PTR(pScrn);
-
-   BEGIN_LP_RING(10);
-
-   OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
-   OUT_RING(GFX_CMD_CONTEXT_SEL | CS_UPDATE_USE | CS_USE_CTX0);
-   OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
-   OUT_RING(0);
-
-   OUT_RING(GFX_OP_COLOR_CHROMA_KEY);
-   OUT_RING(CC1_UPDATE_KILL_WRITE |
-	    CC1_DISABLE_KILL_WRITE |
-	    CC1_UPDATE_COLOR_IDX |
-	    CC1_UPDATE_CHROMA_LOW | CC1_UPDATE_CHROMA_HI | 0);
-   OUT_RING(0);
-   OUT_RING(0);
-
-/*     OUT_RING( CMD_OP_Z_BUFFER_INFO ); */
-/*     OUT_RING( pI810->DepthBuffer.Start | pI810->auxPitchBits); */
-
-   ADVANCE_LP_RING();
-}
-
diff --git a/src/legacy/i810/i810_xaa.c b/src/legacy/i810/i810_xaa.c
new file mode 100644
index 0000000..600b631
--- /dev/null
+++ b/src/legacy/i810/i810_xaa.c
@@ -0,0 +1,320 @@
+
+/**************************************************************************
+
+Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sub license, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/*
+ * Authors:
+ *   Keith Whitwell <keith at tungstengraphics.com>
+ *
+ */
+
+#include "xf86.h"
+#include "xaarop.h"
+#include "i810.h"
+
+static void
+I810SetupForMono8x8PatternFill(ScrnInfoPtr pScrn, int pattx, int patty,
+			       int fg, int bg, int rop,
+			       unsigned int planemask)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810SetupFor8x8PatternColorExpand\n");
+
+   /* FULL_MONO_PAT_BLT, p176 */
+   pI810->BR[0] = (BR00_BITBLT_CLIENT | BR00_OP_MONO_PAT_BLT | 0x9);
+   pI810->BR[18] = bg;
+   pI810->BR[19] = fg;
+   pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
+   pI810->BR[13] |= XAAGetPatternROP(rop) << 16;
+   if (bg == -1)
+      pI810->BR[13] |= BR13_MONO_PATN_TRANS;
+}
+
+static void
+I810SubsequentMono8x8PatternFillRect(ScrnInfoPtr pScrn, int pattx, int patty,
+				     int x, int y, int w, int h)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+   int addr =
+	 pI810->bufferOffset + (y * pScrn->displayWidth + x) * pI810->cpp;
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810Subsequent8x8PatternColorExpand\n");
+
+   {
+      BEGIN_LP_RING(12);
+      OUT_RING(pI810->BR[0] | ((y << 5) & BR00_PAT_VERT_ALIGN));
+      OUT_RING(pI810->BR[13]);
+      OUT_RING((h << 16) | (w * pI810->cpp));
+      OUT_RING(addr);
+      OUT_RING(pI810->BR[13] & 0xFFFF);	/* src pitch */
+      OUT_RING(addr);			/* src addr */
+      OUT_RING(0);			/* transparency color */
+      OUT_RING(pI810->BR[18]);		/* bg */
+      OUT_RING(pI810->BR[19]);		/* fg */
+      OUT_RING(pattx);			/* pattern data */
+      OUT_RING(patty);
+      OUT_RING(0);
+      ADVANCE_LP_RING();
+   }
+}
+
+static void
+I810GetNextScanlineColorExpandBuffer(ScrnInfoPtr pScrn)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+   XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
+
+   if (pI810->nextColorExpandBuf == pI810->NumScanlineColorExpandBuffers)
+      I810Sync(pScrn);
+
+   infoPtr->ScanlineColorExpandBuffers[0] =
+	 pI810->ScanlineColorExpandBuffers[pI810->nextColorExpandBuf];
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("using color expand buffer %d\n", pI810->nextColorExpandBuf);
+
+   pI810->nextColorExpandBuf++;
+}
+
+static void
+I810SetupForScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
+					       int fg, int bg, int rop,
+					       unsigned int planemask)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810SetupForScanlineScreenToScreenColorExpand %d %d %x %x\n",
+	     fg, bg, rop, planemask);
+
+   pI810->BR[13] = (pScrn->displayWidth * pI810->cpp);
+   pI810->BR[13] |= XAAGetCopyROP(rop) << 16;
+   pI810->BR[13] |= (1 << 27);
+   if (bg == -1)
+      pI810->BR[13] |= BR13_MONO_TRANSPCY;
+
+   pI810->BR[18] = bg;
+   pI810->BR[19] = fg;
+
+   I810GetNextScanlineColorExpandBuffer(pScrn);
+}
+
+static void
+I810SubsequentScanlineCPUToScreenColorExpandFill(ScrnInfoPtr pScrn,
+						 int x, int y,
+						 int w, int h, int skipleft)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810SubsequentScanlineCPUToScreenColorExpandFill "
+	     "%d,%d %dx%x %d\n", x, y, w, h, skipleft);
+
+   pI810->BR[0] = BR00_BITBLT_CLIENT | BR00_OP_MONO_SRC_COPY_BLT | 0x06;
+   pI810->BR[9] = (pI810->bufferOffset +
+		   (y * pScrn->displayWidth + x) * pI810->cpp);
+   pI810->BR[14] = ((1 << 16) | (w * pI810->cpp));
+   pI810->BR[11] = ((w + 31) / 32) - 1;
+}
+
+static void
+I810SubsequentColorExpandScanline(ScrnInfoPtr pScrn, int bufno)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   pI810->BR[12] = (pI810->AccelInfoRec->ScanlineColorExpandBuffers[0] -
+		    pI810->FbBase);
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810SubsequentColorExpandScanline %d (addr %x)\n",
+	     bufno, pI810->BR[12]);
+
+   {
+      BEGIN_LP_RING(8);
+      OUT_RING(pI810->BR[0]);
+      OUT_RING(pI810->BR[13]);
+      OUT_RING(pI810->BR[14]);
+      OUT_RING(pI810->BR[9]);
+      OUT_RING(pI810->BR[11]);
+      OUT_RING(pI810->BR[12]);		/* srcaddr */
+      OUT_RING(pI810->BR[18]);
+      OUT_RING(pI810->BR[19]);
+      ADVANCE_LP_RING();
+   }
+
+   /* Advance to next scanline.
+    */
+   pI810->BR[9] += pScrn->displayWidth * pI810->cpp;
+   I810GetNextScanlineColorExpandBuffer(pScrn);
+}
+
+/* Emit on gaining VT?
+ */
+#if 0
+static void
+I810EmitInvarientState(ScrnInfoPtr pScrn)
+{
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   BEGIN_LP_RING(10);
+
+   OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+   OUT_RING(GFX_CMD_CONTEXT_SEL | CS_UPDATE_USE | CS_USE_CTX0);
+   OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+   OUT_RING(0);
+
+   OUT_RING(GFX_OP_COLOR_CHROMA_KEY);
+   OUT_RING(CC1_UPDATE_KILL_WRITE |
+	    CC1_DISABLE_KILL_WRITE |
+	    CC1_UPDATE_COLOR_IDX |
+	    CC1_UPDATE_CHROMA_LOW | CC1_UPDATE_CHROMA_HI | 0);
+   OUT_RING(0);
+   OUT_RING(0);
+
+/*     OUT_RING( CMD_OP_Z_BUFFER_INFO ); */
+/*     OUT_RING( pI810->DepthBuffer.Start | pI810->auxPitchBits); */
+
+   ADVANCE_LP_RING();
+}
+#endif
+
+/* The following function sets up the supported acceleration. Call it
+ * from the FbInit() function in the SVGA driver, or before ScreenInit
+ * in a monolithic server.
+ */
+Bool
+I810AccelInit(ScreenPtr pScreen)
+{
+   XAAInfoRecPtr infoPtr;
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
+   I810Ptr pI810 = I810PTR(pScrn);
+
+   if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
+      ErrorF("I810AccelInit\n");
+
+   pI810->AccelInfoRec = infoPtr = XAACreateInfoRec();
+   if (!infoPtr)
+      return FALSE;
+
+   pI810->bufferOffset = 0;
+   infoPtr->Flags = LINEAR_FRAMEBUFFER | OFFSCREEN_PIXMAPS;
+   infoPtr->Flags |= PIXMAP_CACHE;
+
+   /* Sync
+    */
+   infoPtr->Sync = I810Sync;
+
+   /* Solid filled rectangles
+    */
+   {
+      infoPtr->SolidFillFlags = NO_PLANEMASK;
+      infoPtr->SetupForSolidFill = I810SetupForSolidFill;
+      infoPtr->SubsequentSolidFillRect = I810SubsequentSolidFillRect;
+   }
+
+   /* Screen to screen copy
+    *   - the transparency op hangs the blit engine, disable for now.
+    */
+   {
+      infoPtr->ScreenToScreenCopyFlags = (0
+					  | NO_PLANEMASK
+					  | NO_TRANSPARENCY | 0);
+
+      infoPtr->SetupForScreenToScreenCopy = I810SetupForScreenToScreenCopy;
+      infoPtr->SubsequentScreenToScreenCopy =
+	    I810SubsequentScreenToScreenCopy;
+   }
+
+   /* 8x8 pattern fills
+    */
+   {
+      infoPtr->SetupForMono8x8PatternFill = I810SetupForMono8x8PatternFill;
+      infoPtr->SubsequentMono8x8PatternFillRect =
+	    I810SubsequentMono8x8PatternFillRect;
+
+      infoPtr->Mono8x8PatternFillFlags = (HARDWARE_PATTERN_PROGRAMMED_BITS |
+					  HARDWARE_PATTERN_SCREEN_ORIGIN |
+					  BIT_ORDER_IN_BYTE_MSBFIRST |
+					  NO_PLANEMASK | 0);
+   }
+
+   /* 8x8 color fills - not considered useful for XAA.
+    */
+
+   /* Scanline color expansion - Use the same scheme as the 3.3 driver.
+    *
+    */
+   if (pI810->Scratch.Size != 0) {
+      int i;
+      int width = ALIGN(pScrn->displayWidth, 32) / 8;
+      int nr_buffers = pI810->Scratch.Size / width;
+      unsigned char *ptr = pI810->FbBase + pI810->Scratch.Start;
+
+      pI810->NumScanlineColorExpandBuffers = nr_buffers;
+      pI810->ScanlineColorExpandBuffers = (unsigned char **)
+	    xnfcalloc(nr_buffers, sizeof(unsigned char *));
+
+      for (i = 0; i < nr_buffers; i++, ptr += width)
+	 pI810->ScanlineColorExpandBuffers[i] = ptr;
+
+      infoPtr->ScanlineCPUToScreenColorExpandFillFlags = (NO_PLANEMASK |
+							  ROP_NEEDS_SOURCE |
+							  BIT_ORDER_IN_BYTE_MSBFIRST
+							  | 0);
+
+      infoPtr->ScanlineColorExpandBuffers = (unsigned char **)
+	    xnfcalloc(1, sizeof(unsigned char *));
+      infoPtr->NumScanlineColorExpandBuffers = 1;
+
+      infoPtr->ScanlineColorExpandBuffers[0] =
+	    pI810->ScanlineColorExpandBuffers[0];
+      pI810->nextColorExpandBuf = 0;
+
+      infoPtr->SetupForScanlineCPUToScreenColorExpandFill =
+	    I810SetupForScanlineCPUToScreenColorExpandFill;
+
+      infoPtr->SubsequentScanlineCPUToScreenColorExpandFill =
+	    I810SubsequentScanlineCPUToScreenColorExpandFill;
+
+      infoPtr->SubsequentColorExpandScanline =
+	    I810SubsequentColorExpandScanline;
+   }
+
+   /* Possible todo: Image writes w/ non-GXCOPY rop.
+    */
+
+   I810SelectBuffer(pScrn, I810_SELECT_FRONT);
+
+   return XAAInit(pScreen, infoPtr);
+}
commit 53ff19f45a3cc4863845c23e8d3c2c2b95e03fd9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 19:40:16 2012 +0100

    sna: Allow wedged CopyPlane to operate inplace on the destination
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4321c21..d4b9f37 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6051,7 +6051,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, dst, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(dst, &region,
-					     MOVE_READ | MOVE_WRITE))
+					     drawable_gc_flags(dst, gc, false)))
 		goto out_gc;
 
 	DBG(("%s: fbCopyPlane(%d, %d, %d, %d, %d,%d) %x\n",
commit d4fa4d5494db45b227c9ae7f7a90cd5dfd940027
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 19:29:32 2012 +0100

    sna: Allow inplace copies for wedged CopyArea
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7229f36..4321c21 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4584,7 +4584,8 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (!sna_gc_move_to_cpu(gc, dst, &region))
 			goto out;
 
-		if (!sna_drawable_move_region_to_cpu(dst, &region, MOVE_READ | MOVE_WRITE))
+		if (!sna_drawable_move_region_to_cpu(dst, &region,
+						     drawable_gc_flags(dst, gc, false)))
 			goto out_gc;
 
 		RegionTranslate(&region,
commit 217eeadf81a8cbb43e495e1e937acdd95c703377
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 19:35:06 2012 +0100

    sna: Allow operation inplace to scanout whilst wedged
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ebf7a23..7229f36 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1048,7 +1048,7 @@ static inline bool pixmap_inplace(struct sna *sna,
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
-	if (wedged(sna))
+	if (wedged(sna) && !priv->pinned)
 		return false;
 
 	if (priv->mapped)
@@ -1455,7 +1455,7 @@ static inline bool region_inplace(struct sna *sna,
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
-	if (wedged(sna))
+	if (wedged(sna) && !priv->pinned)
 		return false;
 
 	if (priv->cpu) {
commit 40ff29480a0dbf458adf1a1b0d3275ad1361530e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 18:38:49 2012 +0100

    sna: Tweak fast blt path
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbblt.c b/src/sna/fb/fbblt.c
index 247a331..fd55c85 100644
--- a/src/sna/fb/fbblt.c
+++ b/src/sna/fb/fbblt.c
@@ -276,6 +276,7 @@ fbBlt(FbBits *srcLine, FbStride srcStride, int srcX,
 	if (alu == GXcopy && pm == FB_ALLONES && ((srcX|dstX|width) & 7) == 0) {
 		CARD8 *s = (CARD8 *) srcLine;
 		CARD8 *d = (CARD8 *) dstLine;
+		void *(*func)(void *, const void *, size_t);
 		int i;
 
 		srcStride *= sizeof(FbBits);
@@ -287,28 +288,24 @@ fbBlt(FbBits *srcLine, FbStride srcStride, int srcX,
 		DBG(("%s fast blt\n", __FUNCTION__));
 
 		if ((srcLine < dstLine && srcLine + width > dstLine) ||
-		    (dstLine < srcLine && dstLine + width > srcLine)) {
-			if (!upsidedown)
-				for (i = 0; i < height; i++)
-					memmove(d + i * dstStride,
-						s + i * srcStride,
-						width);
-			else
-				for (i = height - 1; i >= 0; i--)
-					memmove(d + i * dstStride,
-						s + i * srcStride,
-						width);
+		    (dstLine < srcLine && dstLine + width > srcLine))
+			func = memmove;
+		else
+			func = memcpy;
+		if (!upsidedown) {
+			if (srcStride == dstStride && srcStride == width) {
+				width *= height;
+				height = 1;
+			}
+			for (i = 0; i < height; i++)
+				func(d + i * dstStride,
+				     s + i * srcStride,
+				     width);
 		} else {
-			if (!upsidedown)
-				for (i = 0; i < height; i++)
-					memcpy(d + i * dstStride,
-					       s + i * srcStride,
-					       width);
-			else
-				for (i = height - 1; i >= 0; i--)
-					memcpy(d + i * dstStride,
-					       s + i * srcStride,
-					       width);
+			for (i = height; i--; )
+				func(d + i * dstStride,
+				     s + i * srcStride,
+				     width);
 		}
 
 		return;
commit fce69c79c4840e7863d7c382da0d22be90a9f19a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 18:28:24 2012 +0100

    sna: prefer fbBlt over pixman_blt
    
    It is currently much better optimised through memcpy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbcopy.c b/src/sna/fb/fbcopy.c
index a2b1ded..a486a5b 100644
--- a/src/sna/fb/fbcopy.c
+++ b/src/sna/fb/fbcopy.c
@@ -35,42 +35,27 @@ fbCopyNtoN(DrawablePtr src_drawable, DrawablePtr dst_drawable, GCPtr gc,
 {
 	CARD8 alu = gc ? gc->alu : GXcopy;
 	FbBits pm = gc ? fb_gc(gc)->pm : FB_ALLONES;
-	FbBits *src;
-	FbStride srcStride;
-	int srcBpp;
+	FbBits *src, *dst;
+	FbStride srcStride, dstStride;
+	int dstBpp, srcBpp;
 	int srcXoff, srcYoff;
-	FbBits *dst;
-	FbStride dstStride;
-	int dstBpp;
 	int dstXoff, dstYoff;
 
 	fbGetDrawable(src_drawable, src, srcStride, srcBpp, srcXoff, srcYoff);
 	fbGetDrawable(dst_drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
 
-	while (nbox--) {
-		if (pm == FB_ALLONES && alu == GXcopy && !reverse && !upsidedown) {
-			if (!pixman_blt
-			    ((uint32_t *) src, (uint32_t *) dst, srcStride, dstStride,
-			     srcBpp, dstBpp, (box->x1 + dx + srcXoff),
-			     (box->y1 + dy + srcYoff), (box->x1 + dstXoff),
-			     (box->y1 + dstYoff), (box->x2 - box->x1),
-			     (box->y2 - box->y1)))
-				goto fallback;
-			else
-				goto next;
-		}
-fallback:
-		fbBlt(src + (box->y1 + dy + srcYoff) * srcStride,
-		      srcStride,
-		      (box->x1 + dx + srcXoff) * srcBpp,
-		      dst + (box->y1 + dstYoff) * dstStride,
-		      dstStride,
+	src += (dy + srcYoff) * srcStride;
+	srcXoff += dx;
+	dst += dstYoff * dstStride;
+	do {
+		fbBlt(src + box->y1 * srcStride, srcStride,
+		      (box->x1 + srcXoff) * srcBpp,
+		      dst + box->y1 * dstStride, dstStride,
 		      (box->x1 + dstXoff) * dstBpp,
 		      (box->x2 - box->x1) * dstBpp,
-		      (box->y2 - box->y1), alu, pm, dstBpp, reverse, upsidedown);
-next:
-		box++;
-	}
+		      (box->y2 - box->y1),
+		      alu, pm, dstBpp, reverse, upsidedown);
+	} while (box++, --nbox);
 }
 
 void
commit c29f96d50839388377ad57c6366f9bc7ad8b9d0a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 13:02:51 2012 +0100

    sna/gen7: Bump the number of pixel shader threads for IVB GT2
    
    Spotted-by: Kilarski, Bernard R <bernard.r.kilarski at intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 1e10cb0..f8036fb 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -83,7 +83,7 @@ static const struct gt_info gt1_info = {
 static const struct gt_info gt2_info = {
 	.max_vs_threads = 128,
 	.max_gs_threads = 128,
-	.max_wm_threads = 86,
+	.max_wm_threads = 172,
 	.urb = { 256, 704, 320 },
 };
 
commit 799bae9e8ff53fb1b5c74f3278d530a58d66de9a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 17:07:10 2012 +0100

    sna/dri: Do not allow an exchange to take place on invalid buffers
    
    If the SwapBuffers is called after we have resized a Window but before
    the client has processed the Invalidate notification, then the
    SwapBuffers will be referring to a pair of stale buffers. As the buffers
    are no longer attached to the Pixmap, we can not simply exchange them.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index d369871..65c3550 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1014,6 +1014,12 @@ can_exchange(struct sna * sna,
 		return false;
 	}
 
+	if (sna_pixmap_get_buffer(pixmap) != front) {
+		DBG(("%s: no, DRI2 drawable is no longer attached\n",
+		     __FUNCTION__));
+		return false;
+	}
+
 	return true;
 }
 
@@ -1046,13 +1052,21 @@ sna_dri_exchange_buffers(DrawablePtr draw,
 	back_bo = get_private(back)->bo;
 	front_bo = get_private(front)->bo;
 
-	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
-	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));
-
-	DBG(("%s: exchange front=%d/%d and back=%d/%d\n",
+	DBG(("%s: exchange front=%d/%d and back=%d/%d, pixmap=%ld %x%d\n",
 	     __FUNCTION__,
 	     front_bo->handle, front->name,
-	     back_bo->handle, back->name));
+	     back_bo->handle, back->name,
+	     pixmap->drawable.serialNumber,
+	     pixmap->drawable.width,
+	     pixmap->drawable.height));
+
+	DBG(("%s: back_bo pitch=%d, size=%d\n",
+	     __FUNCTION__, back_bo->pitch, kgem_bo_size(back_bo)));
+	DBG(("%s: front_bo pitch=%d, size=%d\n",
+	     __FUNCTION__, front_bo->pitch, kgem_bo_size(front_bo)));
+
+	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
+	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));
 
 	set_bo(pixmap, back_bo);
 
commit 067aeaddb8047f01ae3a20b26ba0acf5ba2d035f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 15:33:19 2012 +0100

    sna: Rebalance choice of GPU vs CPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index ae95878..cb40cd3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -301,6 +301,12 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 		return NULL;
 	}
 
+	if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
+		DBG(("%s: promoting snooped CPU bo due to reuse\n",
+		     __FUNCTION__));
+		return NULL;
+	}
+
 	if (priv->gpu_bo) {
 		switch (sna_damage_contains_box(priv->cpu_damage, box)) {
 		case PIXMAN_REGION_OUT:
@@ -321,54 +327,43 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 			}
 			break;
 		}
-
-		if (!blt &&
-		    priv->gpu_bo->tiling != I915_TILING_NONE &&
-		    (priv->cpu_bo->vmap || priv->cpu_bo->pitch >= 4096)) {
-			DBG(("%s: GPU bo exists and is tiled [%d], upload\n",
-			     __FUNCTION__, priv->gpu_bo->tiling));
-			return NULL;
-		}
 	}
 
-	if (blt) {
-		if (priv->cpu_bo->vmap && priv->source_count++ > SOURCE_BIAS) {
-			DBG(("%s: promoting snooped CPU bo due to BLT reuse\n",
-			     __FUNCTION__));
-			return NULL;
-		}
-	} else {
+	if (!blt) {
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
-		if (priv->cpu_bo->pitch >= 4096) {
-			DBG(("%s: promoting snooped CPU bo due to TLB miss\n",
-			     __FUNCTION__));
-			return NULL;
-		}
+		if (w < pixmap->drawable.width ||
+		    h < pixmap->drawable.height ||
+		    priv->source_count != SOURCE_BIAS) {
+			bool want_tiling;
 
-		if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
-			DBG(("%s: promoting snooped CPU bo due to reuse\n",
-			     __FUNCTION__));
-			return NULL;
-		}
+			if (priv->cpu_bo->pitch >= 4096) {
+				DBG(("%s: promoting snooped CPU bo due to TLB miss\n",
+				     __FUNCTION__));
+				return NULL;
+			}
 
-		if (priv->source_count*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
-		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem,
-							    blt ? I915_TILING_X : I915_TILING_Y,
-							    pixmap->drawable.width,
-							    pixmap->drawable.height,
-							    pixmap->drawable.bitsPerPixel)) {
-			DBG(("%s: pitch (%d) requires tiling\n",
-			     __FUNCTION__, priv->cpu_bo->pitch));
-			return NULL;
+			if (priv->gpu_bo)
+				want_tiling = priv->gpu_bo->tiling != I915_TILING_NONE;
+			else
+				want_tiling = kgem_choose_tiling(&sna->kgem,
+								 I915_TILING_Y,
+								 pixmap->drawable.width,
+								 pixmap->drawable.height,
+								 pixmap->drawable.bitsPerPixel) != I915_TILING_NONE;
+			if (want_tiling &&
+			    priv->source_count*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height) {
+				DBG(("%s: pitch (%d) requires tiling\n",
+				     __FUNCTION__, priv->cpu_bo->pitch));
+				return NULL;
+			}
 		}
-
-		++priv->source_count;
 	}
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+	++priv->source_count;
 	return priv->cpu_bo;
 }
 
commit 7ebeea3f5c71959773478de44b08a967fe5acc8b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 15:10:43 2012 +0100

    sna: Avoid the CPU bo readback for render paths
    
    As we exclude using the CPU bo if there is overlapping GPU damage, we
    can forgo the call to keep the transfer the damage.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 6fb9fe3..ae95878 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -369,9 +369,6 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
-	if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_ASYNC_HINT))
-		return NULL;
-
 	return priv->cpu_bo;
 }
 
commit ed8c729ed02705fd03be1ab22a94b5aae13567c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 11:21:30 2012 +0100

    sna: Catch the short-circuit path for clearing clear on move-to-gpu as well
    
    I thought the short-circuit path was only taken when already clear, I
    was wrong.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c4b6aba..ebf7a23 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2766,9 +2766,10 @@ done:
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
+
+active:
 	if (flags & MOVE_WRITE)
 		priv->clear = false;
-active:
 	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
 	return sna_pixmap_mark_active(sna, priv);
 }
commit 359b9cc82de13b0ac89692896ac6104ff3be308b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 10:26:27 2012 +0100

    sna: Limit the use of snoopable buffers to read/write uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d30b8e7..4c6ca57 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3830,10 +3830,13 @@ use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 {
 	assert(kgem->gen != 40);
 
-	if (kgem->gen < 30)
-		return flags & KGEM_BUFFER_WRITE;
+	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE)
+		return true;
 
-	return true;
+	if ((flags & KGEM_BUFFER_WRITE) == 0)
+		return kgem->gen >= 30;
+
+	return false;
 }
 
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
@@ -4077,71 +4080,74 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	/* Be more parsimonious with pwrite/pread buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
 		alloc = NUM_PAGES(size);
-	flags &= ~KGEM_BUFFER_INPLACE;
 
-	if (kgem->has_cache_level && use_snoopable_buffer(kgem, flags)) {
-		uint32_t handle;
+	if (use_snoopable_buffer(kgem, flags)) {
+		if (kgem->has_cache_level) {
+			uint32_t handle;
 
-		handle = gem_create(kgem->fd, alloc);
-		if (handle == 0)
-			return NULL;
+			handle = gem_create(kgem->fd, alloc);
+			if (handle == 0)
+				return NULL;
 
-		if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
-			gem_close(kgem->fd, handle);
-			return NULL;
-		}
+			if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
+				gem_close(kgem->fd, handle);
+				return NULL;
+			}
 
-		bo = malloc(sizeof(*bo));
-		if (bo == NULL) {
-			gem_close(kgem->fd, handle);
-			return NULL;
-		}
+			bo = malloc(sizeof(*bo));
+			if (bo == NULL) {
+				gem_close(kgem->fd, handle);
+				return NULL;
+			}
 
-		debug_alloc(kgem, alloc);
-		__kgem_bo_init(&bo->base, handle, alloc);
-		DBG(("%s: created handle=%d for buffer\n",
-		     __FUNCTION__, bo->base.handle));
+			debug_alloc(kgem, alloc);
+			__kgem_bo_init(&bo->base, handle, alloc);
+			DBG(("%s: created handle=%d for buffer\n",
+			     __FUNCTION__, bo->base.handle));
 
-		bo->base.reusable = false;
-		bo->base.vmap = true;
+			bo->base.reusable = false;
+			bo->base.vmap = true;
 
-		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem) {
-			bo->mmapped = true;
-			bo->need_io = false;
-			bo->base.io = true;
-			goto init;
-		} else {
-			bo->base.refcnt = 0; /* for valgrind */
-			kgem_bo_free(kgem, &bo->base);
-			bo = NULL;
+			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+			if (bo->mem) {
+				bo->mmapped = true;
+				bo->need_io = false;
+				bo->base.io = true;
+				goto init;
+			} else {
+				bo->base.refcnt = 0; /* for valgrind */
+				kgem_bo_free(kgem, &bo->base);
+				bo = NULL;
+			}
 		}
-	}
 
-	if (kgem->has_vmap && use_snoopable_buffer(kgem, flags)) {
-		bo = partial_bo_alloc(alloc);
-		if (bo) {
-			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
-						   alloc * PAGE_SIZE, false);
-			if (handle == 0 ||
-			    !__kgem_bo_init(&bo->base, handle, alloc)) {
-				free(bo);
-				bo = NULL;
-			} else {
-				DBG(("%s: created vmap handle=%d for buffer\n",
-				     __FUNCTION__, bo->base.handle));
+		if (kgem->has_vmap) {
+			bo = partial_bo_alloc(alloc);
+			if (bo) {
+				uint32_t handle = gem_vmap(kgem->fd, bo->mem,
+							   alloc * PAGE_SIZE, false);
+				if (handle == 0 ||
+				    !__kgem_bo_init(&bo->base, handle, alloc)) {
+					free(bo);
+					bo = NULL;
+				} else {
+					DBG(("%s: created vmap handle=%d for buffer\n",
+					     __FUNCTION__, bo->base.handle));
 
-				bo->base.io = true;
-				bo->base.vmap = true;
-				bo->base.map = MAKE_VMAP_MAP(bo);
-				bo->mmapped = true;
-				bo->need_io = false;
+					bo->base.io = true;
+					bo->base.vmap = true;
+					bo->base.map = MAKE_VMAP_MAP(bo);
+					bo->mmapped = true;
+					bo->need_io = false;
 
-				goto init;
+					goto init;
+				}
 			}
 		}
 	}
 
+	flags &= ~KGEM_BUFFER_INPLACE;
+
 	old = NULL;
 	if ((flags & KGEM_BUFFER_WRITE) == 0)
 		old = search_linear_cache(kgem, alloc, 0);
commit 4f21dba6ee505217d63edd84611622e05aeb4593
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 09:26:46 2012 +0100

    sna: Only drop the clear flag when writing to the GPU pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c4a43a1..c4b6aba 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2103,7 +2103,6 @@ sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 	if (!priv->pinned && priv->gpu_bo->proxy == NULL &&
 	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
-	priv->clear = false;
 	priv->cpu = false;
 	return priv;
 }
@@ -2282,15 +2281,16 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	}
 
 done:
-	if (priv->cpu_damage == NULL &&
-	    flags & MOVE_WRITE &&
-	    box_inplace(pixmap, box)) {
-		DBG(("%s: large operation on undamaged, promoting to full GPU\n",
-		     __FUNCTION__));
-		sna_damage_all(&priv->gpu_damage,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height);
-		priv->undamaged = false;
+	if (flags & MOVE_WRITE) {
+		priv->clear = false;
+		if (priv->cpu_damage == NULL && box_inplace(pixmap, box)) {
+			DBG(("%s: large operation on undamaged, promoting to full GPU\n",
+			     __FUNCTION__));
+			sna_damage_all(&priv->gpu_damage,
+				       pixmap->drawable.width,
+				       pixmap->drawable.height);
+			priv->undamaged = false;
+		}
 	}
 
 	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
@@ -2766,6 +2766,8 @@ done:
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
+	if (flags & MOVE_WRITE)
+		priv->clear = false;
 active:
 	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
 	return sna_pixmap_mark_active(sna, priv);
commit fbfbbee8288aba1e4754fd2dbc02e71f5e118cda
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 09:20:21 2012 +0100

    sna: Fix glyph DBG to include clip extents and actual glyph origin
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 308f52f..c4a43a1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10942,6 +10942,12 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
+
+	DBG(("%s: glyph clip box (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     extents->x1, extents->y1,
+	     extents->x2, extents->y2));
+
 	b = sna->kgem.batch + sna->kgem.nbatch;
 	b[0] = XY_SETUP_BLT | 3 << 20;
 	b[1] = bo->pitch;
@@ -10984,12 +10990,12 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				goto skip;
 
 			len = (w8 * h + 7) >> 3 << 1;
-			DBG(("%s glyph: (%d, %d) x (%d[%d], %d), len=%d\n" ,__FUNCTION__,
-			     x,y, w, w8, h, len));
-
 			x1 = x + c->metrics.leftSideBearing;
 			y1 = y - c->metrics.ascent;
 
+			DBG(("%s glyph: (%d, %d) -> (%d, %d) x (%d[%d], %d), len=%d\n" ,__FUNCTION__,
+			     x,y, x1, y1, w, w8, h, len));
+
 			if (x1 >= extents->x2 || y1 >= extents->y2)
 				goto skip;
 			if (x1 + w <= extents->x1 || y1 + h <= extents->y1)
@@ -11000,6 +11006,11 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
 
+				DBG(("%s: new batch, glyph clip box (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     extents->x1, extents->y1,
+				     extents->x2, extents->y2));
+
 				b = sna->kgem.batch + sna->kgem.nbatch;
 				b[0] = XY_SETUP_BLT | 3 << 20;
 				b[1] = bo->pitch;
@@ -11059,6 +11070,11 @@ skip:
 			b = sna->kgem.batch + sna->kgem.nbatch;
 			sna->kgem.nbatch += 3;
 
+			DBG(("%s: glyph clip box (%d, %d), (%d, %d)\n",
+			     __FUNCTION__,
+			     extents->x1, extents->y1,
+			     extents->x2, extents->y2));
+
 			b[0] = XY_SETUP_CLIP;
 			b[1] = extents->y1 << 16 | extents->x1;
 			b[2] = extents->y2 << 16 | extents->x2;
@@ -11637,6 +11653,11 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
+
+	DBG(("%s: glyph clip box (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     extents->x1, extents->y1,
+	     extents->x2, extents->y2));
 	b = sna->kgem.batch + sna->kgem.nbatch;
 	b[0] = XY_SETUP_BLT | 1 << 20;
 	b[1] = bo->pitch;
@@ -11675,12 +11696,12 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				goto skip;
 
 			len = (w8 * h + 7) >> 3 << 1;
-			DBG(("%s glyph: (%d, %d) x (%d[%d], %d), len=%d\n" ,__FUNCTION__,
-			     x,y, w, w8, h, len));
-
 			x1 = x + c->metrics.leftSideBearing;
 			y1 = y - c->metrics.ascent;
 
+			DBG(("%s glyph: (%d, %d) -> (%d, %d) x (%d[%d], %d), len=%d\n" ,__FUNCTION__,
+			     x,y, x1, y1, w, w8, h, len));
+
 			if (x1 >= extents->x2 || y1 >= extents->y2 ||
 			    x1 + w <= extents->x1 || y1 + h <= extents->y1) {
 				DBG(("%s: glyph is clipped (%d, %d)x(%d,%d) against extents (%d, %d), (%d, %d)\n",
@@ -11713,6 +11734,11 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
 
+				DBG(("%s: new batch, glyph clip box (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     extents->x1, extents->y1,
+				     extents->x2, extents->y2));
+
 				b = sna->kgem.batch + sna->kgem.nbatch;
 				b[0] = XY_SETUP_BLT | 1 << 20;
 				b[1] = bo->pitch;
@@ -11778,6 +11804,11 @@ skip:
 			b = sna->kgem.batch + sna->kgem.nbatch;
 			sna->kgem.nbatch += 3;
 
+			DBG(("%s: glyph clip box (%d, %d), (%d, %d)\n",
+			     __FUNCTION__,
+			     extents->x1, extents->y1,
+			     extents->x2, extents->y2));
+
 			b[0] = XY_SETUP_CLIP;
 			b[1] = extents->y1 << 16 | extents->x1;
 			b[2] = extents->y2 << 16 | extents->x2;
commit f0ed0ca234a4bed986824845ff70e8554c0e579f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 17 08:35:20 2012 +0100

    sna: Promote an undamaged pixmap to use the full GPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6c069ee..308f52f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2135,12 +2135,16 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	sna_damage_reduce(&priv->cpu_damage);
 	assert_pixmap_damage(pixmap);
 
+	if (priv->cpu_damage == NULL) {
+		priv->undamaged = false;
+		list_del(&priv->list);
+		return sna_pixmap_move_to_gpu(pixmap, flags);
+	}
+
 	if (priv->gpu_bo == NULL) {
 		unsigned create, tiling;
 
-		create = 0;
-		if (priv->cpu_damage)
-			create |= CREATE_INACTIVE;
+		create = CREATE_INACTIVE;
 		if (pixmap->usage_hint == SNA_CREATE_FB)
 			create |= CREATE_EXACT | CREATE_SCANOUT;
 
@@ -2156,22 +2160,9 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			return false;
 
 		DBG(("%s: created gpu bo\n", __FUNCTION__));
-
-		if (flags & MOVE_WRITE && priv->cpu_damage == NULL) {
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height);
-			list_del(&priv->list);
-			goto done;
-		}
 	}
 	assert(priv->gpu_bo->proxy == NULL);
 
-	if (priv->cpu_damage == NULL) {
-		list_del(&priv->list);
-		goto done;
-	}
-
 	if (priv->mapped) {
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
commit 1f79e877fb6602bd0f9dd14ac9c3511f3b7044fb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 21:18:24 2012 +0100

    sna: Share the pixmap migration decision with the BLT composite routines
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d7ecb00..6c069ee 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -425,8 +425,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 						  pixmap->drawable.bitsPerPixel,
 						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
 		if (priv->cpu_bo) {
-			DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
-			     priv->cpu_bo->handle));
+			DBG(("%s: allocated CPU handle=%d (vmap? %d)\n", __FUNCTION__,
+			     priv->cpu_bo->handle, priv->cpu_bo->vmap));
 
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
 			priv->stride = priv->cpu_bo->pitch;
@@ -525,7 +525,17 @@ static inline uint32_t default_tiling(PixmapPtr pixmap,
 	if (sna->kgem.gen == 21)
 		return I915_TILING_X;
 
-	if (sna_damage_is_all(&priv->cpu_damage,
+	/* Only on later generations was the render pipeline
+	 * more flexible than the BLT. So on gen2/3, prefer to
+	 * keep large objects accessible through the BLT.
+	 */
+	if (sna->kgem.gen < 40 &&
+	    (pixmap->drawable.width  > sna->render.max_3d_size ||
+	     pixmap->drawable.height > sna->render.max_3d_size))
+		return I915_TILING_X;
+
+	if (tiling == I915_TILING_Y &&
+	    sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
 		DBG(("%s: entire source is damaged, using Y-tiling\n",
@@ -533,15 +543,6 @@ static inline uint32_t default_tiling(PixmapPtr pixmap,
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = false;
 
-		/* Only on later generations was the render pipeline
-		 * more flexible than the BLT. So on gen2/3, prefer to
-		 * keep large objects accessible through the BLT.
-		 */
-		if (sna->kgem.gen < 40 &&
-		    (pixmap->drawable.width > sna->render.max_3d_size ||
-		     pixmap->drawable.height > sna->render.max_3d_size))
-			return I915_TILING_X;
-
 		return I915_TILING_Y;
 	}
 
@@ -1089,7 +1090,8 @@ static inline bool use_cpu_bo_for_download(struct sna *sna,
 	return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
 }
 
-static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
+static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv,
+					 unsigned flags)
 {
 	if (DBG_NO_CPU_UPLOAD)
 		return false;
@@ -1097,6 +1099,14 @@ static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
 	if (priv->cpu_bo == NULL)
 		return false;
 
+	DBG(("%s? flags=%x, gpu busy?=%d, cpu busy?=%d\n", __FUNCTION__,
+	     flags,
+	     kgem_bo_is_busy(priv->gpu_bo),
+	     kgem_bo_is_busy(priv->cpu_bo)));
+
+	if (flags & (MOVE_WRITE | MOVE_ASYNC_HINT))
+		return true;
+
 	return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
 }
 
@@ -2135,14 +2145,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			create |= CREATE_EXACT | CREATE_SCANOUT;
 
 		tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+		tiling = sna_pixmap_choose_tiling(pixmap, tiling);
 
 		priv->gpu_bo = kgem_create_2d(&sna->kgem,
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap,
-								       tiling),
-					      create);
+					      tiling, create);
 		if (priv->gpu_bo == NULL)
 			return false;
 
@@ -2182,7 +2191,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		if (n) {
 			bool ok = false;
 
-			if (use_cpu_bo_for_upload(priv)) {
+			if (use_cpu_bo_for_upload(priv, 0)) {
 				DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -2222,7 +2231,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		bool ok = false;
-		if (use_cpu_bo_for_upload(priv)) {
+		if (use_cpu_bo_for_upload(priv, 0)) {
 			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2253,7 +2262,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 		box = REGION_RECTS(&i);
 		ok = false;
-		if (use_cpu_bo_for_upload(priv)) {
+		if (use_cpu_bo_for_upload(priv, 0)) {
 			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2641,17 +2650,25 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		     priv->create));
 		assert(!priv->mapped);
 		if (flags & __MOVE_FORCE || priv->create & KGEM_CAN_CREATE_GPU) {
+			unsigned create, tiling;
+
 			assert(pixmap->drawable.width > 0);
 			assert(pixmap->drawable.height > 0);
 			assert(pixmap->drawable.bitsPerPixel >= 8);
+
+			tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+			tiling = sna_pixmap_choose_tiling(pixmap, tiling);
+
+			create = 0;
+			if (priv->cpu_damage && priv->cpu_bo == NULL)
+				create = CREATE_GTT_MAP | CREATE_INACTIVE;
+
 			priv->gpu_bo =
 				kgem_create_2d(&sna->kgem,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel,
-					       sna_pixmap_choose_tiling(pixmap,
-									DEFAULT_TILING),
-					       (priv->cpu_damage && priv->cpu_bo == NULL) ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
+					       tiling, create);
 		}
 		if (priv->gpu_bo == NULL) {
 			DBG(("%s: not creating GPU bo\n", __FUNCTION__));
@@ -2698,8 +2715,11 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
+		if (DAMAGE_IS_ALL(priv->cpu_damage))
+			flags |= MOVE_ASYNC_HINT;
+
 		ok = false;
-		if (use_cpu_bo_for_upload(priv)) {
+		if (use_cpu_bo_for_upload(priv, flags)) {
 			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -3055,7 +3075,8 @@ static bool upload_inplace(struct sna *sna,
 	}
 
 	if (priv->gpu_bo) {
-		assert(priv->gpu_bo->proxy == NULL);
+		if (priv->gpu_bo->proxy)
+			return false;
 
 		if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
 			DBG(("%s? no, GPU bo not mappable\n", __FUNCTION__));
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 80fad6d..ff8e3eb 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1138,20 +1138,20 @@ blt_composite_copy_boxes_with_alpha(struct sna *sna,
 static bool
 prepare_blt_copy(struct sna *sna,
 		 struct sna_composite_op *op,
+		 struct kgem_bo *bo,
 		 uint32_t alpha_fixup)
 {
 	PixmapPtr src = op->u.blt.src_pixmap;
-	struct sna_pixmap *priv = sna_pixmap(src);
 
-	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
+	if (!kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
 		return false;
 	}
 
-	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
+	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
 		if (!kgem_check_many_bo_fenced(&sna->kgem,
-					       op->dst.bo, priv->gpu_bo, NULL)) {
+					       op->dst.bo, bo, NULL)) {
 			DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
 			return false;
 		}
@@ -1170,9 +1170,7 @@ prepare_blt_copy(struct sna *sna,
 		op->box   = blt_composite_copy_box_with_alpha;
 		op->boxes = blt_composite_copy_boxes_with_alpha;
 
-		if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
-					      priv->gpu_bo,
-					      op->dst.bo,
+		if (!sna_blt_alpha_fixup_init(sna, &op->u.blt, bo, op->dst.bo,
 					      src->drawable.bitsPerPixel,
 					      alpha_fixup))
 			return false;
@@ -1181,15 +1179,13 @@ prepare_blt_copy(struct sna *sna,
 		op->box   = blt_composite_copy_box;
 		op->boxes = blt_composite_copy_boxes;
 
-		if (!sna_blt_copy_init(sna, &op->u.blt,
-				       priv->gpu_bo,
-				       op->dst.bo,
+		if (!sna_blt_copy_init(sna, &op->u.blt, bo, op->dst.bo,
 				       src->drawable.bitsPerPixel,
 				       GXcopy))
 			return false;
 	}
 
-	return begin_blt(sna, op);
+	return true;
 }
 
 fastcall static void
@@ -1434,119 +1430,33 @@ prepare_blt_put(struct sna *sna,
 		uint32_t alpha_fixup)
 {
 	PixmapPtr src = op->u.blt.src_pixmap;
-	struct sna_pixmap *priv;
-	struct kgem_bo *src_bo;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	op->done = nop_done;
+	if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
+		return false;
 
-	src_bo = NULL;
-	priv = sna_pixmap(src);
-	if (priv)
-		src_bo = priv->cpu_bo;
-	if (src_bo) {
-		if (alpha_fixup) {
-			op->blt   = blt_composite_copy_with_alpha;
-			op->box   = blt_composite_copy_box_with_alpha;
-			op->boxes = blt_composite_copy_boxes_with_alpha;
-
-			if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
-						      src_bo, op->dst.bo,
-						      op->dst.pixmap->drawable.bitsPerPixel,
-						      alpha_fixup))
-				return false;
-		} else {
-			op->blt   = blt_composite_copy;
-			op->box   = blt_composite_copy_box;
-			op->boxes = blt_composite_copy_boxes;
-
-			if (!sna_blt_copy_init(sna, &op->u.blt,
-					       src_bo, op->dst.bo,
-					       op->dst.pixmap->drawable.bitsPerPixel,
-					       GXcopy))
-				return false;
-		}
+	assert(src->devKind);
+	assert(src->devPrivate.ptr);
 
-		return begin_blt(sna, op);
-	} else {
-		if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
-			return false;
+	if (alpha_fixup)
+		return false; /* XXX */
 
-		assert(src->devKind);
-		assert(src->devPrivate.ptr);
-
-		if (alpha_fixup)
-			return false; /* XXX */
-
-		if (alpha_fixup) {
-			op->u.blt.pixel = alpha_fixup;
-			op->blt   = blt_put_composite_with_alpha;
-			op->box   = blt_put_composite_box_with_alpha;
-			op->boxes = blt_put_composite_boxes_with_alpha;
-		} else {
-			op->blt   = blt_put_composite;
-			op->box   = blt_put_composite_box;
-			op->boxes = blt_put_composite_boxes;
-		}
+	if (alpha_fixup) {
+		op->u.blt.pixel = alpha_fixup;
+		op->blt   = blt_put_composite_with_alpha;
+		op->box   = blt_put_composite_box_with_alpha;
+		op->boxes = blt_put_composite_boxes_with_alpha;
+	} else {
+		op->blt   = blt_put_composite;
+		op->box   = blt_put_composite_box;
+		op->boxes = blt_put_composite_boxes;
 	}
+	op->done = nop_done;
 
 	return true;
 }
 
-static bool
-has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
-{
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	BoxRec area;
-
-	if (!priv)
-		return false;
-	if (!priv->gpu_bo)
-		return false;
-
-	if (priv->cpu_damage == NULL)
-		return true;
-	if (priv->cpu_damage->mode == DAMAGE_ALL)
-		return false;
-
-	area.x1 = x;
-	area.y1 = y;
-	area.x2 = x + w;
-	area.y2 = y + h;
-	if (priv->gpu_damage &&
-	    sna_damage_contains_box__no_reduce(priv->gpu_damage, &area))
-		return true;
-
-	return sna_damage_contains_box(priv->cpu_damage,
-				       &area) == PIXMAN_REGION_OUT;
-}
-
-static bool
-has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
-{
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	BoxRec area;
-
-	if (!priv)
-		return true;
-	if (priv->gpu_damage == NULL)
-		return true;
-	if (priv->gpu_damage->mode == DAMAGE_ALL)
-		return false;
-
-	area.x1 = x;
-	area.y1 = y;
-	area.x2 = x + w;
-	area.y2 = y + h;
-	if (priv->cpu_damage &&
-	    sna_damage_contains_box__no_reduce(priv->cpu_damage, &area))
-		return true;
-
-	return sna_damage_contains_box(priv->gpu_damage,
-				       &area) == PIXMAN_REGION_OUT;
-}
-
 static void
 reduce_damage(struct sna_composite_op *op,
 	      int dst_x, int dst_y,
@@ -1592,7 +1502,9 @@ sna_blt_composite(struct sna *sna,
 	PictFormat src_format = src->format;
 	PixmapPtr src_pixmap;
 	struct sna_pixmap *priv;
+	struct kgem_bo *bo;
 	int16_t tx, ty;
+	BoxRec box;
 	uint32_t alpha_fixup;
 	bool was_clear;
 	bool ret;
@@ -1748,13 +1660,15 @@ clear:
 	     __FUNCTION__,
 	     tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
 
-	if (has_gpu_area(src_pixmap, x, y, width, height))
-		ret = prepare_blt_copy(sna, tmp, alpha_fixup);
-	else if (has_cpu_area(src_pixmap, x, y, width, height))
-		ret = prepare_blt_put(sna, tmp, alpha_fixup);
-	else if (sna_pixmap_move_to_gpu(src_pixmap, MOVE_READ))
-		ret = prepare_blt_copy(sna, tmp, alpha_fixup);
-	else
+	ret = false;
+	box.x1 = x;
+	box.y1 = y;
+	box.x2 = x + width;
+	box.y2 = y + height;
+	bo = __sna_render_pixmap_bo(sna, src_pixmap, &box, true);
+	if (bo)
+		ret = prepare_blt_copy(sna, tmp, bo, alpha_fixup);
+	if (!ret)
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
 
 	return ret;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index dcfab91..6fb9fe3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -288,7 +288,7 @@ void no_render_init(struct sna *sna)
 }
 
 static struct kgem_bo *
-use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
+use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box, bool blt)
 {
 	struct sna_pixmap *priv;
 
@@ -322,24 +322,40 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 			break;
 		}
 
-		if (priv->gpu_bo->tiling != I915_TILING_NONE &&
+		if (!blt &&
+		    priv->gpu_bo->tiling != I915_TILING_NONE &&
 		    (priv->cpu_bo->vmap || priv->cpu_bo->pitch >= 4096)) {
 			DBG(("%s: GPU bo exists and is tiled [%d], upload\n",
 			     __FUNCTION__, priv->gpu_bo->tiling));
 			return NULL;
 		}
+	}
+
+	if (blt) {
+		if (priv->cpu_bo->vmap && priv->source_count++ > SOURCE_BIAS) {
+			DBG(("%s: promoting snooped CPU bo due to BLT reuse\n",
+			     __FUNCTION__));
+			return NULL;
+		}
 	} else {
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
+		if (priv->cpu_bo->pitch >= 4096) {
+			DBG(("%s: promoting snooped CPU bo due to TLB miss\n",
+			     __FUNCTION__));
+			return NULL;
+		}
+
 		if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
 			DBG(("%s: promoting snooped CPU bo due to reuse\n",
 			     __FUNCTION__));
 			return NULL;
 		}
 
-		if (priv->source_count++*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
-		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_Y,
+		if (priv->source_count*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
+		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem,
+							    blt ? I915_TILING_X : I915_TILING_Y,
 							    pixmap->drawable.width,
 							    pixmap->drawable.height,
 							    pixmap->drawable.bitsPerPixel)) {
@@ -347,15 +363,20 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 			     __FUNCTION__, priv->cpu_bo->pitch));
 			return NULL;
 		}
+
+		++priv->source_count;
 	}
 
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+	if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_ASYNC_HINT))
+		return NULL;
+
 	return priv->cpu_bo;
 }
 
 static struct kgem_bo *
-move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
+move_to_gpu(PixmapPtr pixmap, const BoxRec *box, bool blt)
 {
 	struct sna_pixmap *priv;
 	int count, w, h;
@@ -390,7 +411,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 
 	if (DBG_FORCE_UPLOAD < 0) {
 		if (!sna_pixmap_force_to_gpu(pixmap,
-					       MOVE_SOURCE_HINT | MOVE_READ))
+					     blt ? MOVE_READ : MOVE_SOURCE_HINT | MOVE_READ))
 			return NULL;
 
 		return priv->gpu_bo;
@@ -407,7 +428,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		     box->x1, box->y1, box->x2, box->y2, priv->source_count,
 		     migrate));
 	} else if (kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-				      I915_TILING_Y, w, h,
+				      blt ? I915_TILING_X : I915_TILING_Y, w, h,
 				      pixmap->drawable.bitsPerPixel) != I915_TILING_NONE) {
 		count = priv->source_count++;
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
@@ -424,7 +445,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 	}
 
 	if (migrate && !sna_pixmap_force_to_gpu(pixmap,
-						MOVE_SOURCE_HINT | MOVE_READ))
+						blt ? MOVE_READ : MOVE_SOURCE_HINT | MOVE_READ))
 		return NULL;
 
 	return priv->gpu_bo;
@@ -465,13 +486,11 @@ static struct kgem_bo *upload(struct sna *sna,
 				      pixmap->devPrivate.ptr, box,
 				      pixmap->devKind,
 				      pixmap->drawable.bitsPerPixel);
-	if (bo) {
+	if (channel && bo) {
 		channel->width  = box->x2 - box->x1;
 		channel->height = box->y2 - box->y1;
 		channel->offset[0] -= box->x1;
 		channel->offset[1] -= box->y1;
-		channel->scale[0] = 1.f/channel->width;
-		channel->scale[1] = 1.f/channel->height;
 
 		if (priv &&
 		    pixmap->usage_hint == 0 &&
@@ -483,6 +502,24 @@ static struct kgem_bo *upload(struct sna *sna,
 	return bo;
 }
 
+struct kgem_bo *
+__sna_render_pixmap_bo(struct sna *sna,
+		       PixmapPtr pixmap,
+		       const BoxRec *box,
+		       bool blt)
+{
+	struct kgem_bo *bo;
+
+	bo = use_cpu_bo(sna, pixmap, box, blt);
+	if (bo == NULL) {
+		bo = move_to_gpu(pixmap, box, blt);
+		if (bo == NULL)
+			return NULL;
+	}
+
+	return bo;
+}
+
 int
 sna_render_pixmap_bo(struct sna *sna,
 		     struct sna_composite_channel *channel,
@@ -491,7 +528,6 @@ sna_render_pixmap_bo(struct sna *sna,
 		     int16_t w, int16_t h,
 		     int16_t dst_x, int16_t dst_y)
 {
-	struct kgem_bo *bo;
 	struct sna_pixmap *priv;
 	BoxRec box;
 
@@ -500,8 +536,6 @@ sna_render_pixmap_bo(struct sna *sna,
 
 	channel->width  = pixmap->drawable.width;
 	channel->height = pixmap->drawable.height;
-	channel->scale[0] = 1.f / pixmap->drawable.width;
-	channel->scale[1] = 1.f / pixmap->drawable.height;
 	channel->offset[0] = x - dst_x;
 	channel->offset[1] = y - dst_y;
 
@@ -511,16 +545,16 @@ sna_render_pixmap_bo(struct sna *sna,
 		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage ||
 		     priv->gpu_bo->proxy)) {
 			DBG(("%s: GPU all damaged\n", __FUNCTION__));
-			channel->bo = kgem_bo_reference(priv->gpu_bo);
-			return 1;
+			channel->bo = priv->gpu_bo;
+			goto done;
 		}
 
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
 		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
 			DBG(("%s: CPU all damaged\n", __FUNCTION__));
-			channel->bo = kgem_bo_reference(priv->cpu_bo);
-			return 1;
+			channel->bo = priv->cpu_bo;
+			goto done;
 		}
 	}
 
@@ -572,21 +606,22 @@ sna_render_pixmap_bo(struct sna *sna,
 	     channel->offset[0], channel->offset[1],
 	     pixmap->drawable.width, pixmap->drawable.height));
 
-	bo = use_cpu_bo(sna, pixmap, &box);
-	if (bo) {
-		bo = kgem_bo_reference(bo);
+
+	channel->bo = __sna_render_pixmap_bo(sna, pixmap, &box, false);
+	if (channel->bo == NULL) {
+		DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
+		     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
+		channel->bo = upload(sna, channel, pixmap, &box);
+		if (channel->bo == NULL)
+			return 0;
 	} else {
-		bo = move_to_gpu(pixmap, &box);
-		if (bo == NULL) {
-			DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
-			     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
-			bo = upload(sna, channel, pixmap, &box);
-		} else
-			bo = kgem_bo_reference(bo);
+done:
+		kgem_bo_reference(channel->bo);
 	}
 
-	channel->bo = bo;
-	return bo != NULL;
+	channel->scale[0] = 1.f / channel->width;
+	channel->scale[1] = 1.f / channel->height;
+	return 1;
 }
 
 static int sna_render_picture_downsample(struct sna *sna,
@@ -929,14 +964,11 @@ sna_render_picture_partial(struct sna *sna,
 		}
 	}
 
-	if (use_cpu_bo(sna, pixmap, &box)) {
-		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
-			return 0;
-
+	if (use_cpu_bo(sna, pixmap, &box, false)) {
 		bo = sna_pixmap(pixmap)->cpu_bo;
 	} else {
 		if (!sna_pixmap_force_to_gpu(pixmap,
-					     MOVE_SOURCE_HINT | MOVE_READ))
+					     MOVE_READ | MOVE_SOURCE_HINT))
 			return 0;
 
 		bo = sna_pixmap(pixmap)->gpu_bo;
@@ -1119,12 +1151,9 @@ sna_render_picture_extract(struct sna *sna,
 						     dst_x, dst_y);
 	}
 
-	src_bo = use_cpu_bo(sna, pixmap, &box);
-	if (src_bo) {
-		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
-			return 0;
-	} else {
-		src_bo = move_to_gpu(pixmap, &box);
+	src_bo = use_cpu_bo(sna, pixmap, &box, true);
+	if (src_bo == NULL) {
+		src_bo = move_to_gpu(pixmap, &box, false);
 		if (src_bo == NULL) {
 			bo = kgem_upload_source_image(&sna->kgem,
 						      pixmap->devPrivate.ptr,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 7c43f61..0f96ace 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -619,6 +619,12 @@ sna_get_pixel_from_rgba(uint32_t * pixel,
 	return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
 }
 
+struct kgem_bo *
+__sna_render_pixmap_bo(struct sna *sna,
+		       PixmapPtr pixmap,
+		       const BoxRec *box,
+		       bool blt);
+
 int
 sna_render_pixmap_bo(struct sna *sna,
 		     struct sna_composite_channel *channel,
commit d141a2d59007866c9eaad020c744be446e70c346
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 23:20:58 2012 +0100

    sna: Disable snoopable bo for gen4
    
    Further inspection reveals that whilst it may not hang the GPU, the
    results are not pleasant or complete.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d6ed4e0..d30b8e7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -687,6 +687,10 @@ static bool test_has_cache_level(struct kgem *kgem)
 	if (DBG_NO_CACHE_LEVEL)
 		return false;
 
+	/* Incoherent blt and sampler hangs the GPU */
+	if (kgem->gen == 40)
+		return false;
+
 	handle = gem_create(kgem->fd, 1);
 	if (handle == 0)
 		return false;
@@ -705,6 +709,10 @@ static bool test_has_vmap(struct kgem *kgem)
 	if (DBG_NO_VMAP)
 		return false;
 
+	/* Incoherent blt and sampler hangs the GPU */
+	if (kgem->gen == 40)
+		return false;
+
 	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
 #else
 	return false;
@@ -3102,6 +3110,8 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		if (bo == NULL)
 			return bo;
 
+		assert(bo->tiling == I915_TILING_NONE);
+
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
 			return NULL;
@@ -3116,6 +3126,8 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		if (bo == NULL)
 			return NULL;
 
+		assert(bo->tiling == I915_TILING_NONE);
+
 		bo->reusable = false;
 		bo->vmap = true;
 		if (!gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC) ||
@@ -3816,8 +3828,7 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 static inline bool
 use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
 {
-	if (kgem->gen == 40)
-		return false;
+	assert(kgem->gen != 40);
 
 	if (kgem->gen < 30)
 		return flags & KGEM_BUFFER_WRITE;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index ab7fb81..dcfab91 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -301,9 +301,6 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		return NULL;
 	}
 
-	if (sna->kgem.gen == 40) /* XXX sampler dies with snoopable memory */
-		return NULL;
-
 	if (priv->gpu_bo) {
 		switch (sna_damage_contains_box(priv->cpu_damage, box)) {
 		case PIXMAN_REGION_OUT:
commit 107feed2a4ca044313c70f83a62909187ff1f905
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 18:58:30 2012 +0100

    sna: Disable snoopable upload buffers for gen4
    
    The sampler really does not like using snoopable buffers...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c6fbddb..c985c8d 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -725,6 +725,8 @@ gen4_bind_bo(struct sna *sna,
 	uint32_t domains;
 	uint16_t offset;
 
+	assert(!kgem_bo_is_vmap(bo));
+
 	/* After the first bind, we manage the cache domains within the batch */
 	if (is_dst) {
 		domains = I915_GEM_DOMAIN_RENDER << 16 | I915_GEM_DOMAIN_RENDER;
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 51fc29d..d6ed4e0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3813,6 +3813,18 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 	return bo;
 }
 
+static inline bool
+use_snoopable_buffer(struct kgem *kgem, uint32_t flags)
+{
+	if (kgem->gen == 40)
+		return false;
+
+	if (kgem->gen < 30)
+		return flags & KGEM_BUFFER_WRITE;
+
+	return true;
+}
+
 struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				   uint32_t size, uint32_t flags,
 				   void **ret)
@@ -4056,7 +4068,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = NUM_PAGES(size);
 	flags &= ~KGEM_BUFFER_INPLACE;
 
-	if (flags & KGEM_BUFFER_WRITE && kgem->has_cache_level) {
+	if (kgem->has_cache_level && use_snoopable_buffer(kgem, flags)) {
 		uint32_t handle;
 
 		handle = gem_create(kgem->fd, alloc);
@@ -4079,13 +4091,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		DBG(("%s: created handle=%d for buffer\n",
 		     __FUNCTION__, bo->base.handle));
 
+		bo->base.reusable = false;
+		bo->base.vmap = true;
+
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
 		if (bo->mem) {
 			bo->mmapped = true;
 			bo->need_io = false;
 			bo->base.io = true;
-			bo->base.reusable = false;
-			bo->base.vmap = true;
 			goto init;
 		} else {
 			bo->base.refcnt = 0; /* for valgrind */
@@ -4094,7 +4107,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 
-	if (flags & KGEM_BUFFER_WRITE && kgem->has_vmap) {
+	if (kgem->has_vmap && use_snoopable_buffer(kgem, flags)) {
 		bo = partial_bo_alloc(alloc);
 		if (bo) {
 			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8e9b006..63be218 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -500,6 +500,12 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
 }
 
+static inline bool kgem_bo_is_vmap(struct kgem_bo *bo)
+{
+	while (bo->proxy)
+		bo = bo->proxy;
+	return bo->vmap;
+}
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
commit 818c21165c746b7b410a6e6e23b1675d88db685d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 16:28:00 2012 +0100

    sna: Fixup pixmap validation for sna_copy_area()
    
    Remember to offset the box by the drawable deltas in order to
    compensate for compositing.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=52142
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6d7f51c..d7ecb00 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -229,6 +229,14 @@ static void _assert_pixmap_contains_box(PixmapPtr pixmap, const BoxRec *box, con
 	}
 }
 
+static void _assert_pixmap_contains_box_with_offset(PixmapPtr pixmap, const BoxRec *box, int dx, int dy, const char *function)
+{
+	BoxRec b = *box;
+	b.x1 += dx; b.x2 += dx;
+	b.y1 += dy; b.y2 += dy;
+	_assert_pixmap_contains_box(pixmap, &b, function);
+}
+
 static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, const BoxRec *box, int n, int dx, int dy, const char *function)
 {
 	BoxRec extents;
@@ -333,12 +341,14 @@ static void assert_pixmap_damage(PixmapPtr p)
 }
 
 #define assert_pixmap_contains_box(p, b) _assert_pixmap_contains_box(p, b, __FUNCTION__)
+#define assert_pixmap_contains_box_with_offset(p, b, dx, dy) _assert_pixmap_contains_box_with_offset(p, b, dx, dy, __FUNCTION__)
 #define assert_drawable_contains_box(d, b) _assert_drawable_contains_box(d, b, __FUNCTION__)
 #define assert_pixmap_contains_boxes(p, b, n, x, y) _assert_pixmap_contains_boxes(p, b, n, x, y, __FUNCTION__)
 #define assert_pixmap_contains_points(p, pt, n, x, y) _assert_pixmap_contains_points(p, pt, n, x, y, __FUNCTION__)
 
 #else
 #define assert_pixmap_contains_box(p, b)
+#define assert_pixmap_contains_box_with_offset(p, b, dx, dy)
 #define assert_pixmap_contains_boxes(p, b, n, x, y)
 #define assert_pixmap_contains_points(p, pt, n, x, y)
 #define assert_drawable_contains_box(d, b)
@@ -3937,7 +3947,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	bool replaces;
 
 	assert(RegionNumRects(region));
-	assert_pixmap_contains_box(dst_pixmap, RegionExtents(region));
 
 	if (src_pixmap == dst_pixmap)
 		return sna_self_copy_boxes(src, dst, gc,
@@ -3962,6 +3971,11 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	src_dx += dx - dst_dx;
 	src_dy += dy - dst_dy;
 
+	assert_pixmap_contains_box(dst_pixmap, RegionExtents(region));
+	assert_pixmap_contains_box_with_offset(src_pixmap,
+					       RegionExtents(region),
+					       src_dx, src_dy);
+
 	replaces = n == 1 &&
 		box->x1 <= 0 &&
 		box->y1 <= 0 &&
commit 623d84bed7c47ac39348775ce35eec54196f6dac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 16:07:37 2012 +0100

    Wrap defines to avoid redefinition warnings
    
    Currently this only catches out ARRAY_SIZE, but wrap the other common
    defines for consistency.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index 2076b2f..fe27011 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -408,9 +408,17 @@ intel_get_screen_private(ScrnInfoPtr scrn)
 	return (intel_screen_private *)(scrn->driverPrivate);
 }
 
+#ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+#endif
+
+#ifndef ALIGN
 #define ALIGN(i,m)	(((i) + (m) - 1) & ~((m) - 1))
+#endif
+
+#ifndef MIN
 #define MIN(a,b)	((a) < (b) ? (a) : (b))
+#endif
 
 static inline unsigned long intel_pixmap_pitch(PixmapPtr pixmap)
 {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index c354547..8e9b006 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -191,7 +191,10 @@ struct kgem {
 #define KGEM_RELOC_RESERVED 4
 #define KGEM_EXEC_RESERVED 1
 
+#ifndef ARRAY_SIZE
 #define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+#endif
+
 #define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
 #define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
 #define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 03115c2..194c712 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -327,9 +327,18 @@ to_sna_from_kgem(struct kgem *kgem)
 #ifndef ARRAY_SIZE
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #endif
+
+#ifndef ALIGN
 #define ALIGN(i,m)	(((i) + (m) - 1) & ~((m) - 1))
+#endif
+
+#ifndef MIN
 #define MIN(a,b)	((a) <= (b) ? (a) : (b))
+#endif
+
+#ifndef MAX
 #define MAX(a,b)	((a) >= (b) ? (a) : (b))
+#endif
 
 extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 				     const BoxRec *box,
commit 907a2a7c97514d3f7610648ed87c7042a857f786
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 14:47:03 2012 +0100

    sna/trapezoids: Fix inplace unaligned fills (on gen4)
    
    Reported-by: Sergio Callegari <sergio.callegari at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=52150
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 1261e9b..634423e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3054,7 +3054,7 @@ lerp32_unaligned_box_row(PixmapPtr scratch, uint32_t color,
 	DBG(("%s: x=(%d.%d, %d.%d), y=%dx%d, covered=%d\n", __FUNCTION__,
 	     x1, fx1, x2, fx2, y, h, covered));
 
-	if (x2 < x1) {
+	if (x1 < x2) {
 		if (fx1) {
 			lerp32_opacity(scratch, color,
 				       x1, 1,
commit 6ce2f40249231f57cf464361ea5329cee1932ccf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 14:46:39 2012 +0100

    sna/trapezoids: Add some DBG to unaligned fills
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 1553f58..1261e9b 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3043,14 +3043,17 @@ lerp32_unaligned_box_row(PixmapPtr scratch, uint32_t color,
 {
 	int16_t x1 = pixman_fixed_to_int(trap->left.p1.x) + dx;
 	int16_t fx1 = grid_coverage(SAMPLES_X, trap->left.p1.x);
-	int16_t x2 = pixman_fixed_to_int(trap->right.p1.x) + dx;
-	int16_t fx2 = grid_coverage(SAMPLES_X, trap->right.p1.x);
+	int16_t x2 = pixman_fixed_to_int(trap->right.p2.x) + dx;
+	int16_t fx2 = grid_coverage(SAMPLES_X, trap->right.p2.x);
 
 	if (x1 < extents->x1)
 		x1 = extents->x1, fx1 = 0;
 	if (x2 > extents->x2)
 		x2 = extents->x2, fx2 = 0;
 
+	DBG(("%s: x=(%d.%d, %d.%d), y=%dx%d, covered=%d\n", __FUNCTION__,
+	     x1, fx1, x2, fx2, y, h, covered));
+
 	if (x2 < x1) {
 		if (fx1) {
 			lerp32_opacity(scratch, color,
@@ -3185,6 +3188,7 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 	if (!(dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)) {
 		DBG(("%s: fallback -- can not perform operation in place, unhanbled format %08lx\n",
 		     __FUNCTION__, (long)dst->format));
+
 		goto pixman;
 	}
 
@@ -3193,7 +3197,7 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 
 	if (op == PictOpOver && (color >> 24) == 0xff)
 		op = PictOpSrc;
-	if (op == PictOpOver) {
+	if (op == PictOpOver || op == PictOpAdd) {
 		struct sna_pixmap *priv = sna_pixmap(pixmap);
 		if (priv && priv->clear && priv->clear_color == 0)
 			op = PictOpSrc;
@@ -3208,7 +3212,8 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 		goto pixman;
 	}
 
-	DBG(("%s: inplace operation on argb32 destination\n", __FUNCTION__));
+	DBG(("%s: inplace operation on argb32 destination x %d\n",
+	     __FUNCTION__, n));
 	do {
 		RegionRec clip;
 		BoxPtr extents;
@@ -3244,10 +3249,20 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 			int16_t y2 = dy + pixman_fixed_to_int(t->bottom);
 			int16_t fy2 = pixman_fixed_frac(t->bottom);
 
+			DBG(("%s: t=(%d, %d), (%d, %d), extents (%d, %d), (%d, %d)\n",
+			     __FUNCTION__,
+			     pixman_fixed_to_int(t->left.p1.x),
+			     pixman_fixed_to_int(t->top),
+			     pixman_fixed_to_int(t->right.p2.x),
+			     pixman_fixed_to_int(t->bottom),
+			     extents->x1, extents->y1,
+			     extents->x2, extents->y2));
+
 			if (y1 < extents->y1)
 				y1 = extents->y1, fy1 = 0;
 			if (y2 > extents->y2)
 				y2 = extents->y2, fy2 = 0;
+
 			if (y1 < y2) {
 				if (fy1) {
 					lerp32_unaligned_box_row(pixmap, color, extents,
commit 2721214868685123c973970a8ce0d93346ae0ee2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 13:52:39 2012 +0100

    sna: Move the disabling of CPU bo for gen4 to the render unit
    
    They appear to work fine with the BLT and only seem to cause issues when
    used with the sampler. So enable them for accelerated uploads and
    downloads.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 93a0f94..51fc29d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -687,9 +687,6 @@ static bool test_has_cache_level(struct kgem *kgem)
 	if (DBG_NO_CACHE_LEVEL)
 		return false;
 
-	if (kgem->gen == 40) /* XXX sampler dies with snoopable memory */
-		return false;
-
 	handle = gem_create(kgem->fd, 1);
 	if (handle == 0)
 		return false;
@@ -708,9 +705,6 @@ static bool test_has_vmap(struct kgem *kgem)
 	if (DBG_NO_VMAP)
 		return false;
 
-	if (kgem->gen == 40)
-		return false;
-
 	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
 #else
 	return false;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index dcfab91..ab7fb81 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -301,6 +301,9 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		return NULL;
 	}
 
+	if (sna->kgem.gen == 40) /* XXX sampler dies with snoopable memory */
+		return NULL;
+
 	if (priv->gpu_bo) {
 		switch (sna_damage_contains_box(priv->cpu_damage, box)) {
 		case PIXMAN_REGION_OUT:
commit 0777b146bf1a63c99e4d4af141e676a47b1f2dc9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 13:11:07 2012 +0100

    sna: Use set-cache-level to allocate snoopable upload buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f85b5af..93a0f94 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3123,6 +3123,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return NULL;
 
 		bo->reusable = false;
+		bo->vmap = true;
 		if (!gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC) ||
 		    kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
@@ -4055,15 +4056,51 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 	}
-#else
-	flags &= ~KGEM_BUFFER_INPLACE;
 #endif
 	/* Be more parsimonious with pwrite/pread buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
 		alloc = NUM_PAGES(size);
 	flags &= ~KGEM_BUFFER_INPLACE;
 
-	if (kgem->has_vmap) {
+	if (flags & KGEM_BUFFER_WRITE && kgem->has_cache_level) {
+		uint32_t handle;
+
+		handle = gem_create(kgem->fd, alloc);
+		if (handle == 0)
+			return NULL;
+
+		if (!gem_set_cache_level(kgem->fd, handle, I915_CACHE_LLC)) {
+			gem_close(kgem->fd, handle);
+			return NULL;
+		}
+
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL) {
+			gem_close(kgem->fd, handle);
+			return NULL;
+		}
+
+		debug_alloc(kgem, alloc);
+		__kgem_bo_init(&bo->base, handle, alloc);
+		DBG(("%s: created handle=%d for buffer\n",
+		     __FUNCTION__, bo->base.handle));
+
+		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+		if (bo->mem) {
+			bo->mmapped = true;
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->base.reusable = false;
+			bo->base.vmap = true;
+			goto init;
+		} else {
+			bo->base.refcnt = 0; /* for valgrind */
+			kgem_bo_free(kgem, &bo->base);
+			bo = NULL;
+		}
+	}
+
+	if (flags & KGEM_BUFFER_WRITE && kgem->has_vmap) {
 		bo = partial_bo_alloc(alloc);
 		if (bo) {
 			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
commit 33443f7ee48fa54b6f4d09c93cddac0e32314b9c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 12:51:54 2012 +0100

    sna: Add a couple of DBG options to control accelerated up/downloads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3391a37..6d7f51c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -63,6 +63,8 @@
 #define USE_SHM_VMAP 0
 
 #define MIGRATE_ALL 0
+#define DBG_NO_CPU_UPLOAD 0
+#define DBG_NO_CPU_DOWNLOAD 0
 
 #define ACCEL_FILL_SPANS 1
 #define ACCEL_SET_SPANS 1
@@ -1071,15 +1073,16 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 static inline bool use_cpu_bo_for_download(struct sna *sna,
 					   struct sna_pixmap *priv)
 {
+	if (DBG_NO_CPU_DOWNLOAD)
+		return false;
+
 	return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
 }
 
 static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
 {
-#if 0
-	if (pixmap->devPrivate.ptr == NULL)
-		return true;
-#endif
+	if (DBG_NO_CPU_UPLOAD)
+		return false;
 
 	if (priv->cpu_bo == NULL)
 		return false;
commit 924060293826a1cc0d9d7bc26e913e46c6b2d054
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 12:03:47 2012 +0100

    sna: Discard and recreate the CPU buffer when busy during move-to-cpu
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b231c51..3391a37 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1127,8 +1127,6 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		assert(flags & MOVE_WRITE);
 		DBG(("%s: no readbck, discarding gpu damage [%d], pending clear[%d]\n",
 		     __FUNCTION__, priv->gpu_damage != NULL, priv->clear));
-		sna_damage_destroy(&priv->gpu_damage);
-		priv->clear = false;
 
 		if (priv->create & KGEM_CAN_CREATE_GPU &&
 		    pixmap_inplace(sna, pixmap, priv)) {
@@ -1165,6 +1163,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				       pixmap->drawable.height);
 			sna_damage_destroy(&priv->cpu_damage);
 			priv->undamaged = false;
+			priv->clear = false;
 			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
@@ -1177,23 +1176,20 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		}
 
 skip_inplace_map:
+		sna_damage_destroy(&priv->gpu_damage);
 		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) {
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
 			if (kgem_bo_is_busy(priv->cpu_bo)) {
 				DBG(("%s: discarding busy CPU bo\n", __FUNCTION__));
+				assert(priv->gpu_bo);
+				assert(priv->gpu_damage == NULL);
+
 				sna_damage_destroy(&priv->cpu_damage);
-				list_del(&priv->list);
-				if (priv->undamaged) {
-					sna_damage_all(&priv->gpu_damage,
-						       pixmap->drawable.width,
-						       pixmap->drawable.height);
-					list_del(&priv->list);
-					priv->undamaged = false;
-				}
-				priv->cpu = false;
-				assert(!priv->cpu_bo->sync);
+				priv->undamaged = false;
+
+				sna_pixmap_free_gpu(sna, priv);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
commit 7024ef771ff170e61e788b5216c86b46e0f8ae6a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 12:15:54 2012 +0100

    sna: Add a few DBG to show when CPU bos are being used for xfer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1734a8f..b231c51 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1068,13 +1068,13 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
 }
 
-static inline bool use_cpu_bo_for_write(struct sna *sna,
-					struct sna_pixmap *priv)
+static inline bool use_cpu_bo_for_download(struct sna *sna,
+					   struct sna_pixmap *priv)
 {
 	return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
 }
 
-static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
+static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv)
 {
 #if 0
 	if (pixmap->devPrivate.ptr == NULL)
@@ -1287,11 +1287,13 @@ skip_inplace_map:
 		if (n) {
 			bool ok = false;
 
-			if (use_cpu_bo_for_write(sna, priv))
+			if (use_cpu_bo_for_download(sna, priv)) {
+				DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    box, n, COPY_LAST);
+			}
 			if (!ok)
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
@@ -1746,11 +1748,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = false;
-			if (use_cpu_bo_for_write(sna, priv))
+			if (use_cpu_bo_for_download(sna, priv)) {
+				DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    box, n, COPY_LAST);
+			}
 			if (!ok)
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
@@ -1854,11 +1858,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (n) {
 					bool ok = false;
 
-					if (use_cpu_bo_for_write(sna, priv))
+					if (use_cpu_bo_for_download(sna, priv)) {
+						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
 									    box, n, COPY_LAST);
+					}
 
 					if (!ok)
 						sna_read_boxes(sna,
@@ -1879,11 +1885,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				DBG(("%s: region wholly inside damage\n",
 				     __FUNCTION__));
 
-				if (use_cpu_bo_for_write(sna, priv))
+				if (use_cpu_bo_for_download(sna, priv)) {
+					DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
 								    pixmap, priv->cpu_bo, 0, 0,
 								    box, n, COPY_LAST);
+				}
 				if (!ok)
 					sna_read_boxes(sna,
 						       priv->gpu_bo, 0, 0,
@@ -1904,11 +1912,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					DBG(("%s: region intersects damage\n",
 					     __FUNCTION__));
 
-					if (use_cpu_bo_for_write(sna, priv))
+					if (use_cpu_bo_for_download(sna, priv)) {
+						DBG(("%s: using CPU bo for download from GPU\n", __FUNCTION__));
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
 									    box, n, COPY_LAST);
+					}
 					if (!ok)
 						sna_read_boxes(sna,
 							       priv->gpu_bo, 0, 0,
@@ -2163,11 +2173,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		if (n) {
 			bool ok = false;
 
-			if (use_cpu_bo_for_read(priv))
+			if (use_cpu_bo_for_upload(priv)) {
+				DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    box, n, 0);
+			}
 			if (!ok) {
 				if (pixmap->devPrivate.ptr == NULL) {
 					assert(priv->stride && priv->ptr);
@@ -2201,11 +2213,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		bool ok = false;
-		if (use_cpu_bo_for_read(priv))
+		if (use_cpu_bo_for_upload(priv)) {
+			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, 1, 0);
+		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
@@ -2230,11 +2244,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 		box = REGION_RECTS(&i);
 		ok = false;
-		if (use_cpu_bo_for_read(priv))
+		if (use_cpu_bo_for_upload(priv)) {
+			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
-						    box, n,0);
+						    box, n, 0);
+		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
@@ -2674,11 +2690,13 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
 		ok = false;
-		if (use_cpu_bo_for_read(priv))
+		if (use_cpu_bo_for_upload(priv)) {
+			DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n, 0);
+		}
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
commit c564414157e27417f0de1c0542dafd9b47e01eda
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 09:34:30 2012 +0100

    sna: Disable the scanout flush when switched off via DPMS
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 87a69ba..b31f08d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -59,6 +59,7 @@
 
 struct sna_crtc {
 	struct drm_mode_modeinfo kmode;
+	int dpms_mode;
 	struct kgem_bo *bo;
 	uint32_t cursor;
 	bool shadow;
@@ -655,11 +656,38 @@ sna_crtc_disable(xf86CrtcPtr crtc)
 	}
 }
 
+static void update_flush_interval(struct sna *sna)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	int i, max_vrefresh = 0;
+
+	for (i = 0; i < xf86_config->num_crtc; i++) {
+		if (!xf86_config->crtc[i]->enabled)
+			continue;
+
+		if (to_sna_crtc(xf86_config->crtc[i])->dpms_mode != DPMSModeOn)
+			continue;
+
+		max_vrefresh = max(max_vrefresh,
+				   xf86ModeVRefresh(&xf86_config->crtc[i]->mode));
+	}
+
+	if (max_vrefresh == 0)
+		sna->vblank_interval = 0;
+	else
+		sna->vblank_interval = 1000 / max_vrefresh; /* Hz -> ms */
+
+	DBG(("max_vrefresh=%d, vblank_interval=%d ms\n",
+	       max_vrefresh, sna->vblank_interval));
+}
+
 static void
 sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 {
 	DBG(("%s(pipe %d, dpms mode -> %d):= active=%d\n",
 	     __FUNCTION__, to_sna_crtc(crtc)->pipe, mode, mode == DPMSModeOn));
+	to_sna_crtc(crtc)->dpms_mode = mode;
+	update_flush_interval(to_sna(crtc->scrn));
 }
 
 void sna_mode_disable_unused(struct sna *sna)
@@ -799,28 +827,6 @@ cleanup_scratch:
 	FreeScratchPixmapHeader(scratch);
 }
 
-static void update_flush_interval(struct sna *sna)
-{
-	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
-	int i, max_vrefresh = 0;
-
-	for (i = 0; i < xf86_config->num_crtc; i++) {
-		if (!xf86_config->crtc[i]->enabled)
-			continue;
-
-		max_vrefresh = max(max_vrefresh,
-				   xf86ModeVRefresh(&xf86_config->crtc[i]->mode));
-	}
-
-	if (max_vrefresh == 0)
-		sna->vblank_interval = 0;
-	else
-		sna->vblank_interval = 1000 / max_vrefresh; /* Hz -> ms */
-
-	DBG(("max_vrefresh=%d, vblank_interval=%d ms\n",
-	       max_vrefresh, sna->vblank_interval));
-}
-
 static bool use_shadow(struct sna *sna, xf86CrtcPtr crtc)
 {
 	RRTransformPtr transform;
@@ -1107,8 +1113,6 @@ retry: /* Attach per-crtc pixmap or direct */
 	if (saved_bo)
 		kgem_bo_destroy(&sna->kgem, saved_bo);
 
-	update_flush_interval(sna);
-
 	sna_crtc_randr(crtc);
 	if (sna_crtc->shadow)
 		sna_crtc_damage(crtc);
@@ -2619,6 +2623,8 @@ void sna_mode_update(struct sna *sna)
 		if (!crtc->active || !sna_crtc_is_bound(sna, crtc))
 			sna_crtc_disable(crtc);
 	}
+
+	update_flush_interval(sna);
 }
 
 static void
commit 536e7ab756d6821db79e4cd79a250af1c0f7d5a3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 16 12:16:26 2012 +0100

    intel: Don't use stdbool without declaring it
    
    Reported-by: Fabio Pedretti <fabio.ped at libero.it>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 89b72d8..da920ef 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -570,7 +570,7 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 	if (xf86ReturnOptValBool(intel->Options, OPTION_TILING_FB, FALSE))
 		intel->tiling &= ~INTEL_TILING_FB;
 	if (!can_accelerate_blt(intel)) {
-		intel->force_fallback = true;
+		intel->force_fallback = TRUE;
 		intel->tiling &= ~INTEL_TILING_FB;
 	}
 
commit 0c32be15b06ad63c1fc1371de879f2d879080f6b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 19:26:48 2012 +0100

    uxa: Remove Shadow hack
    
    This was an incomplete hack so deprecate in favour of Shadow-on-Steroids,
    SNA.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47324
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/man/intel.man b/man/intel.man
index f74ee80..d231fd0 100644
--- a/man/intel.man
+++ b/man/intel.man
@@ -156,19 +156,6 @@ i.e. perform synchronous rendering.
 .IP
 Default: Disabled
 .TP
-.BI "Option \*qShadow\*q \*q" boolean \*q
-This option controls the use of GPU acceleration and placement of auxiliary
-buffers in memory. Enabling the Shadow will disable all use of the GPU for
-RENDER acceleration and force software-fallbacks for all but updating the
-scan-out buffer. Hardware overlay is still supported so Xv will continue to
-playback videos using the GPU, but GL will be forced to use software
-rasterisation as well.  This is a last resort measure for systems with
-crippling bugs, such as early 8xx chipsets. It is still hoped that we will
-find a workaround to enable as much hardware acceleration on those
-architectures as is possible, but until then, using a shadow buffer should
-maintain system stability.
-.IP
-Default: Disabled
 .TP
 .BI "Option \*qSwapbuffersWait\*q \*q" boolean \*q
 This option controls the behavior of glXSwapBuffers and glXCopySubBufferMESA
diff --git a/src/Makefile.am b/src/Makefile.am
index a7043d1..feed4ce 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -64,7 +64,6 @@ intel_drv_la_SOURCES += \
          intel_driver.h \
 	 intel_glamor.h \
          intel_memory.c \
-	 intel_shadow.c \
 	 intel_uxa.c \
          intel_video.c \
          intel_video.h \
diff --git a/src/intel.h b/src/intel.h
index 1555acd..2076b2f 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -170,9 +170,6 @@ typedef struct intel_screen_private {
 	PixmapPtr back_pixmap;
 	unsigned int back_name;
 	long front_pitch, front_tiling;
-	void *shadow_buffer;
-	int shadow_stride;
-	DamagePtr shadow_damage;
 
 	dri_bufmgr *bufmgr;
 
@@ -328,10 +325,8 @@ typedef struct intel_screen_private {
 	Bool use_pageflipping;
 	Bool use_triple_buffer;
 	Bool force_fallback;
-	Bool can_blt;
 	Bool has_kernel_flush;
 	Bool needs_flush;
-	Bool use_shadow;
 
 	struct _DRI2FrameEvent *pending_flip[2];
 
@@ -640,10 +635,6 @@ void intel_uxa_block_handler(intel_screen_private *intel);
 Bool intel_get_aperture_space(ScrnInfoPtr scrn, drm_intel_bo ** bo_table,
 			      int num_bos);
 
-/* intel_shadow.c */
-void intel_shadow_blt(intel_screen_private *intel);
-void intel_shadow_create(struct intel_screen_private *intel);
-
 static inline Bool intel_pixmap_is_offscreen(PixmapPtr pixmap)
 {
 	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);
diff --git a/src/intel_display.c b/src/intel_display.c
index 0a80aa8..bfe5918 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -634,7 +634,7 @@ intel_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr rotate_pixmap, void *data)
 		intel_crtc->rotate_bo = NULL;
 	}
 
-	intel->shadow_present = intel->use_shadow;
+	intel->shadow_present = FALSE;
 }
 
 static void
diff --git a/src/intel_dri.c b/src/intel_dri.c
index 0405937..d027a64 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -96,34 +96,13 @@ static uint32_t pixmap_flink(PixmapPtr pixmap)
 
 static PixmapPtr get_front_buffer(DrawablePtr drawable)
 {
-	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
-	intel_screen_private *intel = intel_get_screen_private(scrn);
 	PixmapPtr pixmap;
 
 	pixmap = get_drawable_pixmap(drawable);
-	if (!intel->use_shadow) {
-		pixmap->refcnt++;
-	} else if (pixmap_is_scanout(pixmap)) {
-		pixmap = fbCreatePixmap(screen, 0, 0, drawable->depth, 0);
-		if (pixmap) {
-			screen->ModifyPixmapHeader(pixmap,
-						   drawable->width,
-						   drawable->height,
-						   0, 0,
-						   intel->front_pitch,
-						   intel->front_buffer->virtual);
-
-			intel_set_pixmap_bo(pixmap, intel->front_buffer);
-			intel_get_pixmap_private(pixmap)->offscreen = 0;
-			if (WindowDrawable(drawable->type))
-				screen->SetWindowPixmap((WindowPtr)drawable,
-							pixmap);
-		}
-	} else if (intel_get_pixmap_bo(pixmap)) {
-		pixmap->refcnt++;
-	} else
-		pixmap = NULL;
+	if (!intel_get_pixmap_bo(pixmap))
+		return NULL;
+
+	pixmap->refcnt++;
 	return pixmap;
 }
 
@@ -186,72 +165,6 @@ static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 	return old;
 }
 
-static PixmapPtr fixup_shadow(DrawablePtr drawable, PixmapPtr pixmap)
-{
-	ScreenPtr screen = drawable->pScreen;
-	PixmapPtr old = get_drawable_pixmap(drawable);
-	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);
-	GCPtr gc;
-
-	/* With an active shadow buffer, 2D pixmaps are created in
-	 * system memory and GPU acceleration of 2D render operations
-	 * is *disabled*. As DRI is still enabled, we create hardware
-	 * buffers for the clients, and need to mix this with the
-	 * 2D rendering. So we replace the system pixmap with a GTT
-	 * mapping (with the kernel enforcing coherency between
-	 * CPU and GPU) for 2D and provide the bo so that clients
-	 * can write directly to it (or read from it in the case
-	 * of TextureFromPixmap) using the GPU.
-	 *
-	 * So for a compositor with a GL backend (i.e. compiz) we have
-	 * smooth wobbly windows but incur the cost of uncached 2D rendering,
-	 * however 3D applications (games and clutter) are still fully
-	 * accelerated.
-	 */
-
-	if (drm_intel_gem_bo_map_gtt(priv->bo))
-		return pixmap;
-
-	screen->ModifyPixmapHeader(pixmap,
-				   drawable->width,
-				   drawable->height,
-				   0, 0,
-				   priv->stride,
-				   priv->bo->virtual);
-	priv->offscreen = 0;
-
-	/* Copy the current contents of the pixmap to the bo. */
-	gc = GetScratchGC(drawable->depth, screen);
-	if (gc) {
-		ValidateGC(&pixmap->drawable, gc);
-		gc->ops->CopyArea(drawable, &pixmap->drawable,
-				  gc,
-				  0, 0,
-				  drawable->width,
-				  drawable->height,
-				  0, 0);
-		FreeScratchGC(gc);
-	}
-
-	intel_set_pixmap_private(pixmap, NULL);
-	screen->DestroyPixmap(pixmap);
-
-	/* Redirect 2D rendering to the uncached GTT map of the bo */
-	screen->ModifyPixmapHeader(old,
-				   drawable->width,
-				   drawable->height,
-				   0, 0,
-				   priv->stride,
-				   priv->bo->virtual);
-
-	/* And redirect the pixmap to the new bo (for 3D). */
-	intel_set_pixmap_private(old, priv);
-	old->refcnt++;
-
-	intel_get_screen_private(xf86ScreenToScrn(screen))->needs_flush = TRUE;
-	return old;
-}
-
 #if DRI2INFOREC_VERSION < 2
 static DRI2BufferPtr
 I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
@@ -324,12 +237,8 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 				goto unwind;
 			}
 
-			if (attachment == DRI2BufferFrontLeft) {
-				if (!is_glamor_pixmap)
-					pixmap = fixup_shadow(drawable, pixmap);
-				else
-					pixmap = fixup_glamor(drawable, pixmap);
-			}
+			if (attachment == DRI2BufferFrontLeft)
+				pixmap = fixup_glamor(drawable, pixmap);
 		}
 
 		if (attachments[i] == DRI2BufferDepth)
@@ -482,12 +391,8 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 			free(buffer);
 			return NULL;
 		}
-		if (attachment == DRI2BufferFrontLeft) {
-			if (!is_glamor_pixmap)
-				pixmap = fixup_shadow(drawable, pixmap);
-			else
-				pixmap = fixup_glamor(drawable, pixmap);
-		}
+		if (attachment == DRI2BufferFrontLeft)
+			pixmap = fixup_glamor(drawable, pixmap);
 	}
 
 	buffer->attachment = attachment;
@@ -638,38 +543,11 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 	 * that will happen before the client tries to render
 	 * again. */
 
-	/* Re-enable 2D acceleration... */
-	if (intel->use_shadow) {
-		struct intel_pixmap *src_pixmap, *dst_pixmap;
-
-		src_pixmap = intel_get_pixmap_private(get_drawable_pixmap(src));
-		if (src_pixmap) {
-			src_pixmap->offscreen = 1;
-			src_pixmap->busy = 1;
-		}
+	gc->ops->CopyArea(src, dst, gc,
+			  0, 0,
+			  drawable->width, drawable->height,
+			  0, 0);
 
-		dst_pixmap = intel_get_pixmap_private(get_drawable_pixmap(dst));
-		if (dst_pixmap) {
-			dst_pixmap->offscreen = 1;
-			dst_pixmap->busy = 1;
-		}
-
-		gc->ops->CopyArea(src, dst, gc,
-				  0, 0,
-				  drawable->width, drawable->height,
-				  0, 0);
-
-		/* and restore 2D/3D coherency */
-		if (src_pixmap)
-			src_pixmap->offscreen = 0;
-		if (dst_pixmap)
-			dst_pixmap->offscreen = 0;
-	} else {
-		gc->ops->CopyArea(src, dst, gc,
-				  0, 0,
-				  drawable->width, drawable->height,
-				  0, 0);
-	}
 	FreeScratchGC(gc);
 }
 
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 78f7ce3..89b72d8 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -569,23 +569,12 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 		intel->tiling &= ~INTEL_TILING_2D;
 	if (xf86ReturnOptValBool(intel->Options, OPTION_TILING_FB, FALSE))
 		intel->tiling &= ~INTEL_TILING_FB;
-
-	intel->can_blt = can_accelerate_blt(intel);
-	intel->has_kernel_flush = has_kernel_flush(intel);
-	intel->use_shadow = !intel->can_blt;
-
-	if (xf86IsOptionSet(intel->Options, OPTION_SHADOW)) {
-		intel->use_shadow =
-			xf86ReturnOptValBool(intel->Options,
-					     OPTION_SHADOW,
-					     FALSE);
+	if (!can_accelerate_blt(intel)) {
+		intel->force_fallback = true;
+		intel->tiling &= ~INTEL_TILING_FB;
 	}
 
-	if (intel->use_shadow) {
-		xf86DrvMsg(scrn->scrnIndex, X_CONFIG,
-			   "Shadow buffer enabled,"
-			   " 2D GPU acceleration disabled.\n");
-	}
+	intel->has_kernel_flush = has_kernel_flush(intel);
 
 	intel->has_relaxed_fencing =
 		xf86ReturnOptValBool(intel->Options,
@@ -1126,26 +1115,11 @@ static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 	}
 
 	if (intel->front_buffer) {
-		if (!intel->use_shadow)
-			intel_set_pixmap_bo(screen->GetScreenPixmap(screen),
-					    NULL);
 		intel_mode_remove_fb(intel);
 		drm_intel_bo_unreference(intel->front_buffer);
 		intel->front_buffer = NULL;
 	}
 
-	if (intel->shadow_buffer) {
-		free(intel->shadow_buffer);
-		intel->shadow_buffer = NULL;
-	}
-
-	if (intel->shadow_damage) {
-		DamageUnregister(&screen->GetScreenPixmap(screen)->drawable,
-				 intel->shadow_damage);
-		DamageDestroy(intel->shadow_damage);
-		intel->shadow_damage = NULL;
-	}
-
 	intel_batch_teardown(scrn);
 
 	if (INTEL_INFO(intel)->gen >= 40)
diff --git a/src/intel_options.c b/src/intel_options.c
index 2e112f9..77832aa 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -31,7 +31,6 @@ const OptionInfoRec intel_options[] = {
 	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, 0},
 	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, 0},
 	{OPTION_BUFFER_CACHE,	"BufferCache",	OPTV_BOOLEAN,   {0},    1},
-	{OPTION_SHADOW,		"Shadow",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_TRIPLE_BUFFER,	"TripleBuffer", OPTV_BOOLEAN,	{0},	1},
 #endif
 	{-1,			NULL,		OPTV_NONE,	{0},	0}
diff --git a/src/intel_options.h b/src/intel_options.h
index 8d0312c..233908c 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -37,7 +37,6 @@ enum intel_options {
 	OPTION_DEBUG_FLUSH_CACHES,
 	OPTION_DEBUG_WAIT,
 	OPTION_BUFFER_CACHE,
-	OPTION_SHADOW,
 	OPTION_TRIPLE_BUFFER,
 #endif
 	NUM_OPTIONS,
diff --git a/src/intel_shadow.c b/src/intel_shadow.c
deleted file mode 100644
index 6892567..0000000
--- a/src/intel_shadow.c
+++ /dev/null
@@ -1,200 +0,0 @@
-/**************************************************************************
-
-Copyright 2010 Intel Corporation
-All Rights Reserved.
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sub license, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial portions
-of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
-IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
-ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-#include "xf86.h"
-#include "intel.h"
-#include "i830_reg.h"
-
-static dri_bo *
-intel_shadow_create_bo(intel_screen_private *intel,
-		       int16_t x1, int16_t y1,
-		       int16_t x2, int16_t y2,
-		       int *pitch)
-{
-	int w = x2 - x1, h = y2 - y1;
-	int size = h * w * intel->cpp;
-	dri_bo *bo;
-
-	bo = drm_intel_bo_alloc(intel->bufmgr, "shadow", size, 0);
-	if (bo && drm_intel_gem_bo_map_gtt(bo) == 0) {
-		char *dst = bo->virtual;
-		char *src = intel->shadow_buffer;
-		int src_pitch = intel->shadow_stride;
-		int row_length = w * intel->cpp;
-		int num_rows = h;
-		src += y1 * src_pitch + x1 * intel->cpp;
-		do {
-			memcpy (dst, src, row_length);
-			src += src_pitch;
-			dst += row_length;
-		} while (--num_rows);
-		drm_intel_gem_bo_unmap_gtt(bo);
-	}
-
-	*pitch = w * intel->cpp;
-	return bo;
-}
-
-static void intel_shadow_memcpy(intel_screen_private *intel)
-{
-	char *src_data, *dst_data;
-	unsigned int src_pitch, dst_pitch;
-	RegionPtr region;
-	BoxPtr box;
-	int n;
-
-	if (drm_intel_gem_bo_map_gtt(intel->front_buffer))
-		return;
-
-	src_data = intel->shadow_buffer;
-	dst_data = intel->front_buffer->virtual;
-
-	src_pitch = intel->shadow_stride;
-	dst_pitch = intel->front_pitch;
-
-	region = DamageRegion(intel->shadow_damage);
-	box = REGION_RECTS(region);
-	n = REGION_NUM_RECTS(region);
-	while (n--) {
-		char *src = src_data + box->y1*src_pitch + box->x1*intel->cpp;
-		char *dst = dst_data + box->y1*dst_pitch + box->x1*intel->cpp;
-		int len = (box->x2 - box->x1)*intel->cpp;
-		int row = box->y2 - box->y1;
-		while (row--) {
-			memcpy(dst, src, len);
-			src += src_pitch;
-			dst += dst_pitch;
-		}
-		box++;
-	}
-}
-
-void intel_shadow_blt(intel_screen_private *intel)
-{
-	ScrnInfoPtr scrn = intel->scrn;
-	uint32_t blt, br13;
-	RegionPtr region;
-	BoxPtr box;
-	int n;
-
-	/* Can we trust the BLT? Otherwise do an uncached mmecy. */
-	if (!intel->can_blt || IS_GEN2(intel)) {
-		intel_shadow_memcpy(intel);
-		return;
-	}
-
-
-	blt = XY_SRC_COPY_BLT_CMD;
-
-	br13 = intel->front_pitch;
-	if (intel->front_tiling && INTEL_INFO(intel)->gen >= 40) {
-		br13 >>= 2;
-		blt |= XY_SRC_COPY_BLT_DST_TILED;
-	}
-	switch (intel->cpp) {
-		default:
-		case 4: blt |=
-			XY_SRC_COPY_BLT_WRITE_ALPHA | XY_SRC_COPY_BLT_WRITE_RGB;
-			br13 |= 1 << 25; /* RGB8888 */
-		case 2: br13 |= 1 << 24; /* RGB565 */
-		case 1: break;
-	}
-	br13 |= 0xcc << 16; /* copy */
-
-	region = DamageRegion(intel->shadow_damage);
-	box = REGION_RECTS(region);
-	n = REGION_NUM_RECTS(region);
-	while (n--) {
-		int pitch;
-		dri_bo *bo;
-
-		bo = intel_shadow_create_bo(intel,
-					    box->x1, box->y1,
-					    box->x2, box->y2,
-					    &pitch);
-		if (bo == NULL)
-			return;
-
-		BEGIN_BATCH_BLT(8);
-		OUT_BATCH(blt);
-		OUT_BATCH(br13);
-		OUT_BATCH(box->y1 << 16 | box->x1);
-		OUT_BATCH(box->y2 << 16 | box->x2);
-		OUT_RELOC_FENCED(intel->front_buffer,
-				I915_GEM_DOMAIN_RENDER,
-				I915_GEM_DOMAIN_RENDER,
-				0);
-		OUT_BATCH(0);
-		OUT_BATCH(pitch);
-		OUT_RELOC(bo, I915_GEM_DOMAIN_RENDER, 0, 0);
-
-		ADVANCE_BATCH();
-
-		drm_intel_bo_unreference(bo);
-		box++;
-	}
-}
-
-void intel_shadow_create(struct intel_screen_private *intel)
-{
-	ScrnInfoPtr scrn = intel->scrn;
-	ScreenPtr screen = scrn->pScreen;
-	PixmapPtr pixmap;
-	int stride;
-	void *buffer;
-
-	pixmap = screen->GetScreenPixmap(screen);
-	stride = intel->cpp*scrn->virtualX;
-	buffer = malloc(stride * scrn->virtualY);
-	if (buffer &&
-	    screen->ModifyPixmapHeader(pixmap,
-				       scrn->virtualX, scrn->virtualY,
-				       -1, -1,
-				       stride, buffer)) {
-		free(intel->shadow_buffer);
-		intel->shadow_buffer = buffer;
-	} else {
-		free(buffer);
-		stride = intel->shadow_stride;
-	}
-
-	if (!intel->shadow_damage) {
-		intel->shadow_damage =
-			DamageCreate(NULL, NULL,
-				     DamageReportNone, TRUE,
-				     screen, intel);
-		DamageRegister(&pixmap->drawable, intel->shadow_damage);
-		DamageSetReportAfterOp(intel->shadow_damage, TRUE);
-	}
-
-	scrn->displayWidth = stride / intel->cpp;
-	intel->shadow_stride = stride;
-}
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 383efc5..5aad062 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -989,12 +989,6 @@ static void intel_throttle(intel_screen_private *intel)
 
 void intel_uxa_block_handler(intel_screen_private *intel)
 {
-	if (intel->shadow_damage &&
-	    pixman_region_not_empty(DamageRegion(intel->shadow_damage))) {
-		intel_shadow_blt(intel);
-		DamageEmpty(intel->shadow_damage);
-	}
-
 	/* Emit a flush of the rendering cache, or on the 965
 	 * and beyond rendering results may not hit the
 	 * framebuffer until significantly later.
@@ -1025,9 +1019,6 @@ intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 	if (depth == 1 || intel->force_fallback)
 		return fbCreatePixmap(screen, w, h, depth, usage);
 
-	if (intel->use_shadow && (usage & INTEL_CREATE_PIXMAP_DRI2) == 0)
-		return fbCreatePixmap(screen, w, h, depth, usage);
-
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE && w <= 32 && h <= 32)
 		return fbCreatePixmap(screen, w, h, depth, usage);
 
@@ -1147,6 +1138,7 @@ static Bool intel_uxa_destroy_pixmap(PixmapPtr pixmap)
 Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
+	PixmapPtr pixmap = screen->GetScreenPixmap(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	dri_bo *bo = intel->front_buffer;
 
@@ -1155,20 +1147,16 @@ Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 
 	drm_intel_gem_bo_map_gtt(bo);
 
-	if (intel->use_shadow) {
-		intel_shadow_create(intel);
-	} else {
-		PixmapPtr pixmap = screen->GetScreenPixmap(screen);
-		intel_set_pixmap_bo(pixmap, bo);
-		intel_get_pixmap_private(pixmap)->pinned = 1;
-		screen->ModifyPixmapHeader(pixmap,
-					   scrn->virtualX,
-					   scrn->virtualY,
-					   -1, -1,
-					   intel->front_pitch,
-					   NULL);
-		scrn->displayWidth = intel->front_pitch / intel->cpp;
-	}
+	pixmap = screen->GetScreenPixmap(screen);
+	intel_set_pixmap_bo(pixmap, bo);
+	intel_get_pixmap_private(pixmap)->pinned = 1;
+	screen->ModifyPixmapHeader(pixmap,
+				   scrn->virtualX,
+				   scrn->virtualY,
+				   -1, -1,
+				   intel->front_pitch,
+				   NULL);
+	scrn->displayWidth = intel->front_pitch / intel->cpp;
 
 	if (!intel_glamor_create_screen_resources(screen))
 		return FALSE;
diff --git a/src/intel_video.c b/src/intel_video.c
index 0e9845d..c8a9a92 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -353,8 +353,7 @@ void I830InitVideo(ScreenPtr screen)
 	 * supported hardware.
 	 */
 	if (scrn->bitsPerPixel >= 16 &&
-	    INTEL_INFO(intel)->gen >= 30 &&
-	    !intel->use_shadow) {
+	    INTEL_INFO(intel)->gen >= 30) {
 		texturedAdaptor = I830SetupImageVideoTextured(screen);
 		if (texturedAdaptor != NULL) {
 			xf86DrvMsg(scrn->scrnIndex, X_INFO,
commit 6a18a0936eafc45ab920ab0eecf2fc2a601c41a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 15 20:26:00 2012 +0100

    configure: version bump for 2.20.0 release
    
    The day SNA hits mainstream...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index cc74879..8e30d9e 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,45 @@
+Release 2.20.0 (2012-07-15)
+===========================
+First the big news, a new acceleration method that aims to be faster and
+consume far less CPU than UXA is now available for selection at runtime.
+This snazzy new architecture can be selected through use of
+  Option "AccelMethod" "sna"
+in your xorg.conf. Whilst it has been under development for some time, it
+has not yet had the same degree of widespread testing of UXA, so tread
+lightly. Try it and if you spot anything that can be improved, please do
+report a bug.
+
+Otherwise we have the usual smattering of bug fixes for UXA:
+
+ * Use a white source whilst adding glyphs to the glyph mask
+   (This fixes blank glyphs if using a font that mixes ARGB and A glyphs.)
+
+ * Avoid fallbacks for glyph-to-dst in e.g. gnome-terminal
+   https://bugs.freedesktop.org/show_bug.cgi?id=50508
+
+ * Force unused outputs off when VT switching
+   https://bugs.freedesktop.org/show_bug.cgi?id=50772
+
+ * Copy the fbcon across to the Screen pixmap at startup.
+   (This patch has been kicking around in the distributions for years.)
+
+ * Many missed malloc failures checks and forgotten frees found by a static
+   analyzer. Thanks Zdenek Kabelac!
+
+ * Leak of the back buffer when terminating an application after pageflipping
+   https://bugs.freedesktop.org/show_bug.cgi?id=50670
+
+ * Double check that the pipe is on before emitting a WAIT_ON_EVENT.
+   In conjunction with an uptodate kernel, this should eliminate any
+   hangs when changing resolutions or adding/removing displays.
+   https://bugs.freedesktop.org/show_bug.cgi?id=50668
+
+ * Update to new Xorg APIs. Future proofing for the next generation of
+   hotplug Xorg display servers.
+
+Many thanks to everyone who has reported a bug and otherwise helped to
+improve the driver.
+
 Release 2.19.0 (2012-04-29)
 ===========================
 More stability fixes for UXA and support for another variant of IvyBridge.
diff --git a/configure.ac b/configure.ac
index 8cbbbc1..90dae7e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.19.0],
+        [2.20.0],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 6fa059330decd1437eef4928d732ec91fd4e17e7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 15 14:04:48 2012 +0100

    sna: Avoid creating a CPU bo for uploads if we already have a large GPU bo
    
    And vice-versa if already have a large CPU bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bc73dee..1734a8f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2990,6 +2990,19 @@ static bool upload_inplace(struct sna *sna,
 			   struct sna_pixmap *priv,
 			   RegionRec *region)
 {
+	if (priv->create & KGEM_CAN_CREATE_LARGE) {
+		if (priv->gpu_bo) {
+			DBG(("%s: yes, large buffer and already have GPU bo\n",
+			     __FUNCTION__));
+			return true;
+		}
+		if (priv->cpu_bo){
+			DBG(("%s: no, large buffer and already have CPU bo\n",
+			     __FUNCTION__));
+			return false;
+		}
+	}
+
 	if (!region_inplace(sna, pixmap, region, priv, true)) {
 		DBG(("%s? no, region not suitable\n", __FUNCTION__));
 		return false;
commit a253c95ec63b2b075e66ae7380fed6a73469eba5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 15 13:32:35 2012 +0100

    sna: Prefer uploads to be staged in snoopable bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 92aca23..bc73dee 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2995,6 +2995,25 @@ static bool upload_inplace(struct sna *sna,
 		return false;
 	}
 
+	if (sna->kgem.has_llc) {
+		if (priv->cpu_bo) {
+			if (priv->cpu_damage &&
+			    kgem_bo_is_busy(priv->cpu_bo) &&
+			    !region_subsumes_damage(region, priv->cpu_damage)) {
+				DBG(("%s? yes, CPU bo is busy\n", __FUNCTION__));
+				return true;
+			}
+
+			DBG(("%s? no, have CPU bo\n", __FUNCTION__));
+			return false;
+		}
+
+		if (priv->create & KGEM_CAN_CREATE_CPU) {
+			DBG(("%s? no, can create CPU bo\n", __FUNCTION__));
+			return false;
+		}
+	}
+
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
@@ -3016,13 +3035,6 @@ static bool upload_inplace(struct sna *sna,
 
 	}
 
-	if (priv->cpu_bo) {
-		if (kgem_bo_is_busy(priv->cpu_bo)) {
-			DBG(("%s? yes, CPU bo is busy\n", __FUNCTION__));
-			return true;
-		}
-	}
-
 	DBG(("%s? no\n", __FUNCTION__));
 	return false;
 }
commit ef6d94a8444927941db108811e1a26357dc3f18e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 15 11:46:53 2012 +0100

    sna: Simply reverse all the boxes if dx <= 0 and dy <= 0
    
    In this fairly common case, avoid both the double pass and use a simpler
    algorithm as we can simply reverse the order of the boxes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ad12615..92aca23 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3699,7 +3699,17 @@ reorder_boxes(BoxPtr box, int n, int dx, int dy)
 
 	DBG(("%s x %d dx=%d, dy=%d\n", __FUNCTION__, n, dx, dy));
 
-	if (dy < 0) {
+	if (dy <= 0 && dx <= 0) {
+		new = malloc(sizeof(BoxRec) * n);
+		if (new == NULL)
+			return NULL;
+
+		tmp = new;
+		next = box + n;
+		do {
+			*tmp++ = *--next;
+		} while (next != box);
+	} else if (dy < 0) {
 		new = malloc(sizeof(BoxRec) * n);
 		if (new == NULL)
 			return NULL;
@@ -3714,16 +3724,11 @@ reorder_boxes(BoxPtr box, int n, int dx, int dy)
 			base = next;
 		}
 		new -= n;
-		box = new;
-	}
-
-	if (dx < 0) {
+	} else {
 		new = malloc(sizeof(BoxRec) * n);
-		if (!new) {
-			if (dy < 0)
-				free(box);
+		if (!new)
 			return NULL;
-		}
+
 		base = next = box;
 		while (base < box + n) {
 			while (next < box + n && next->y1 == base->y1)
@@ -3734,10 +3739,9 @@ reorder_boxes(BoxPtr box, int n, int dx, int dy)
 			base = next;
 		}
 		new -= n;
-		box = new;
 	}
 
-	return box;
+	return new;
 }
 
 static void
commit 6601a943ff968ac39ba198351c50dc883cb4232e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 15 11:39:56 2012 +0100

    sna: Keep track of the base pointer for the reordered boxes
    
    So that we avoid freeing an invalid pointer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d3787c1..ad12615 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3814,6 +3814,7 @@ fallback:
 			FbBits *dst_bits, *src_bits;
 			int stride = pixmap->devKind;
 			int bpp = pixmap->drawable.bitsPerPixel;
+			int i;
 
 			dst_bits = (FbBits *)
 				((char *)pixmap->devPrivate.ptr +
@@ -3822,12 +3823,10 @@ fallback:
 				((char *)pixmap->devPrivate.ptr +
 				 dy * stride + dx * bpp / 8);
 
-			do {
+			for (i = 0; i < n; i++)
 				memmove_box(src_bits, dst_bits,
-					    bpp, stride, box,
+					    bpp, stride, box+i,
 					    dx, dy);
-				box++;
-			} while (--n);
 		} else {
 			if (gc && !sna_gc_move_to_cpu(gc, dst, region))
 				goto out;
commit ef34d5cf415ad7459ab44b0ec2e70b14150735fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 23:25:17 2012 +0100

    sna: Make sure we check for a busy CPU bo before declaring is-cpu
    
    Even if the pixmap is entirely damaged on the CPU, we still may be in
    the process of transferring it and so cause an unwanted stall.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 32eb54e..fff5436 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -85,10 +85,9 @@ static inline bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	if (priv == NULL || priv->gpu_bo == NULL || priv->clear || DAMAGE_IS_ALL(priv->cpu_damage))
+	if (priv == NULL || priv->gpu_bo == NULL || priv->clear)
 		return true;
 
-	assert(!priv->gpu_bo->proxy); /* should be marked as cpu damaged */
 	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo))
 		return false;
 
commit 0e397e4a1dc23ed07089c967612d705584f3b376
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 21:35:23 2012 +0100

    sna/glyphs: Perform the fallback mask reduce before moving the glyph pointers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 80c0b74..2822368 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1316,6 +1316,11 @@ glyphs_fallback(CARD8 op,
 	}
 	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
 
+	if (mask_format &&
+	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
+	    mask_format == glyphs_format(nlist, list, glyphs))
+		mask_format = NULL;
+
 	cache = sna->render.glyph_cache;
 	pixman_glyph_cache_freeze(cache);
 
@@ -1377,11 +1382,6 @@ next:
 	if (dst_image == NULL)
 		goto out_free_src;
 
-	if (mask_format &&
-	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
-	    mask_format == glyphs_format(nlist, list, glyphs))
-		mask_format = NULL;
-
 	if (mask_format) {
 		pixman_composite_glyphs(op, src_image, dst_image,
 					mask_format->format | (mask_format->depth << 24),
commit db1ee13a53b0c1348b7566ee60ee1b7b384ef59a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 21:26:01 2012 +0100

    sna/gen7: Check for gradient allocation failure within spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c06980d..1e10cb0 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3185,13 +3185,12 @@ gen7_render_composite_spans(struct sna *sna,
 		break;
 	}
 
-	tmp->base.mask.bo = NULL;
-
 	tmp->base.is_affine = tmp->base.src.is_affine;
 	tmp->base.has_component_alpha = false;
 	tmp->base.need_magic_ca_pass = false;
 
-	gen7_composite_alpha_gradient_init(sna, &tmp->base.mask);
+	if (!gen7_composite_alpha_gradient_init(sna, &tmp->base.mask))
+		goto cleanup_src;
 
 	tmp->prim_emit = gen7_emit_composite_spans_primitive;
 	if (tmp->base.src.is_solid) {
commit 86479e97460da798a3804cbb4ae39e62de881af1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 21:24:04 2012 +0100

    sna/gen7: Uses EXTEND_PAD for its alpha-gradient with spans
    
    Fixes regression from 2b94f9a043.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 589c594..c06980d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2874,8 +2874,6 @@ gen7_composite_alpha_gradient_init(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	channel->filter = PictFilterNearest;
-	channel->repeat = RepeatPad;
 	channel->is_affine = true;
 	channel->is_solid  = false;
 	channel->transform = NULL;
@@ -3222,7 +3220,7 @@ gen7_render_composite_spans(struct sna *sna,
 	tmp->base.u.gen7.sampler = SAMPLER_OFFSET(tmp->base.src.filter,
 						  tmp->base.src.repeat,
 						  SAMPLER_FILTER_NEAREST,
-						  SAMPLER_EXTEND_NONE);
+						  SAMPLER_EXTEND_PAD);
 
 	tmp->box   = gen7_render_composite_spans_box;
 	tmp->boxes = gen7_render_composite_spans_boxes;
commit f17037275c05198c3c3f456964fd42032f9085b6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 20:11:17 2012 +0100

    sna: Reorder overlapping boxes for CopyArea/Window
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4257e07..d3787c1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3692,6 +3692,54 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	}
 }
 
+static BoxPtr
+reorder_boxes(BoxPtr box, int n, int dx, int dy)
+{
+	BoxPtr new, base, next, tmp;
+
+	DBG(("%s x %d dx=%d, dy=%d\n", __FUNCTION__, n, dx, dy));
+
+	if (dy < 0) {
+		new = malloc(sizeof(BoxRec) * n);
+		if (new == NULL)
+			return NULL;
+
+		base = next = box + n - 1;
+		while (base >= box) {
+			while (next >= box && base->y1 == next->y1)
+				next--;
+			tmp = next + 1;
+			while (tmp <= base)
+				*new++ = *tmp++;
+			base = next;
+		}
+		new -= n;
+		box = new;
+	}
+
+	if (dx < 0) {
+		new = malloc(sizeof(BoxRec) * n);
+		if (!new) {
+			if (dy < 0)
+				free(box);
+			return NULL;
+		}
+		base = next = box;
+		while (base < box + n) {
+			while (next < box + n && next->y1 == base->y1)
+				next++;
+			tmp = next;
+			while (tmp != base)
+				*new++ = *--tmp;
+			base = next;
+		}
+		new -= n;
+		box = new;
+	}
+
+	return box;
+}
+
 static void
 sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		    RegionPtr region,int dx, int dy,
@@ -3700,6 +3748,8 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	PixmapPtr pixmap = get_drawable_pixmap(src);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	BoxPtr box = RegionRects(region);
+	int n = RegionNumRects(region);
 	int alu = gc ? gc->alu : GXcopy;
 	int16_t tx, ty;
 
@@ -3707,8 +3757,14 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (((dx | dy) == 0 && alu == GXcopy))
 		return;
 
+	if (n > 1 && (dx | dy) < 0) {
+		box = reorder_boxes(box, n, dx, dy);
+		if (box == NULL)
+			return;
+	}
+
 	DBG(("%s (boxes=%dx[(%d, %d), (%d, %d)...], src=+(%d, %d), alu=%d, pix.size=%dx%d)\n",
-	     __FUNCTION__, RegionNumRects(region),
+	     __FUNCTION__, n,
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2,
 	     dx, dy, alu,
@@ -3725,7 +3781,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	if (priv->gpu_bo) {
 		if (alu == GXcopy && priv->clear)
-			return;
+			goto out;
 
 		assert(priv->gpu_bo->proxy == NULL);
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ)) {
@@ -3737,9 +3793,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (!sna->render.copy_boxes(sna, alu,
 					    pixmap, priv->gpu_bo, dx, dy,
 					    pixmap, priv->gpu_bo, tx, ty,
-					    RegionRects(region),
-					    RegionNumRects(region),
-					    0)) {
+					    box, n, 0)) {
 			DBG(("%s: fallback - accelerated copy boxes failed\n",
 			     __FUNCTION__));
 			goto fallback;
@@ -3754,11 +3808,9 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 fallback:
 		DBG(("%s: fallback", __FUNCTION__));
 		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
-			return;
+			goto out;
 
 		if (alu == GXcopy && pixmap->drawable.bitsPerPixel >= 8) {
-			BoxPtr box = RegionRects(region);
-			int n = RegionNumRects(region);
 			FbBits *dst_bits, *src_bits;
 			int stride = pixmap->devKind;
 			int bpp = pixmap->drawable.bitsPerPixel;
@@ -3778,7 +3830,7 @@ fallback:
 			} while (--n);
 		} else {
 			if (gc && !sna_gc_move_to_cpu(gc, dst, region))
-				return;
+				goto out;
 
 			get_drawable_deltas(src, pixmap, &tx, &ty);
 			miCopyRegion(src, dst, gc,
@@ -3789,6 +3841,10 @@ fallback:
 				sna_gc_move_to_gpu(gc);
 		}
 	}
+
+out:
+	if (box != RegionRects(region))
+		free(box);
 }
 
 static int
commit 86e09d14bd00344d378b86a19ebb44f7d946926c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 17:51:31 2012 +0100

    sna: Tidy sna_copy_boxes
    
    So there appears to be a bug hidden here. But only when we scroll
    upwards in a GTK+ application. Hmm.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5880c78..f85b5af 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -800,7 +800,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
-	     kgem->half_cpu_cace_pages));
+	     kgem->half_cpu_cache_pages));
 
 	list_init(&kgem->batch_partials);
 	list_init(&kgem->active_partials);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9627a06..4257e07 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -69,6 +69,7 @@
 #define ACCEL_PUT_IMAGE 1
 #define ACCEL_COPY_AREA 1
 #define ACCEL_COPY_PLANE 1
+#define ACCEL_COPY_WINDOW 1
 #define ACCEL_POLY_POINT 1
 #define ACCEL_POLY_LINE 1
 #define ACCEL_POLY_SEGMENT 1
@@ -3723,6 +3724,9 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		goto fallback;
 
 	if (priv->gpu_bo) {
+		if (alu == GXcopy && priv->clear)
+			return;
+
 		assert(priv->gpu_bo->proxy == NULL);
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ)) {
 			DBG(("%s: fallback - not a pure copy and failed to move dst to GPU\n",
@@ -3773,7 +3777,7 @@ fallback:
 				box++;
 			} while (--n);
 		} else {
-			if (!sna_gc_move_to_cpu(gc, dst, region))
+			if (gc && !sna_gc_move_to_cpu(gc, dst, region))
 				return;
 
 			get_drawable_deltas(src, pixmap, &tx, &ty);
@@ -3781,7 +3785,8 @@ fallback:
 				     region, dx - tx, dy - ty,
 				     fbCopyNtoN, 0, NULL);
 
-			sna_gc_move_to_gpu(gc);
+			if (gc)
+				sna_gc_move_to_gpu(gc);
 		}
 	}
 }
@@ -3821,17 +3826,17 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	struct sna *sna = to_sna_from_pixmap(src_pixmap);
 	struct sna_damage **damage;
 	struct kgem_bo *bo;
-	int alu = gc ? gc->alu : GXcopy;
 	int16_t src_dx, src_dy;
 	int16_t dst_dx, dst_dy;
 	BoxPtr box = RegionRects(region);
 	int n = RegionNumRects(region);
+	int alu = gc->alu;
 	int stride, bpp;
 	char *bits;
 	bool replaces;
 
-	if (n == 0)
-		return;
+	assert(RegionNumRects(region));
+	assert_pixmap_contains_box(dst_pixmap, RegionExtents(region));
 
 	if (src_pixmap == dst_pixmap)
 		return sna_self_copy_boxes(src, dst, gc,
@@ -3851,13 +3856,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	bpp = dst_pixmap->drawable.bitsPerPixel;
 
 	get_drawable_deltas(dst, dst_pixmap, &dst_dx, &dst_dy);
-	get_drawable_deltas(src, src_pixmap, &src_dx, &src_dy);
-	src_dx += dx;
-	src_dy += dy;
-
 	RegionTranslate(region, dst_dx, dst_dy);
-	src_dx -= dst_dx;
-	src_dy -= dst_dy;
+	get_drawable_deltas(src, src_pixmap, &src_dx, &src_dy);
+	src_dx += dx - dst_dx;
+	src_dy += dy - dst_dy;
 
 	replaces = n == 1 &&
 		box->x1 <= 0 &&
@@ -3897,8 +3899,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (src_priv && src_priv->clear) {
 			DBG(("%s: applying src clear[%08x] to dst\n",
 			     __FUNCTION__, src_priv->clear_color));
-			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(region));
 			if (n == 1) {
 				if (!sna->render.fill_one(sna,
 							  dst_pixmap, bo,
@@ -3927,7 +3927,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (damage)
 				sna_damage_add(damage, region);
-
 			return;
 		}
 
@@ -3945,11 +3944,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
-			if (damage) {
-				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(region));
+			if (damage)
 				sna_damage_add(damage, region);
-			}
 			return;
 		}
 
@@ -3980,11 +3976,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
-			if (damage) {
-				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(region));
+			if (damage)
 				sna_damage_add(damage, region);
-			}
 			return;
 		}
 
@@ -4016,11 +4009,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
-			if (damage) {
-				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(region));
+			if (damage)
 				sna_damage_add(damage, region);
-			}
 			return;
 		}
 
@@ -4028,7 +4018,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			PixmapPtr tmp;
 			int i;
 
-			assert (src_pixmap->drawable.depth != 1);
+			assert(src_pixmap->drawable.depth != 1);
 
 			DBG(("%s: creating temporary source upload for non-copy alu [%d]\n",
 			     __FUNCTION__, alu));
@@ -4078,11 +4068,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 			tmp->drawable.pScreen->DestroyPixmap(tmp);
 
-			if (damage) {
-				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(region));
+			if (damage)
 				sna_damage_add(damage, region);
-			}
 			return;
 		} else {
 			DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
@@ -4132,12 +4119,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						       dst_pixmap->drawable.height);
 					list_del(&dst_priv->list);
 					dst_priv->undamaged = false;
-				} else {
-					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(region));
+				} else
 					sna_damage_add(&dst_priv->gpu_damage,
 						       region);
-				}
 				assert_pixmap_damage(dst_pixmap);
 			}
 		}
@@ -4151,9 +4135,6 @@ fallback:
 		     __FUNCTION__, src_priv->clear_color));
 
 		if (dst_priv) {
-			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(region));
-
 			if (!sna_drawable_move_region_to_cpu(&dst_pixmap->drawable,
 							     region,
 							     MOVE_WRITE | MOVE_INPLACE_HINT))
@@ -4198,9 +4179,6 @@ fallback:
 		if (dst_priv) {
 			unsigned mode;
 
-			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(region));
-
 			if (alu_overwrites(alu))
 				mode = MOVE_WRITE | MOVE_INPLACE_HINT;
 			else
@@ -4267,6 +4245,21 @@ typedef void (*sna_copy_func)(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			      RegionPtr region, int dx, int dy,
 			      Pixel bitPlane, void *closure);
 
+inline static bool
+box_intersect(BoxPtr a, const BoxRec *b)
+{
+	if (a->x1 < b->x1)
+		a->x1 = b->x1;
+	if (a->x2 > b->x2)
+		a->x2 = b->x2;
+	if (a->y1 < b->y1)
+		a->y1 = b->y1;
+	if (a->y2 > b->y2)
+		a->y2 = b->y2;
+
+	return a->x1 < a->x2 && a->y1 < a->y2;
+}
+
 static RegionPtr
 sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	    int sx, int sy,
@@ -4274,13 +4267,18 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	    int dx, int dy,
 	    sna_copy_func copy, Pixel bitPlane, void *closure)
 {
-	RegionPtr clip = NULL, free_clip = NULL;
+	RegionPtr clip, free_clip = NULL;
 	RegionRec region;
-	bool expose = false;
+	bool expose;
+
+	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=(%dx%d)\n",
+	     __FUNCTION__, sx, sy, dx, dy, width, height));
 
 	/* Short cut for unmapped windows */
-	if (dst->type == DRAWABLE_WINDOW && !((WindowPtr)dst)->realized)
+	if (dst->type == DRAWABLE_WINDOW && !((WindowPtr)dst)->realized) {
+		DBG(("%s: unmapped\n", __FUNCTION__));
 		return NULL;
+	}
 
 	if (src->pScreen->SourceValidate)
 		src->pScreen->SourceValidate(src, sx, sy,
@@ -4293,25 +4291,23 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	dx += dst->x;
 	dy += dst->y;
 
+	DBG(("%s: after drawable: src=(%d, %d), dst=(%d, %d), size=(%dx%d)\n",
+	     __FUNCTION__, sx, sy, dx, dy, width, height));
+
 	region.extents.x1 = dx;
 	region.extents.y1 = dy;
 	region.extents.x2 = dx + width;
 	region.extents.y2 = dy + height;
 	region.data = NULL;
 
-	{
-		BoxPtr box = &gc->pCompositeClip->extents;
-		if (region.extents.x1 < box->x1)
-			region.extents.x1 = box->x1;
-		if (region.extents.x2 > box->x2)
-			region.extents.x2 = box->x2;
-		if (region.extents.y1 < box->y1)
-			region.extents.y1 = box->y1;
-		if (region.extents.y2 > box->y2)
-			region.extents.y2 = box->y2;
-	}
-	if (box_empty(&region.extents))
+	DBG(("%s: dst extents (%d, %d), (%d, %d)\n", __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	if (!box_intersect(&region.extents, &gc->pCompositeClip->extents)) {
+		DBG(("%s: dst clipped out\n", __FUNCTION__));
 		return NULL;
+	}
 
 	region.extents.x1 += sx - dx;
 	region.extents.x2 += sx - dx;
@@ -4319,31 +4315,28 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	region.extents.y2 += sy - dy;
 
 	/* Compute source clip region */
-	if (src->type == DRAWABLE_PIXMAP) {
-		if (src == dst && gc->clientClipType == CT_NONE)
-			clip = gc->pCompositeClip;
+	clip = NULL;
+	if (src == dst && gc->clientClipType == CT_NONE) {
+		DBG(("%s: using gc clip for src\n", __FUNCTION__));
+		clip = gc->pCompositeClip;
+	} else if (src->type == DRAWABLE_PIXMAP) {
+		DBG(("%s: pixmap -- no source clipping\n", __FUNCTION__));
+	} else if (gc->subWindowMode == IncludeInferiors) {
+		/*
+		 * XFree86 DDX empties the border clip when the
+		 * VT is inactive, make sure the region isn't empty
+		 */
+		if (((WindowPtr)src)->parent ||
+		    !RegionNotEmpty(&((WindowPtr)src)->borderClip)) {
+			DBG(("%s: include inferiors\n", __FUNCTION__));
+			free_clip = clip = NotClippedByChildren((WindowPtr)src);
+		}
 	} else {
-		if (gc->subWindowMode == IncludeInferiors) {
-			/*
-			 * XFree86 DDX empties the border clip when the
-			 * VT is inactive, make sure the region isn't empty
-			 */
-			if (!((WindowPtr) src)->parent &&
-			    RegionNotEmpty(&((WindowPtr) src)->borderClip)) {
-				/*
-				 * special case bitblt from root window in
-				 * IncludeInferiors mode; just like from a pixmap
-				 */
-			} else if (src == dst && gc->clientClipType == CT_NONE) {
-				clip = gc->pCompositeClip;
-			} else {
-				free_clip = clip =
-					NotClippedByChildren((WindowPtr) src);
-			}
-		} else
-			clip = &((WindowPtr)src)->clipList;
+		DBG(("%s: window clip\n", __FUNCTION__));
+		clip = &((WindowPtr)src)->clipList;
 	}
 	if (clip == NULL) {
+		DBG(("%s: fast source clip against extents\n", __FUNCTION__));
 		expose = true;
 		if (region.extents.x1 < src->x) {
 			region.extents.x1 = src->x;
@@ -4364,13 +4357,22 @@ sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (box_empty(&region.extents))
 			return NULL;
 	} else {
+		expose = false;
 		RegionIntersect(&region, &region, clip);
 		if (free_clip)
 			RegionDestroy(free_clip);
 	}
+	DBG(("%s: src extents (%d, %d), (%d, %d) x %d\n", __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2,
+	     RegionNumRects(&region)));
 	RegionTranslate(&region, dx-sx, dy-sy);
 	if (gc->pCompositeClip->data)
 		RegionIntersect(&region, &region, gc->pCompositeClip);
+	DBG(("%s: copy region (%d, %d), (%d, %d) x %d\n", __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2,
+	     RegionNumRects(&region)));
 
 	if (RegionNotEmpty(&region))
 		copy(src, dst, gc, &region, sx-dx, sy-dy, bitPlane, closure);
@@ -4485,21 +4487,6 @@ out:
 			   0, NULL);
 }
 
-inline static bool
-box_intersect(BoxPtr a, const BoxRec *b)
-{
-	if (a->x1 < b->x1)
-		a->x1 = b->x1;
-	if (a->x2 > b->x2)
-		a->x2 = b->x2;
-	if (a->y1 < b->y1)
-		a->y1 = b->y1;
-	if (a->y2 > b->y2)
-		a->y2 = b->y2;
-
-	return a->x1 < a->x2 && a->y1 < a->y2;
-}
-
 static const BoxRec *
 find_clip_box_for_y(const BoxRec *begin, const BoxRec *end, int16_t y)
 {
@@ -12231,7 +12218,7 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 		RegionTranslate(&dst, -pixmap->screen_x, -pixmap->screen_y);
 #endif
 
-	if (wedged(sna)) {
+	if (wedged(sna) || FORCE_FALLBACK || !ACCEL_COPY_WINDOW) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
 			return;
commit 924f596463555db27214fd8227218c2e21ecddc8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 19:03:08 2012 +0100

    sna: Avoid BLT to snoopable bo on older gen
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b44c734..5880c78 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -768,6 +768,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
 	     kgem->has_semaphores));
 
+	kgem->can_blt_cpu = gen == 0 || gen >= 30;
+	DBG(("%s: can blt to cpu? %d\n", __FUNCTION__,
+	     kgem->can_blt_cpu));
+
 	if (!is_hw_supported(kgem, dev)) {
 		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
 			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index e5db6fd..c354547 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -160,6 +160,8 @@ struct kgem {
 	uint32_t has_cache_level :1;
 	uint32_t has_llc :1;
 
+	uint32_t can_blt_cpu :1;
+
 	uint16_t fence_max;
 	uint16_t half_cpu_cache_pages;
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 406cbfa..9627a06 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1070,7 +1070,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 static inline bool use_cpu_bo_for_write(struct sna *sna,
 					struct sna_pixmap *priv)
 {
-	return priv->cpu_bo != NULL && sna->kgem.gen >= 30;
+	return priv->cpu_bo != NULL && sna->kgem.can_blt_cpu;
 }
 
 static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
@@ -2455,6 +2455,9 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
+	if (!to_sna_from_pixmap(pixmap)->kgem.can_blt_cpu)
+		return NULL;
+
 	if (flags == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
commit 44e226b1d9fca8cb95b0864adf8708b03ee8472c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 18:36:29 2012 +0100

    sna/glyphs: A repeat of the earlier typo for pixman glyphs
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index fa5e45b..80c0b74 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1346,7 +1346,7 @@ glyphs_fallback(CARD8 op,
 				pixman_image_t *glyph_image;
 
 				glyph_image = sna_glyph_get_image(g, screen);
-				if (glyph_image)
+				if (glyph_image == NULL)
 					goto next;
 
 				ptr = pixman_glyph_cache_insert(cache, g, NULL,
commit 797ebf937fcfcc87502727c70e6b52f89fecc799
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 16:58:18 2012 +0100

    sna/glyphs: Fix typo and render glyphs to the small mask
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 8c4796a..fa5e45b 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -920,7 +920,7 @@ upload:
 						pixman_image_t *glyph_image;
 
 						glyph_image = sna_glyph_get_image(g, screen);
-						if (glyph_image)
+						if (glyph_image == NULL)
 							goto next_image;
 
 						ptr = pixman_glyph_cache_insert(cache, g, NULL,
@@ -943,7 +943,7 @@ next_image:
 				list++;
 			} while (--nlist);
 
-			pixman_composite_glyphs_no_mask(PictOpAdd,
+			pixman_composite_glyphs_no_mask(PIXMAN_OP_ADD,
 							sna->render.white_image,
 							mask_image,
 							0, 0,
commit 2b94f9a043372ffede01339eea99377a71169fbc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 16:38:21 2012 +0100

    sna/gen7: Preselect sampler
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 352bd45..589c594 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -229,6 +229,19 @@ static const struct blendinfo {
 #define SAMPLER_OFFSET(sf, se, mf, me) \
 	(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * 2 * sizeof(struct gen7_sampler_state))
 
+#define FILL_SAMPLER \
+	SAMPLER_OFFSET(SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_REPEAT, \
+		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+
+#define COPY_SAMPLER \
+	SAMPLER_OFFSET(SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE, \
+		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+
+#define VIDEO_SAMPLER \
+	SAMPLER_OFFSET(SAMPLER_FILTER_BILINEAR, SAMPLER_EXTEND_PAD, \
+		       SAMPLER_FILTER_NEAREST, SAMPLER_EXTEND_NONE)
+
+
 #define OUT_BATCH(v) batch_emit(sna, v)
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
@@ -982,19 +995,7 @@ gen7_emit_state(struct sna *sna,
 				    op->has_component_alpha,
 				    op->dst.format));
 
-	DBG(("%s: sampler src=(%d, %d), mask=(%d, %d), offset=%d\n",
-	     __FUNCTION__,
-	     op->src.filter, op->src.repeat,
-	     op->mask.filter, op->mask.repeat,
-	     (int)SAMPLER_OFFSET(op->src.filter,
-				 op->src.repeat,
-				 op->mask.filter,
-				 op->mask.repeat)));
-	gen7_emit_sampler(sna,
-			  SAMPLER_OFFSET(op->src.filter,
-					 op->src.repeat,
-					 op->mask.filter,
-					 op->mask.repeat));
+	gen7_emit_sampler(sna, op->u.gen7.sampler);
 	gen7_emit_sf(sna, op->mask.bo != NULL);
 	gen7_emit_wm(sna,
 		     op->u.gen7.wm_kernel,
@@ -2017,9 +2018,6 @@ gen7_render_video(struct sna *sna,
 	tmp.dst.bo = priv->gpu_bo;
 
 	tmp.src.bo = frame->bo;
-	tmp.src.filter = SAMPLER_FILTER_BILINEAR;
-	tmp.src.repeat = SAMPLER_EXTEND_PAD;
-
 	tmp.mask.bo = NULL;
 
 	tmp.is_affine = true;
@@ -2035,6 +2033,7 @@ gen7_render_video(struct sna *sna,
 	}
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.u.gen7.sampler = VIDEO_SAMPLER;
 	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
@@ -2829,6 +2828,10 @@ gen7_render_composite(struct sna *sna,
 	tmp->u.gen7.nr_surfaces = 2 + (tmp->mask.bo != NULL);
 	tmp->u.gen7.nr_inputs = 1 + (tmp->mask.bo != NULL);
 	tmp->u.gen7.ve_id = gen7_choose_composite_vertex_buffer(tmp);
+	tmp->u.gen7.sampler = SAMPLER_OFFSET(tmp->src.filter,
+					     tmp->src.repeat,
+					     tmp->mask.filter,
+					     tmp->mask.repeat);
 
 	tmp->blt   = gen7_render_composite_blt;
 	tmp->box   = gen7_render_composite_box;
@@ -3216,6 +3219,10 @@ gen7_render_composite_spans(struct sna *sna,
 	tmp->base.u.gen7.nr_surfaces = 3;
 	tmp->base.u.gen7.nr_inputs = 2;
 	tmp->base.u.gen7.ve_id = 1 << 1 | tmp->base.is_affine;
+	tmp->base.u.gen7.sampler = SAMPLER_OFFSET(tmp->base.src.filter,
+						  tmp->base.src.repeat,
+						  SAMPLER_FILTER_NEAREST,
+						  SAMPLER_EXTEND_NONE);
 
 	tmp->box   = gen7_render_composite_spans_box;
 	tmp->boxes = gen7_render_composite_spans_boxes;
@@ -3450,8 +3457,6 @@ fallback_blt:
 			goto fallback_tiled;
 	}
 
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_NONE;
 	tmp.src.card_format = gen7_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
 		BoxRec extents = box[0];
@@ -3485,8 +3490,6 @@ fallback_blt:
 	}
 
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
@@ -3498,6 +3501,7 @@ fallback_blt:
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.u.gen7.sampler = COPY_SAMPLER;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -3659,8 +3663,6 @@ fallback:
 	op->base.src.height = src->drawable.height;
 	op->base.src.scale[0] = 1.f/src->drawable.width;
 	op->base.src.scale[1] = 1.f/src->drawable.height;
-	op->base.src.filter = SAMPLER_FILTER_NEAREST;
-	op->base.src.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.mask.bo = NULL;
 
@@ -3672,6 +3674,7 @@ fallback:
 	op->base.u.gen7.nr_surfaces = 2;
 	op->base.u.gen7.nr_inputs = 1;
 	op->base.u.gen7.ve_id = 1;
+	op->base.u.gen7.sampler = COPY_SAMPLER;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
@@ -3813,12 +3816,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
@@ -3830,6 +3828,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.u.gen7.sampler = FILL_SAMPLER;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -4001,12 +4000,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 		sna_render_get_solid(sna,
 				     sna_rgba_for_color(color,
 							dst->drawable.depth));
-	op->base.src.filter = SAMPLER_FILTER_NEAREST;
-	op->base.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	op->base.mask.bo = NULL;
-	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
-	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	op->base.is_affine = true;
 	op->base.has_component_alpha = false;
@@ -4018,6 +4012,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen7.nr_surfaces = 2;
 	op->base.u.gen7.nr_inputs = 1;
 	op->base.u.gen7.ve_id = 1;
+	op->base.u.gen7.sampler = FILL_SAMPLER;
 
 	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
@@ -4094,12 +4089,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		sna_render_get_solid(sna,
 				     sna_rgba_for_color(color,
 							dst->drawable.depth));
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
@@ -4111,6 +4101,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.u.gen7.sampler = FILL_SAMPLER;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
@@ -4189,12 +4180,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, 0);
-	tmp.src.filter = SAMPLER_FILTER_NEAREST;
-	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
-
 	tmp.mask.bo = NULL;
-	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
-	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
@@ -4206,6 +4192,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.u.gen7.sampler = FILL_SAMPLER;
 
 	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 364492f..7c43f61 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -137,6 +137,7 @@ struct sna_composite_op {
 			int nr_surfaces;
 			int nr_inputs;
 			int ve_id;
+			int sampler;
 		} gen7;
 	} u;
 
commit 50d61c38bdb720c17ba5bfcaf97032338e466d9b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 16:07:32 2012 +0100

    sna/gen4: Fix typo introduced in checking dst format
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d6623f7..c6fbddb 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -573,7 +573,7 @@ static uint32_t gen4_get_dest_format(PictFormat format)
 
 static bool gen4_check_dst_format(PictFormat format)
 {
-	if (gen4_check_dst_format(format) != -1)
+	if (gen4_get_dest_format(format) != -1)
 		return true;
 
 	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
commit fc91b1f157f9e6e4097e662e2890574e530344ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 15:48:50 2012 +0100

    sna/gen7: Only emit the render flushes between operations
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index f393bf6..352bd45 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -974,6 +974,9 @@ gen7_emit_state(struct sna *sna,
 {
 	bool need_stall = false;
 
+	if (sna->render_state.gen7.emit_flush)
+		gen7_emit_pipe_flush(sna);
+
 	gen7_emit_cc(sna,
 		     gen7_get_blend(op->op,
 				    op->has_component_alpha,
@@ -1011,7 +1014,7 @@ gen7_emit_state(struct sna *sna,
 	if (need_stall)
 		gen7_emit_pipe_stall(sna);
 
-	sna->render_state.gen7.emit_flush = op->op != PictOpSrc;
+	sna->render_state.gen7.emit_flush = op->op > PictOpSrc;
 }
 
 static void gen7_magic_ca_pass(struct sna *sna,
@@ -1055,11 +1058,6 @@ static void gen7_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen7.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen7.vertex_offset = 0;
-
-	if (sna->render_state.gen7.emit_flush) {
-		gen7_emit_pipe_flush(sna);
-		sna->render_state.gen7.emit_flush = false;
-	}
 }
 
 static int gen7_vertex_finish(struct sna *sna)
@@ -4293,6 +4291,7 @@ gen7_render_expire(struct kgem *kgem)
 
 static void gen7_render_reset(struct sna *sna)
 {
+	sna->render_state.gen7.emit_flush = false;
 	sna->render_state.gen7.needs_invariant = true;
 	sna->render_state.gen7.vb_id = 0;
 	sna->render_state.gen7.ve_id = -1;
commit d30dc59651f156e9d4572141e2c54af89d007a37
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 15:42:19 2012 +0100

    sna/gen7: Only fallback to BLT for a redirected target
    
    As we can always use a partial surface for the source.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 9e7683e..f393bf6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3369,8 +3369,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return true;
 
-	if ((too_large(dst->drawable.width, dst->drawable.height) ||
-	     too_large(src->drawable.width, src->drawable.height)) &&
+	if (too_large(dst->drawable.width, dst->drawable.height) ||
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable)) {
 		BoxRec extents = box[0];
 		int i;
commit 9f05b0c03b1fdab474b436431c430028fca1937e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 15:03:32 2012 +0100

    sna/gen4+: Fix up card/dest format confusion in previous commit
    
    That would have been much more successful had I not supplied the wrong
    opaque formats to the sampler.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 9cd0250..d6623f7 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -510,75 +510,83 @@ static uint32_t gen4_get_blend(int op,
 	return BLEND_OFFSET(src, dst);
 }
 
-static uint32_t gen4_get_dest_format(PictFormat format)
+static uint32_t gen4_get_card_format(PictFormat format)
 {
 	switch (format) {
 	default:
-		assert(0);
+		return -1;
 	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
 		return GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_x8r8g8b8:
+		return GEN4_SURFACEFORMAT_B8G8R8X8_UNORM;
 	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
 		return GEN4_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_x8b8g8r8:
+		return GEN4_SURFACEFORMAT_R8G8B8X8_UNORM;
 	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
 		return GEN4_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_x2r10g10b10:
+		return GEN4_SURFACEFORMAT_B10G10R10X2_UNORM;
+	case PICT_a2b10g10r10:
+		return GEN4_SURFACEFORMAT_R10G10B10A2_UNORM;
+	case PICT_r8g8b8:
+		return GEN4_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN4_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
+		return GEN4_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN4_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
 		return GEN4_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
 		return GEN4_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
 
-static bool gen4_check_dst_format(PictFormat format)
+static uint32_t gen4_get_dest_format(PictFormat format)
 {
 	switch (format) {
+	default:
+		return -1;
 	case PICT_a8r8g8b8:
 	case PICT_x8r8g8b8:
+		return GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
 	case PICT_a8b8g8r8:
 	case PICT_x8b8g8r8:
+		return GEN4_SURFACEFORMAT_R8G8B8A8_UNORM;
 	case PICT_a2r10g10b10:
 	case PICT_x2r10g10b10:
+		return GEN4_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_r5g6b5:
+		return GEN4_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
 	case PICT_a1r5g5b5:
+		return GEN4_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
+		return GEN4_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return false;
+		return GEN4_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
 
+static bool gen4_check_dst_format(PictFormat format)
+{
+	if (gen4_check_dst_format(format) != -1)
+		return true;
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
+}
+
 static bool gen4_check_format(uint32_t format)
 {
-	switch (format) {
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-	case PICT_r8g8b8:
-	case PICT_r5g6b5:
-	case PICT_a1r5g5b5:
-	case PICT_a8:
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
+	if (gen4_get_card_format(format) != -1)
 		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
-		return false;
-	}
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
 }
 
 typedef struct gen4_surface_state_padded {
@@ -640,33 +648,6 @@ sampler_state_init(struct gen4_sampler_state *sampler_state,
 	}
 }
 
-static uint32_t gen4_get_card_format(PictFormat format)
-{
-	switch (format) {
-	default:
-		return -1;
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-		return GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-		return GEN4_SURFACEFORMAT_R8G8B8A8_UNORM;
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-		return GEN4_SURFACEFORMAT_B10G10R10A2_UNORM;
-	case PICT_r5g6b5:
-		return GEN4_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-	case PICT_a1r5g5b5:
-		return GEN4_SURFACEFORMAT_B5G5R5A1_UNORM;
-	case PICT_a8:
-		return GEN4_SURFACEFORMAT_A8_UNORM;
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
-		return GEN4_SURFACEFORMAT_B4G4R4A4_UNORM;
-	}
-}
-
 static uint32_t gen4_filter(uint32_t filter)
 {
 	switch (filter) {
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 06123cc..f611a51 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -504,74 +504,83 @@ static uint32_t gen5_get_blend(int op,
 	return BLEND_OFFSET(src, dst);
 }
 
-static uint32_t gen5_get_dest_format(PictFormat format)
+static uint32_t gen5_get_card_format(PictFormat format)
 {
 	switch (format) {
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
 	default:
+		return -1;
+	case PICT_a8r8g8b8:
 		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_x8r8g8b8:
+		return GEN5_SURFACEFORMAT_B8G8R8X8_UNORM;
 	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
 		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_x8b8g8r8:
+		return GEN5_SURFACEFORMAT_R8G8B8X8_UNORM;
 	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
 		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_x2r10g10b10:
+		return GEN5_SURFACEFORMAT_B10G10R10X2_UNORM;
+	case PICT_a2b10g10r10:
+		return GEN5_SURFACEFORMAT_R10G10B10A2_UNORM;
+	case PICT_r8g8b8:
+		return GEN5_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
+		return GEN5_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
 		return GEN5_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
 		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
 
-static bool gen5_check_dst_format(PictFormat format)
+static uint32_t gen5_get_dest_format(PictFormat format)
 {
 	switch (format) {
+	default:
+		return -1;
 	case PICT_a8r8g8b8:
 	case PICT_x8r8g8b8:
+		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
 	case PICT_a8b8g8r8:
 	case PICT_x8b8g8r8:
+		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
 	case PICT_a2r10g10b10:
 	case PICT_x2r10g10b10:
+		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_r5g6b5:
+		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
 	case PICT_a1r5g5b5:
+		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
+		return GEN5_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return false;
+		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
 
+static bool gen5_check_dst_format(PictFormat format)
+{
+	if (gen5_get_dest_format(format) != -1)
+		return true;
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
+}
+
 static bool gen5_check_format(uint32_t format)
 {
-	switch (format) {
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-	case PICT_r8g8b8:
-	case PICT_r5g6b5:
-	case PICT_a1r5g5b5:
-	case PICT_a8:
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
+	if (gen5_get_card_format(format) != -1)
 		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
-		return false;
-	}
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
 }
 
 typedef struct gen5_surface_state_padded {
@@ -633,33 +642,6 @@ sampler_state_init(struct gen5_sampler_state *sampler_state,
 	}
 }
 
-static uint32_t gen5_get_card_format(PictFormat format)
-{
-	switch (format) {
-	default:
-		return -1;
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
-	case PICT_r5g6b5:
-		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-	case PICT_a1r5g5b5:
-		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
-	case PICT_a8:
-		return GEN5_SURFACEFORMAT_A8_UNORM;
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
-		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
-	}
-}
-
 static uint32_t gen5_filter(uint32_t filter)
 {
 	switch (filter) {
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index ab3e4d6..c635f4d 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -241,73 +241,83 @@ static uint32_t gen6_get_blend(int op,
 	return BLEND_OFFSET(src, dst);
 }
 
-static uint32_t gen6_get_dest_format(PictFormat format)
+static uint32_t gen6_get_card_format(PictFormat format)
 {
 	switch (format) {
 	default:
-		assert(0);
+		return -1;
 	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
 		return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_x8r8g8b8:
+		return GEN6_SURFACEFORMAT_B8G8R8X8_UNORM;
 	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
 		return GEN6_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_x8b8g8r8:
+		return GEN6_SURFACEFORMAT_R8G8B8X8_UNORM;
 	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
 		return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_x2r10g10b10:
+		return GEN6_SURFACEFORMAT_B10G10R10X2_UNORM;
+	case PICT_a2b10g10r10:
+		return GEN6_SURFACEFORMAT_R10G10B10A2_UNORM;
+	case PICT_r8g8b8:
+		return GEN6_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN6_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
+		return GEN6_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
 		return GEN6_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
 		return GEN6_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
 
-static bool gen6_check_dst_format(PictFormat format)
+static uint32_t gen6_get_dest_format(PictFormat format)
 {
 	switch (format) {
+	default:
+		return -1;
 	case PICT_a8r8g8b8:
 	case PICT_x8r8g8b8:
+		return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
 	case PICT_a8b8g8r8:
 	case PICT_x8b8g8r8:
+		return GEN6_SURFACEFORMAT_R8G8B8A8_UNORM;
 	case PICT_a2r10g10b10:
 	case PICT_x2r10g10b10:
+		return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_r5g6b5:
+		return GEN6_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
 	case PICT_a1r5g5b5:
+		return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
+		return GEN6_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return true;
+		return GEN6_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
+}
+
+static bool gen6_check_dst_format(PictFormat format)
+{
+	if (gen6_get_dest_format(format) != -1)
+		return true;
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
 	return false;
 }
 
 static bool gen6_check_format(uint32_t format)
 {
-	switch (format) {
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-	case PICT_r8g8b8:
-	case PICT_r5g6b5:
-	case PICT_a1r5g5b5:
-	case PICT_a8:
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
+	if (gen6_get_card_format(format) != -1)
 		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
-		return false;
-	}
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
 }
 
 static uint32_t gen6_filter(uint32_t filter)
@@ -1116,33 +1126,6 @@ static uint32_t gen6_create_cc_viewport(struct sna_static_stream *stream)
 	return sna_static_stream_add(stream, &vp, sizeof(vp), 32);
 }
 
-static uint32_t gen6_get_card_format(PictFormat format)
-{
-	switch (format) {
-	default:
-		return -1;
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-		return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-		return GEN6_SURFACEFORMAT_R8G8B8A8_UNORM;
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-		return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM;
-	case PICT_r5g6b5:
-		return GEN6_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-	case PICT_a1r5g5b5:
-		return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM;
-	case PICT_a8:
-		return GEN6_SURFACEFORMAT_A8_UNORM;
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
-		return GEN6_SURFACEFORMAT_B4G4R4A4_UNORM;
-	}
-}
-
 static uint32_t
 gen6_tiling_bits(uint32_t tiling)
 {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5d1d93f..9e7683e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -274,7 +274,7 @@ static uint32_t gen7_get_blend(int op,
 	return BLEND_OFFSET(src, dst);
 }
 
-static uint32_t gen7_get_dest_format(PictFormat format)
+static uint32_t gen7_get_card_format(PictFormat format)
 {
 	switch (format) {
 	default:
@@ -308,46 +308,49 @@ static uint32_t gen7_get_dest_format(PictFormat format)
 	}
 }
 
-static bool gen7_check_dst_format(PictFormat format)
+static uint32_t gen7_get_dest_format(PictFormat format)
 {
 	switch (format) {
+	default:
+		return -1;
 	case PICT_a8r8g8b8:
 	case PICT_x8r8g8b8:
+		return GEN7_SURFACEFORMAT_B8G8R8A8_UNORM;
 	case PICT_a8b8g8r8:
 	case PICT_x8b8g8r8:
+		return GEN7_SURFACEFORMAT_R8G8B8A8_UNORM;
 	case PICT_a2r10g10b10:
 	case PICT_x2r10g10b10:
+		return GEN7_SURFACEFORMAT_B10G10R10A2_UNORM;
 	case PICT_r5g6b5:
+		return GEN7_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
 	case PICT_a1r5g5b5:
+		return GEN7_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
+		return GEN7_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return true;
+		return GEN7_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
+}
+
+static bool gen7_check_dst_format(PictFormat format)
+{
+	if (gen7_get_dest_format(format) != -1)
+		return true;
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
 	return false;
 }
 
 static bool gen7_check_format(uint32_t format)
 {
-	switch (format) {
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-	case PICT_r8g8b8:
-	case PICT_r5g6b5:
-	case PICT_a1r5g5b5:
-	case PICT_a8:
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
+	if (gen7_get_card_format(format) != -1)
 		return true;
-	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
-		return false;
-	}
+
+	DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
+	return false;
 }
 
 static uint32_t gen7_filter(uint32_t filter)
@@ -1264,33 +1267,6 @@ static uint32_t gen7_create_cc_viewport(struct sna_static_stream *stream)
 	return sna_static_stream_add(stream, &vp, sizeof(vp), 32);
 }
 
-static uint32_t gen7_get_card_format(PictFormat format)
-{
-	switch (format) {
-	default:
-		return -1;
-	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
-		return GEN7_SURFACEFORMAT_B8G8R8A8_UNORM;
-	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
-		return GEN7_SURFACEFORMAT_R8G8B8A8_UNORM;
-	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
-		return GEN7_SURFACEFORMAT_B10G10R10A2_UNORM;
-	case PICT_r5g6b5:
-		return GEN7_SURFACEFORMAT_B5G6R5_UNORM;
-	case PICT_x1r5g5b5:
-	case PICT_a1r5g5b5:
-		return GEN7_SURFACEFORMAT_B5G5R5A1_UNORM;
-	case PICT_a8:
-		return GEN7_SURFACEFORMAT_A8_UNORM;
-	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
-		return GEN7_SURFACEFORMAT_B4G4R4A4_UNORM;
-	}
-}
-
 static uint32_t
 gen7_tiling_bits(uint32_t tiling)
 {
commit fffbc34e4621898eee9b80bf8b6b3699bcade52a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 14:54:27 2012 +0100

    sna/gen4+: Translate to card format using a switch
    
    GCC produces faster code than a walk over the format tables.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 2edfbd0..9cd0250 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -249,25 +249,6 @@ static const struct blendinfo {
  */
 #define GEN4_BLENDFACTOR_COUNT (GEN4_BLENDFACTOR_INV_DST_ALPHA + 1)
 
-static const struct formatinfo {
-	CARD32 pict_fmt;
-	uint32_t card_fmt;
-} gen4_tex_formats[] = {
-	{PICT_a8, GEN4_SURFACEFORMAT_A8_UNORM},
-	{PICT_a8r8g8b8, GEN4_SURFACEFORMAT_B8G8R8A8_UNORM},
-	{PICT_x8r8g8b8, GEN4_SURFACEFORMAT_B8G8R8X8_UNORM},
-	{PICT_a8b8g8r8, GEN4_SURFACEFORMAT_R8G8B8A8_UNORM},
-	{PICT_x8b8g8r8, GEN4_SURFACEFORMAT_R8G8B8X8_UNORM},
-	{PICT_r8g8b8, GEN4_SURFACEFORMAT_R8G8B8_UNORM},
-	{PICT_r5g6b5, GEN4_SURFACEFORMAT_B5G6R5_UNORM},
-	{PICT_a1r5g5b5, GEN4_SURFACEFORMAT_B5G5R5A1_UNORM},
-	{PICT_a2r10g10b10, GEN4_SURFACEFORMAT_B10G10R10A2_UNORM},
-	{PICT_x2r10g10b10, GEN4_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a2b10g10r10, GEN4_SURFACEFORMAT_R10G10B10A2_UNORM},
-	{PICT_x2r10g10b10, GEN4_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a4r4g4b4, GEN4_SURFACEFORMAT_B4G4R4A4_UNORM},
-};
-
 #define BLEND_OFFSET(s, d) \
 	(((s) * GEN4_BLENDFACTOR_COUNT + (d)) * 64)
 
@@ -661,13 +642,29 @@ sampler_state_init(struct gen4_sampler_state *sampler_state,
 
 static uint32_t gen4_get_card_format(PictFormat format)
 {
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(gen4_tex_formats); i++) {
-		if (gen4_tex_formats[i].pict_fmt == format)
-			return gen4_tex_formats[i].card_fmt;
+	switch (format) {
+	default:
+		return -1;
+	case PICT_a8r8g8b8:
+	case PICT_x8r8g8b8:
+		return GEN4_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_a8b8g8r8:
+	case PICT_x8b8g8r8:
+		return GEN4_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_a2r10g10b10:
+	case PICT_x2r10g10b10:
+		return GEN4_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_r5g6b5:
+		return GEN4_SURFACEFORMAT_B5G6R5_UNORM;
+	case PICT_x1r5g5b5:
+	case PICT_a1r5g5b5:
+		return GEN4_SURFACEFORMAT_B5G5R5A1_UNORM;
+	case PICT_a8:
+		return GEN4_SURFACEFORMAT_A8_UNORM;
+	case PICT_a4r4g4b4:
+	case PICT_x4r4g4b4:
+		return GEN4_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
-	return -1;
 }
 
 static uint32_t gen4_filter(uint32_t filter)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index c4b1ecf..06123cc 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -235,28 +235,6 @@ static const struct blendinfo {
  */
 #define GEN5_BLENDFACTOR_COUNT (GEN5_BLENDFACTOR_INV_DST_ALPHA + 1)
 
-/* FIXME: surface format defined in gen5_defines.h, shared Sampling engine
- * 1.7.2
- */
-static const struct formatinfo {
-	CARD32 pict_fmt;
-	uint32_t card_fmt;
-} gen5_tex_formats[] = {
-	{PICT_a8, GEN5_SURFACEFORMAT_A8_UNORM},
-	{PICT_a8r8g8b8, GEN5_SURFACEFORMAT_B8G8R8A8_UNORM},
-	{PICT_x8r8g8b8, GEN5_SURFACEFORMAT_B8G8R8X8_UNORM},
-	{PICT_a8b8g8r8, GEN5_SURFACEFORMAT_R8G8B8A8_UNORM},
-	{PICT_x8b8g8r8, GEN5_SURFACEFORMAT_R8G8B8X8_UNORM},
-	{PICT_r8g8b8, GEN5_SURFACEFORMAT_R8G8B8_UNORM},
-	{PICT_r5g6b5, GEN5_SURFACEFORMAT_B5G6R5_UNORM},
-	{PICT_a1r5g5b5, GEN5_SURFACEFORMAT_B5G5R5A1_UNORM},
-	{PICT_a2r10g10b10, GEN5_SURFACEFORMAT_B10G10R10A2_UNORM},
-	{PICT_x2r10g10b10, GEN5_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a2b10g10r10, GEN5_SURFACEFORMAT_R10G10B10A2_UNORM},
-	{PICT_x2r10g10b10, GEN5_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a4r4g4b4, GEN5_SURFACEFORMAT_B4G4R4A4_UNORM},
-};
-
 #define BLEND_OFFSET(s, d) \
 	(((s) * GEN5_BLENDFACTOR_COUNT + (d)) * 64)
 
@@ -657,13 +635,29 @@ sampler_state_init(struct gen5_sampler_state *sampler_state,
 
 static uint32_t gen5_get_card_format(PictFormat format)
 {
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(gen5_tex_formats); i++) {
-		if (gen5_tex_formats[i].pict_fmt == format)
-			return gen5_tex_formats[i].card_fmt;
+	switch (format) {
+	default:
+		return -1;
+	case PICT_a8r8g8b8:
+	case PICT_x8r8g8b8:
+		return GEN5_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_a8b8g8r8:
+	case PICT_x8b8g8r8:
+		return GEN5_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_a2r10g10b10:
+	case PICT_x2r10g10b10:
+		return GEN5_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_r5g6b5:
+		return GEN5_SURFACEFORMAT_B5G6R5_UNORM;
+	case PICT_x1r5g5b5:
+	case PICT_a1r5g5b5:
+		return GEN5_SURFACEFORMAT_B5G5R5A1_UNORM;
+	case PICT_a8:
+		return GEN5_SURFACEFORMAT_A8_UNORM;
+	case PICT_a4r4g4b4:
+	case PICT_x4r4g4b4:
+		return GEN5_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
-	return -1;
 }
 
 static uint32_t gen5_filter(uint32_t filter)
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index ccf27be..ab3e4d6 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -188,28 +188,6 @@ static const struct blendinfo {
  */
 #define GEN6_BLENDFACTOR_COUNT (GEN6_BLENDFACTOR_INV_DST_ALPHA + 1)
 
-/* FIXME: surface format defined in gen6_defines.h, shared Sampling engine
- * 1.7.2
- */
-static const struct formatinfo {
-	CARD32 pict_fmt;
-	uint32_t card_fmt;
-} gen6_tex_formats[] = {
-	{PICT_a8, GEN6_SURFACEFORMAT_A8_UNORM},
-	{PICT_a8r8g8b8, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM},
-	{PICT_x8r8g8b8, GEN6_SURFACEFORMAT_B8G8R8X8_UNORM},
-	{PICT_a8b8g8r8, GEN6_SURFACEFORMAT_R8G8B8A8_UNORM},
-	{PICT_x8b8g8r8, GEN6_SURFACEFORMAT_R8G8B8X8_UNORM},
-	{PICT_r8g8b8, GEN6_SURFACEFORMAT_R8G8B8_UNORM},
-	{PICT_r5g6b5, GEN6_SURFACEFORMAT_B5G6R5_UNORM},
-	{PICT_a1r5g5b5, GEN6_SURFACEFORMAT_B5G5R5A1_UNORM},
-	{PICT_a2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10A2_UNORM},
-	{PICT_x2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a2b10g10r10, GEN6_SURFACEFORMAT_R10G10B10A2_UNORM},
-	{PICT_x2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a4r4g4b4, GEN6_SURFACEFORMAT_B4G4R4A4_UNORM},
-};
-
 #define GEN6_BLEND_STATE_PADDED_SIZE	ALIGN(sizeof(struct gen6_blend_state), 64)
 
 #define BLEND_OFFSET(s, d) \
@@ -1140,13 +1118,29 @@ static uint32_t gen6_create_cc_viewport(struct sna_static_stream *stream)
 
 static uint32_t gen6_get_card_format(PictFormat format)
 {
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(gen6_tex_formats); i++) {
-		if (gen6_tex_formats[i].pict_fmt == format)
-			return gen6_tex_formats[i].card_fmt;
+	switch (format) {
+	default:
+		return -1;
+	case PICT_a8r8g8b8:
+	case PICT_x8r8g8b8:
+		return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_a8b8g8r8:
+	case PICT_x8b8g8r8:
+		return GEN6_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_a2r10g10b10:
+	case PICT_x2r10g10b10:
+		return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_r5g6b5:
+		return GEN6_SURFACEFORMAT_B5G6R5_UNORM;
+	case PICT_x1r5g5b5:
+	case PICT_a1r5g5b5:
+		return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM;
+	case PICT_a8:
+		return GEN6_SURFACEFORMAT_A8_UNORM;
+	case PICT_a4r4g4b4:
+	case PICT_x4r4g4b4:
+		return GEN6_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
-	return -1;
 }
 
 static uint32_t
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e76acd8..5d1d93f 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -221,28 +221,6 @@ static const struct blendinfo {
  */
 #define GEN7_BLENDFACTOR_COUNT (GEN7_BLENDFACTOR_INV_DST_ALPHA + 1)
 
-/* FIXME: surface format defined in gen7_defines.h, shared Sampling engine
- * 1.7.2
- */
-static const struct formatinfo {
-	CARD32 pict_fmt;
-	uint32_t card_fmt;
-} gen7_tex_formats[] = {
-	{PICT_a8, GEN7_SURFACEFORMAT_A8_UNORM},
-	{PICT_a8r8g8b8, GEN7_SURFACEFORMAT_B8G8R8A8_UNORM},
-	{PICT_x8r8g8b8, GEN7_SURFACEFORMAT_B8G8R8X8_UNORM},
-	{PICT_a8b8g8r8, GEN7_SURFACEFORMAT_R8G8B8A8_UNORM},
-	{PICT_x8b8g8r8, GEN7_SURFACEFORMAT_R8G8B8X8_UNORM},
-	{PICT_r8g8b8, GEN7_SURFACEFORMAT_R8G8B8_UNORM},
-	{PICT_r5g6b5, GEN7_SURFACEFORMAT_B5G6R5_UNORM},
-	{PICT_a1r5g5b5, GEN7_SURFACEFORMAT_B5G5R5A1_UNORM},
-	{PICT_a2r10g10b10, GEN7_SURFACEFORMAT_B10G10R10A2_UNORM},
-	{PICT_x2r10g10b10, GEN7_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a2b10g10r10, GEN7_SURFACEFORMAT_R10G10B10A2_UNORM},
-	{PICT_x2r10g10b10, GEN7_SURFACEFORMAT_B10G10R10X2_UNORM},
-	{PICT_a4r4g4b4, GEN7_SURFACEFORMAT_B4G4R4A4_UNORM},
-};
-
 #define GEN7_BLEND_STATE_PADDED_SIZE	ALIGN(sizeof(struct gen7_blend_state), 64)
 
 #define BLEND_OFFSET(s, d) \
@@ -300,25 +278,32 @@ static uint32_t gen7_get_dest_format(PictFormat format)
 {
 	switch (format) {
 	default:
-		assert(0);
+		return -1;
 	case PICT_a8r8g8b8:
-	case PICT_x8r8g8b8:
 		return GEN7_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_x8r8g8b8:
+		return GEN7_SURFACEFORMAT_B8G8R8X8_UNORM;
 	case PICT_a8b8g8r8:
-	case PICT_x8b8g8r8:
 		return GEN7_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_x8b8g8r8:
+		return GEN7_SURFACEFORMAT_R8G8B8X8_UNORM;
 	case PICT_a2r10g10b10:
-	case PICT_x2r10g10b10:
 		return GEN7_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_x2r10g10b10:
+		return GEN7_SURFACEFORMAT_B10G10R10X2_UNORM;
+	case PICT_a2b10g10r10:
+		return GEN7_SURFACEFORMAT_R10G10B10A2_UNORM;
+	case PICT_r8g8b8:
+		return GEN7_SURFACEFORMAT_R8G8B8_UNORM;
 	case PICT_r5g6b5:
 		return GEN7_SURFACEFORMAT_B5G6R5_UNORM;
 	case PICT_x1r5g5b5:
+		return GEN7_SURFACEFORMAT_B5G5R5X1_UNORM;
 	case PICT_a1r5g5b5:
 		return GEN7_SURFACEFORMAT_B5G5R5A1_UNORM;
 	case PICT_a8:
 		return GEN7_SURFACEFORMAT_A8_UNORM;
 	case PICT_a4r4g4b4:
-	case PICT_x4r4g4b4:
 		return GEN7_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
 }
@@ -1281,13 +1266,29 @@ static uint32_t gen7_create_cc_viewport(struct sna_static_stream *stream)
 
 static uint32_t gen7_get_card_format(PictFormat format)
 {
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(gen7_tex_formats); i++) {
-		if (gen7_tex_formats[i].pict_fmt == format)
-			return gen7_tex_formats[i].card_fmt;
+	switch (format) {
+	default:
+		return -1;
+	case PICT_a8r8g8b8:
+	case PICT_x8r8g8b8:
+		return GEN7_SURFACEFORMAT_B8G8R8A8_UNORM;
+	case PICT_a8b8g8r8:
+	case PICT_x8b8g8r8:
+		return GEN7_SURFACEFORMAT_R8G8B8A8_UNORM;
+	case PICT_a2r10g10b10:
+	case PICT_x2r10g10b10:
+		return GEN7_SURFACEFORMAT_B10G10R10A2_UNORM;
+	case PICT_r5g6b5:
+		return GEN7_SURFACEFORMAT_B5G6R5_UNORM;
+	case PICT_x1r5g5b5:
+	case PICT_a1r5g5b5:
+		return GEN7_SURFACEFORMAT_B5G5R5A1_UNORM;
+	case PICT_a8:
+		return GEN7_SURFACEFORMAT_A8_UNORM;
+	case PICT_a4r4g4b4:
+	case PICT_x4r4g4b4:
+		return GEN7_SURFACEFORMAT_B4G4R4A4_UNORM;
 	}
-	return -1;
 }
 
 static uint32_t
commit 95fdd9af5c8a8360d02debc400e75869c36f05ca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 13:05:27 2012 +0100

    sna: Enable pixman_glyphs if available
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index d323da7..8cbbbc1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -118,6 +118,10 @@ AC_ARG_ENABLE(kms-only, AS_HELP_STRING([--enable-kms-only],
 required_xorg_xserver_version=1.6
 required_pixman_version=0.24
 
+if pkg-config --exists 'pixman-1 >= 0.27.1'; then
+	AC_DEFINE([HAS_PIXMAN_GLYPHS], 1, [Enable pixman glyph cache])
+fi
+
 AC_ARG_ENABLE(sna,
 	      AS_HELP_STRING([--enable-sna],
 			     [Enable SandyBridge's New Acceleration (SNA) [default=auto]]),
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 6f9faf4..8c4796a 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -81,6 +81,8 @@
 #define GLYPH_MAX_SIZE 64
 #define GLYPH_CACHE_SIZE (CACHE_PICTURE_SIZE * CACHE_PICTURE_SIZE / (GLYPH_MIN_SIZE * GLYPH_MIN_SIZE))
 
+#define N_STACK_GLYPHS 512
+
 #if HAS_DEBUG_FULL
 static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char *function)
 {
@@ -146,6 +148,12 @@ void sna_glyphs_close(struct sna *sna)
 		FreePicture(render->white_picture, 0);
 		render->white_picture = NULL;
 	}
+#if HAS_PIXMAN_GLYPHS
+	if (render->glyph_cache) {
+		pixman_glyph_cache_destroy(render->glyph_cache);
+		render->glyph_cache = NULL;
+	}
+#endif
 }
 
 /* All caches for a single format share a single pixmap for glyph storage,
@@ -170,8 +178,18 @@ bool sna_glyphs_create(struct sna *sna)
 
 	DBG(("%s\n", __FUNCTION__));
 
+#if HAS_PIXMAN_GLYPHS
+	sna->render.glyph_cache = pixman_glyph_cache_create();
+	if (sna->render.glyph_cache == NULL)
+		goto bail;
+#endif
+
+	sna->render.white_image = pixman_image_create_solid_fill(&white);
+	if (sna->render.white_image == NULL)
+		goto bail;
+
 	if (sna->kgem.wedged || !sna->have_render)
-		return TRUE;
+		return true;
 
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
 		struct sna_glyph_cache *cache = &sna->render.glyph[i];
@@ -222,14 +240,16 @@ bool sna_glyphs_create(struct sna *sna)
 		cache->evict = rand() % GLYPH_CACHE_SIZE;
 	}
 
-	sna->render.white_image = pixman_image_create_solid_fill(&white);
 	sna->render.white_picture =
 		CreateSolidPicture(0, (xRenderColor *)&white, &error);
-	return sna->render.white_image && sna->render.white_picture;
+	if (sna->render.white_picture == NULL)
+		goto bail;
+
+	return true;
 
 bail:
 	sna_glyphs_close(sna);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -330,7 +350,7 @@ glyph_cache(ScreenPtr screen,
 	int size, mask, pos, s;
 
 	if (NO_GLYPH_CACHE)
-		return FALSE;
+		return false;
 
 	if (glyph->info.width > GLYPH_MAX_SIZE ||
 	    glyph->info.height > GLYPH_MAX_SIZE) {
@@ -340,7 +360,7 @@ glyph_cache(ScreenPtr screen,
 			pixmap->usage_hint = 0;
 			sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
 		}
-		return FALSE;
+		return false;
 	}
 
 	for (size = GLYPH_MIN_SIZE; size <= GLYPH_MAX_SIZE; size *= 2)
@@ -404,7 +424,7 @@ glyph_cache(ScreenPtr screen,
 	glyph_cache_upload(cache, glyph, glyph_picture,
 			   priv->coordinate.x, priv->coordinate.y);
 
-	return TRUE;
+	return true;
 }
 
 static void apply_damage(struct sna_composite_op *op,
@@ -470,7 +490,7 @@ glyphs_to_dst(struct sna *sna,
 	int16_t x, y;
 
 	if (NO_GLYPHS_TO_DST)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -525,7 +545,7 @@ glyphs_to_dst(struct sna *sna,
 							   0, 0, 0, 0, 0, 0,
 							   0, 0,
 							   &tmp))
-					return FALSE;
+					return false;
 
 				glyph_atlas = priv.atlas;
 			}
@@ -603,7 +623,7 @@ next_glyph:
 	if (glyph_atlas)
 		tmp.done(sna, &tmp);
 
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -619,7 +639,7 @@ glyphs_slow(struct sna *sna,
 	int16_t x, y;
 
 	if (NO_GLYPHS_SLOW)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -675,7 +695,7 @@ glyphs_slow(struct sna *sna,
 						   glyph->info.width,
 						   glyph->info.height,
 						   &tmp))
-				return FALSE;
+				return false;
 
 			rects = REGION_RECTS(dst->pCompositeClip);
 			nrect = REGION_NUM_RECTS(dst->pCompositeClip);
@@ -726,7 +746,7 @@ next_glyph:
 		list++;
 	}
 
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -743,6 +763,37 @@ too_large(struct sna *sna, int width, int height)
 		height > sna->render.max_3d_size);
 }
 
+static pixman_image_t *
+__sna_glyph_get_image(GlyphPtr g, ScreenPtr s)
+{
+	pixman_image_t *image;
+	PicturePtr p;
+	int dx, dy;
+
+	p = GetGlyphPicture(g, s);
+	if (p == NULL)
+		return NULL;
+
+	image = image_from_pict(p, FALSE, &dx, &dy);
+	if (!image)
+		return NULL;
+
+	assert(dx == 0 && dy == 0);
+	return sna_glyph(g)->image = image;
+}
+
+static inline pixman_image_t *
+sna_glyph_get_image(GlyphPtr g, ScreenPtr s)
+{
+	pixman_image_t *image;
+
+	image = sna_glyph(g)->image;
+	if (image == NULL)
+		image = __sna_glyph_get_image(g, s);
+
+	return image;
+}
+
 static bool
 glyphs_via_mask(struct sna *sna,
 		CARD8 op,
@@ -762,7 +813,7 @@ glyphs_via_mask(struct sna *sna,
 	BoxRec box;
 
 	if (NO_GLYPHS_VIA_MASK)
-		return FALSE;
+		return false;
 
 	DBG(("%s(op=%d, src=(%d, %d), nlist=%d,  dst=(%d, %d)+(%d, %d))\n",
 	     __FUNCTION__, op, src_x, src_y, nlist,
@@ -770,7 +821,7 @@ glyphs_via_mask(struct sna *sna,
 
 	glyph_extents(nlist, list, glyphs, &box);
 	if (box.x2 <= box.x1 || box.y2 <= box.y1)
-		return TRUE;
+		return true;
 
 	DBG(("%s: bounds=((%d, %d), (%d, %d))\n", __FUNCTION__,
 	     box.x1, box.y1, box.x2, box.y2));
@@ -782,7 +833,7 @@ glyphs_via_mask(struct sna *sna,
 					   box.x1, box.y1,
 					   box.x2 - box.x1,
 					   box.y2 - box.y1))
-		return TRUE;
+		return true;
 
 	DBG(("%s: extents=((%d, %d), (%d, %d))\n", __FUNCTION__,
 	     box.x1, box.y1, box.x2, box.y2));
@@ -799,7 +850,7 @@ glyphs_via_mask(struct sna *sna,
 	if (format->depth < 8) {
 		format = PictureMatchFormat(screen, 8, PICT_a8);
 		if (!format)
-			return FALSE;
+			return false;
 	}
 
 	component_alpha = NeedsComponent(format->format);
@@ -818,7 +869,7 @@ upload:
 						  format->depth,
 						  KGEM_BUFFER_WRITE);
 		if (!pixmap)
-			return FALSE;
+			return false;
 
 		mask_image =
 			pixman_image_create_bits(format->depth << 24 | format->format,
@@ -827,17 +878,88 @@ upload:
 						 pixmap->devKind);
 		if (mask_image == NULL) {
 			screen->DestroyPixmap(pixmap);
-			return FALSE;
+			return false;
 		}
 
 		memset(pixmap->devPrivate.ptr, 0, pixmap->devKind*height);
+#if HAS_PIXMAN_GLYPHS
+		{
+			pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
+			pixman_glyph_t *pglyphs = stack_glyphs;
+			pixman_glyph_cache_t *cache;
+			int count, n;
+
+			cache = sna->render.glyph_cache;
+			pixman_glyph_cache_freeze(cache);
+
+			count = 0;
+			for (n = 0; n < nlist; ++n)
+				count += list[n].len;
+			if (count > N_STACK_GLYPHS) {
+				pglyphs = malloc (count * sizeof(pixman_glyph_t));
+				if (pglyphs == NULL) {
+					screen->DestroyPixmap(pixmap);
+					return false;
+				}
+			}
+
+			count = 0;
+			do {
+				n = list->len;
+				x += list->xOff;
+				y += list->yOff;
+				while (n--) {
+					GlyphPtr g = *glyphs++;
+					const void *ptr;
+
+					if (g->info.width == 0 || g->info.height == 0)
+						goto next_image;
+
+					ptr = pixman_glyph_cache_lookup(cache, g, NULL);
+					if (ptr == NULL) {
+						pixman_image_t *glyph_image;
+
+						glyph_image = sna_glyph_get_image(g, screen);
+						if (glyph_image == NULL)
+							goto next_image;
+
+						ptr = pixman_glyph_cache_insert(cache, g, NULL,
+										g->info.x,
+										g->info.y,
+										glyph_image);
+						if (ptr == NULL)
+							goto next_image;
+					}
+
+					pglyphs[count].x = x;
+					pglyphs[count].y = y;
+					pglyphs[count].glyph = ptr;
+					count++;
+
+next_image:
+					x += g->info.xOff;
+					y += g->info.yOff;
+				}
+				list++;
+			} while (--nlist);
+
+			pixman_composite_glyphs_no_mask(PictOpAdd,
+							sna->render.white_image,
+							mask_image,
+							0, 0,
+							0, 0,
+							cache, count, pglyphs);
+			pixman_glyph_cache_thaw(cache);
+			if (pglyphs != stack_glyphs)
+				free(pglyphs);
+		}
+#else
 		do {
 			int n = list->len;
 			x += list->xOff;
 			y += list->yOff;
 			while (n--) {
 				GlyphPtr g = *glyphs++;
-				PicturePtr picture;
 				pixman_image_t *glyph_image;
 				int16_t xi, yi;
 
@@ -855,23 +977,8 @@ upload:
 				    yi + g->info.height <= 0)
 					goto next_image;
 
-				glyph_image = sna_glyph(g)->image;
-				if (glyph_image == NULL) {
-					int dx, dy;
-
-					picture = GetGlyphPicture(g, dst->pDrawable->pScreen);
-					if (picture == NULL)
-						goto next_image;
-
-					glyph_image = image_from_pict(picture,
-								      FALSE,
-								      &dx, &dy);
-					if (!glyph_image)
-						goto next_image;
-
-					assert(dx == 0 && dy == 0);
-					sna_glyph(g)->image = glyph_image;
-				}
+				glyph_image =
+					sna_glyph_get_image(g, dst->pDrawable->pScreen);
 
 				DBG(("%s: glyph to mask (%d, %d)x(%d, %d)\n",
 				     __FUNCTION__,
@@ -908,6 +1015,7 @@ next_image:
 			}
 			list++;
 		} while (--nlist);
+#endif
 		pixman_image_unref(mask_image);
 
 		mask = CreatePicture(0, &pixmap->drawable,
@@ -915,7 +1023,7 @@ next_image:
 				     &component_alpha, serverClient, &error);
 		screen->DestroyPixmap(pixmap);
 		if (!mask)
-			return FALSE;
+			return false;
 
 		ValidatePicture(mask);
 	} else {
@@ -923,14 +1031,14 @@ next_image:
 					      width, height, format->depth,
 					      SNA_CREATE_SCRATCH);
 		if (!pixmap)
-			return FALSE;
+			return false;
 
 		mask = CreatePicture(0, &pixmap->drawable,
 				     format, CPComponentAlpha,
 				     &component_alpha, serverClient, &error);
 		screen->DestroyPixmap(pixmap);
 		if (!mask)
-			return FALSE;
+			return false;
 
 		ValidatePicture(mask);
 		if (!clear_pixmap(sna, pixmap)) {
@@ -999,7 +1107,7 @@ next_image:
 						DBG(("%s: fallback -- can not handle PictOpAdd of glyph onto mask!\n",
 						     __FUNCTION__));
 						FreePicture(mask, 0);
-						return FALSE;
+						return false;
 					}
 
 					glyph_atlas = this_atlas;
@@ -1037,7 +1145,7 @@ next_glyph:
 		      width, height);
 
 	FreePicture(mask, 0);
-	return TRUE;
+	return true;
 }
 
 static PictFormatPtr
@@ -1098,7 +1206,7 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 				extents.y1 = y1;
 				extents.x2 = x2;
 				extents.y2 = y2;
-				first = FALSE;
+				first = false;
 			} else {
 				/* Potential overlap?
 				 * We cheat and ignore the boundary pixels, as
@@ -1148,6 +1256,160 @@ out:
 	return format;
 }
 
+#if HAS_PIXMAN_GLYPHS
+static void
+glyphs_fallback(CARD8 op,
+		PicturePtr src,
+		PicturePtr dst,
+		PictFormatPtr mask_format,
+		int src_x, int src_y,
+		int nlist, GlyphListPtr list, GlyphPtr *glyphs)
+{
+	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+	pixman_glyph_t stack_glyphs[N_STACK_GLYPHS];
+	pixman_glyph_t *pglyphs = stack_glyphs;
+	pixman_image_t *src_image, *dst_image;
+	pixman_glyph_cache_t *cache;
+	int dst_x = list->xOff, dst_y = list->yOff;
+	int src_dx, src_dy, dst_dx, dst_dy;
+	ScreenPtr screen = dst->pDrawable->pScreen;
+	RegionRec region;
+	int x, y, count, n;
+
+	glyph_extents(nlist, list, glyphs, &region.extents);
+	if (region.extents.x2 <= region.extents.x1 ||
+	    region.extents.y2 <= region.extents.y1)
+		return;
+
+	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	region.data = NULL;
+	RegionTranslate(&region, dst->pDrawable->x, dst->pDrawable->y);
+	if (dst->pCompositeClip)
+		RegionIntersect(&region, &region, dst->pCompositeClip);
+	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     RegionExtents(&region)->x1, RegionExtents(&region)->y1,
+	     RegionExtents(&region)->x2, RegionExtents(&region)->y2));
+	if (!RegionNotEmpty(&region))
+		return;
+
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+					     MOVE_READ | MOVE_WRITE))
+		return;
+	if (dst->alphaMap &&
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
+				      MOVE_READ | MOVE_WRITE))
+		return;
+
+	if (src->pDrawable) {
+		if (!sna_drawable_move_to_cpu(src->pDrawable,
+					      MOVE_READ))
+			return;
+
+		if (src->alphaMap &&
+		    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+					      MOVE_READ))
+			return;
+	}
+	RegionTranslate(&region, -dst->pDrawable->x, -dst->pDrawable->y);
+
+	cache = sna->render.glyph_cache;
+	pixman_glyph_cache_freeze(cache);
+
+	count = 0;
+	for (n = 0; n < nlist; ++n)
+		count += list[n].len;
+	if (count > N_STACK_GLYPHS) {
+		pglyphs = malloc (count * sizeof(pixman_glyph_t));
+		if (pglyphs == NULL)
+			goto out;
+	}
+
+	count = 0;
+	x = y = 0;
+	while (nlist--) {
+		n = list->len;
+		x += list->xOff;
+		y += list->yOff;
+		while (n--) {
+			GlyphPtr g = *glyphs++;
+			const void *ptr;
+
+			if (g->info.width == 0 || g->info.height == 0)
+				goto next;
+
+			ptr = pixman_glyph_cache_lookup(cache, g, NULL);
+			if (ptr == NULL) {
+				pixman_image_t *glyph_image;
+
+				glyph_image = sna_glyph_get_image(g, screen);
+				if (glyph_image == NULL)
+					goto next;
+
+				ptr = pixman_glyph_cache_insert(cache, g, NULL,
+								g->info.x,
+								g->info.y,
+								glyph_image);
+				if (ptr == NULL)
+					goto out;
+			}
+
+			pglyphs[count].x = x;
+			pglyphs[count].y = y;
+			pglyphs[count].glyph = ptr;
+			count++;
+
+next:
+			x += g->info.xOff;
+			y += g->info.yOff;
+		}
+		list++;
+	}
+
+	src_image = image_from_pict(src, FALSE, &src_dx, &src_dy);
+	if (src_image == NULL)
+		goto out;
+
+	dst_image = image_from_pict(dst, TRUE, &dst_dx, &dst_dy);
+	if (dst_image == NULL)
+		goto out_free_src;
+
+	if (mask_format &&
+	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
+	    mask_format == glyphs_format(nlist, list, glyphs))
+		mask_format = NULL;
+
+	if (mask_format) {
+		pixman_composite_glyphs(op, src_image, dst_image,
+					mask_format->format | (mask_format->depth << 24),
+					src_x + src_dx + region.extents.x1 - dst_x,
+					src_y + src_dy + region.extents.y1 - dst_y,
+					region.extents.x1, region.extents.y1,
+					region.extents.x1 + dst_dx, region.extents.y1 + dst_dy,
+					region.extents.x2 - region.extents.x1,
+					region.extents.y2 - region.extents.y1,
+					cache, count, pglyphs);
+	} else {
+		pixman_composite_glyphs_no_mask(op, src_image, dst_image,
+						src_x + src_dx - dst_x, src_y + src_dy - dst_y,
+						dst_dx, dst_dy,
+						cache, count, pglyphs);
+	}
+
+	free_pixman_pict(dst, dst_image);
+
+out_free_src:
+	free_pixman_pict(src, src_image);
+
+out:
+	pixman_glyph_cache_thaw(cache);
+	if (pglyphs != stack_glyphs)
+		free(pglyphs);
+}
+#else
 static void
 glyphs_fallback(CARD8 op,
 		PicturePtr src,
@@ -1160,19 +1422,21 @@ glyphs_fallback(CARD8 op,
 		GlyphPtr *glyphs)
 {
 	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+	ScreenPtr screen = dst->pDrawable->pScreen;
 	pixman_image_t *dst_image, *mask_image, *src_image;
 	int dx, dy, x, y;
-	BoxRec box;
 	RegionRec region;
 
-	glyph_extents(nlist, list, glyphs, &box);
-	if (box.x2 <= box.x1 || box.y2 <= box.y1)
+	glyph_extents(nlist, list, glyphs, &region.extents);
+	if (region.extents.x2 <= region.extents.x1 ||
+	    region.extents.y2 <= region.extents.y1)
 		return;
 
-	DBG(("%s: (%d, %d), (%d, %d)\n",
-	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
+	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
-	RegionInit(&region, &box, 0);
+	region.data = NULL;
 	RegionTranslate(&region, dst->pDrawable->x, dst->pDrawable->y);
 	if (dst->pCompositeClip)
 		RegionIntersect(&region, &region, dst->pCompositeClip);
@@ -1267,23 +1531,9 @@ glyphs_fallback(CARD8 op,
 			if (g->info.width == 0 || g->info.height == 0)
 				goto next_glyph;
 
-			glyph_image = sna_glyph(g)->image;
-			if (glyph_image == NULL) {
-				PicturePtr picture;
-				int gx, gy;
-
-				picture = GetGlyphPicture(g, dst->pDrawable->pScreen);
-				if (picture == NULL)
-					goto next_glyph;
-
-				glyph_image = image_from_pict(picture, FALSE,
-							      &gx, &gy);
-				if (!glyph_image)
-					goto next_glyph;
-
-				assert(gx == 0 && gy == 0);
-				sna_glyph(g)->image = glyph_image;
-			}
+			glyph_image = sna_glyph_get_image(g, screen);
+			if (glyph_image == NULL)
+				goto next_glyph;
 
 			if (mask_format) {
 				DBG(("%s: glyph+(%d,%d) to mask (%d, %d)x(%d, %d)\n",
@@ -1376,6 +1626,7 @@ cleanup_dst:
 cleanup_region:
 	RegionUninit(&region);
 }
+#endif
 
 void
 sna_glyphs(CARD8 op,
@@ -1470,6 +1721,10 @@ sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph)
 	struct sna_glyph *priv = sna_glyph(glyph);
 
 	if (priv->image) {
+#if HAS_PIXMAN_GLYPHS
+		struct sna *sna = to_sna_from_screen(screen);
+		pixman_glyph_cache_remove(sna->render.glyph_cache, glyph, NULL);
+#endif
 		pixman_image_unref(priv->image);
 		priv->image = NULL;
 	}
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index b003e7b..364492f 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -280,6 +280,9 @@ struct sna_render {
 	} glyph[2];
 	pixman_image_t *white_image;
 	PicturePtr white_picture;
+#if HAS_PIXMAN_GLYPHS
+	pixman_glyph_cache_t *glyph_cache;
+#endif
 
 	uint16_t vertex_start;
 	uint16_t vertex_index;
commit 3f764ee4c50567cfb831495d42cb6c2bb94055ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 12:29:47 2012 +0100

    sna/dri: Presume an unknown chipset will want i965_dri.so
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 5f237b0..d369871 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -2148,7 +2148,8 @@ bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	sna->deviceName = drmGetDeviceNameFromFd(sna->kgem.fd);
 	memset(&info, '\0', sizeof(info));
 	info.fd = sna->kgem.fd;
-	info.driverName = sna->kgem.gen < 40 ? "i915" : "i965";
+	info.driverName =
+		(sna->kgem.gen && sna->kgem.gen < 40) ? "i915" : "i965";
 	info.deviceName = sna->deviceName;
 
 	DBG(("%s: loading dri driver '%s' [gen=%d] for device '%s'\n",
commit 39845280b5a9d0ed60c6a158c3d0df9fb8756d40
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 12:14:28 2012 +0100

    sna: Allow BLT support on future unknown hardware
    
    Assume that if the kernel recognises the chipset and declares it has a
    BLT, we can use it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c528fe7..b44c734 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -599,7 +599,7 @@ static int gem_param(struct kgem *kgem, int name)
 	return v;
 }
 
-static bool semaphores_enabled(void)
+static bool test_has_semaphores_enabled(void)
 {
 	FILE *file;
 	bool detected = false;
@@ -632,6 +632,9 @@ static bool is_hw_supported(struct kgem *kgem,
 	if (DBG_NO_HW)
 		return false;
 
+	if (kgem->gen == 0) /* unknown chipset, assume future gen */
+		return kgem->has_blt;
+
 	if (kgem->gen <= 20) /* dynamic GTT is fubar */
 		return false;
 
@@ -641,11 +644,40 @@ static bool is_hw_supported(struct kgem *kgem,
 	}
 
 	if (kgem->gen >= 60) /* Only if the kernel supports the BLT ring */
-		return gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+		return kgem->has_blt;
 
 	return true;
 }
 
+static bool test_has_relaxed_fencing(struct kgem *kgem)
+{
+	if (kgem->gen < 40) {
+		if (DBG_NO_RELAXED_FENCING)
+			return false;
+
+		return gem_param(kgem, I915_PARAM_HAS_RELAXED_FENCING) > 0;
+	} else
+		return true;
+}
+
+static bool test_has_llc(struct kgem *kgem)
+{
+	int has_llc = -1;
+
+	if (DBG_NO_LLC)
+		return false;
+
+#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
+	has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
+#endif
+	if (has_llc == -1) {
+		DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
+		has_llc = kgem->gen >= 60;
+	}
+
+	return has_llc;
+}
+
 static bool test_has_cache_level(struct kgem *kgem)
 {
 #if defined(USE_CACHE_LEVEL)
@@ -670,6 +702,21 @@ static bool test_has_cache_level(struct kgem *kgem)
 #endif
 }
 
+static bool test_has_vmap(struct kgem *kgem)
+{
+#if defined(USE_VMAP)
+	if (DBG_NO_VMAP)
+		return false;
+
+	if (kgem->gen == 40)
+		return false;
+
+	return gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
+#else
+	return false;
+#endif
+}
+
 static int kgem_get_screen_index(struct kgem *kgem)
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
@@ -683,10 +730,44 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	unsigned half_gpu_max;
 	unsigned int i, j;
 
+	DBG(("%s: fd=%d, gen=%d\n", __FUNCTION__, fd, gen));
+
 	memset(kgem, 0, sizeof(*kgem));
 
 	kgem->fd = fd;
 	kgem->gen = gen;
+
+	kgem->has_blt = gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+	DBG(("%s: has BLT ring? %d\n", __FUNCTION__,
+	     kgem->has_blt));
+
+	kgem->has_relaxed_delta =
+		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
+	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_delta));
+
+	kgem->has_relaxed_fencing = test_has_relaxed_fencing(kgem);
+	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_fencing));
+
+	kgem->has_llc = test_has_llc(kgem);
+	DBG(("%s: has shared last-level-cache? %d\n", __FUNCTION__,
+	     kgem->has_llc));
+
+	kgem->has_cache_level = test_has_cache_level(kgem);
+	DBG(("%s: has set-cache-level? %d\n", __FUNCTION__,
+	     kgem->has_cache_level));
+
+	kgem->has_vmap = test_has_vmap(kgem);
+	DBG(("%s: has vmap? %d\n", __FUNCTION__,
+	     kgem->has_vmap));
+
+	kgem->has_semaphores = false;
+	if (kgem->has_blt && test_has_semaphores_enabled())
+		kgem->has_semaphores = true;
+	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
+	     kgem->has_semaphores));
+
 	if (!is_hw_supported(kgem, dev)) {
 		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
 			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
@@ -697,11 +778,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->wedged = 1;
 	}
 
-	kgem->has_relaxed_delta =
-		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
-	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
-	     kgem->has_relaxed_delta));
-
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
@@ -719,6 +795,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->min_alignment = 64;
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
+	DBG(("%s: half cpu cache %d pages\n", __FUNCTION__,
+	     kgem->half_cpu_cache_pages));
 
 	list_init(&kgem->batch_partials);
 	list_init(&kgem->active_partials);
@@ -741,49 +819,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-	kgem->has_cache_level = test_has_cache_level(kgem);
-	DBG(("%s: using set-cache-level=%d\n", __FUNCTION__, kgem->has_cache_level));
-
-#if defined(USE_VMAP)
-	if (!DBG_NO_VMAP)
-		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
-	if (gen == 40)
-		kgem->has_vmap = false; /* sampler dies with snoopable memory */
-#endif
-	DBG(("%s: using vmap=%d\n", __FUNCTION__, kgem->has_vmap));
-
-	if (gen < 40) {
-		if (!DBG_NO_RELAXED_FENCING) {
-			kgem->has_relaxed_fencing =
-				gem_param(kgem, I915_PARAM_HAS_RELAXED_FENCING) > 0;
-		}
-	} else
-		kgem->has_relaxed_fencing = 1;
-	DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__,
-	     kgem->has_relaxed_fencing));
-
-	kgem->has_llc = false;
-	if (!DBG_NO_LLC) {
-		int has_llc = -1;
-#if defined(I915_PARAM_HAS_LLC) /* Expected in libdrm-2.4.31 */
-		has_llc = gem_param(kgem, I915_PARAM_HAS_LLC);
-#endif
-		if (has_llc == -1) {
-			DBG(("%s: no kernel/drm support for HAS_LLC, assuming support for LLC based on GPU generation\n", __FUNCTION__));
-			has_llc = gen >= 60;
-		}
-		kgem->has_llc = has_llc;
-	}
 	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
 	     kgem->has_llc | kgem->has_vmap | kgem->has_cache_level,
 	     kgem->has_llc, kgem->has_cache_level, kgem->has_vmap));
 
-	kgem->has_semaphores = false;
-	if (gen >= 60 && semaphores_enabled())
-		kgem->has_semaphores = true;
-	DBG(("%s: semaphores enabled? %d\n", __FUNCTION__,
-	     kgem->has_semaphores));
-
 	VG_CLEAR(aperture);
 	aperture.aper_size = 64*1024*1024;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 7596326..e5db6fd 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -153,6 +153,7 @@ struct kgem {
 	uint32_t busy:1;
 
 	uint32_t has_vmap :1;
+	uint32_t has_blt :1;
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_relaxed_delta :1;
 	uint32_t has_semaphores :1;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 02727a3..dcfab91 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -283,7 +283,7 @@ void no_render_init(struct sna *sna)
 	sna->kgem.context_switch = no_render_context_switch;
 	sna->kgem.retire = no_render_retire;
 	sna->kgem.expire = no_render_expire;
-	if (sna->kgem.gen >= 60)
+	if (sna->kgem.has_blt)
 		sna->kgem.ring = KGEM_BLT;
 }
 
commit b260ca44b3aaba2c8cd25640ad7ac9ca6478b0f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 09:59:07 2012 +0100

    Drop some unused includes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/common.h b/src/common.h
index e3ab1f2..86e5b11 100644
--- a/src/common.h
+++ b/src/common.h
@@ -38,6 +38,8 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #ifndef _INTEL_COMMON_H_
 #define _INTEL_COMMON_H_
 
+#include <xf86.h>
+
 /* Provide substitutes for gcc's __FUNCTION__ on other compilers */
 #if !defined(__GNUC__) && !defined(__FUNCTION__)
 # if defined(__STDC__) && (__STDC_VERSION__>=199901L) /* C99 */
@@ -47,117 +49,14 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 # endif
 #endif
 
-
 #define PFX __FILE__,__LINE__,__FUNCTION__
 #define FUNCTION_NAME __FUNCTION__
 
 #define KB(x) ((x) * 1024)
 #define MB(x) ((x) * KB(1024))
 
-/* Using usleep() makes things noticably slow. */
-#if 0
-#define DELAY(x) usleep(x)
-#else
-#define DELAY(x) do {;} while (0)
-#endif
-
-/* I830 hooks for the I810 driver setup/probe. */
-extern const OptionInfoRec *I830AvailableOptions(int chipid, int busid);
 extern Bool intel_init_scrn(ScrnInfoPtr scrn);
 
-/* Symbol lists shared by the i810 and i830 parts. */
-extern int I830EntityIndex;
-
-#ifdef _I830_H_
-#define PrintErrorState i830_dump_error_state
-#define WaitRingFunc I830WaitLpRing
-#define RecPtr intel
-#else
-#define PrintErrorState I810PrintErrorState
-#define WaitRingFunc I810WaitLpRing
-#define RecPtr pI810
-#endif
-
-static inline void memset_volatile(volatile void *b, int c, size_t len)
-{
-    size_t i;
-
-    for (i = 0; i < len; i++)
-	((volatile char *)b)[i] = c;
-}
-
-static inline void memcpy_volatile(volatile void *dst, const void *src,
-				   size_t len)
-{
-    size_t i;
-
-    for (i = 0; i < len; i++)
-	((volatile char *)dst)[i] = ((const volatile char *)src)[i];
-}
-
-/* Memory mapped register access macros */
-#define INREG8(addr)        *(volatile uint8_t *)(RecPtr->MMIOBase + (addr))
-#define INREG16(addr)       *(volatile uint16_t *)(RecPtr->MMIOBase + (addr))
-#define INREG(addr)         *(volatile uint32_t *)(RecPtr->MMIOBase + (addr))
-#define INGTT(addr)         *(volatile uint32_t *)(RecPtr->GTTBase + (addr))
-#define POSTING_READ(addr)  (void)INREG(addr)
-
-#define OUTREG8(addr, val) do {						\
-   *(volatile uint8_t *)(RecPtr->MMIOBase  + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG8(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-#define OUTREG16(addr, val) do {					\
-   *(volatile uint16_t *)(RecPtr->MMIOBase + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG16(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-#define OUTREG(addr, val) do {						\
-   *(volatile uint32_t *)(RecPtr->MMIOBase + (addr)) = (val);		\
-   if (I810_DEBUG&DEBUG_VERBOSE_OUTREG) {				\
-      ErrorF("OUTREG(0x%lx, 0x%lx) in %s\n", (unsigned long)(addr),	\
-		(unsigned long)(val), FUNCTION_NAME);			\
-   }									\
-} while (0)
-
-
-#define DEBUG_VERBOSE_ACCEL  0x1
-#define DEBUG_VERBOSE_SYNC   0x2
-#define DEBUG_VERBOSE_VGA    0x4
-#define DEBUG_VERBOSE_RING   0x8
-#define DEBUG_VERBOSE_OUTREG 0x10
-#define DEBUG_VERBOSE_MEMORY 0x20
-#define DEBUG_VERBOSE_CURSOR 0x40
-#define DEBUG_ALWAYS_SYNC    0x80
-#define DEBUG_VERBOSE_DRI    0x100
-#define DEBUG_VERBOSE_BIOS   0x200
-
-/* Size of the mmio region.
- */
-#define I810_REG_SIZE 0x80000
-
-#define GTT_PAGE_SIZE			KB(4)
-#define PRIMARY_RINGBUFFER_SIZE		KB(128)
-#define MIN_SCRATCH_BUFFER_SIZE		KB(16)
-#define MAX_SCRATCH_BUFFER_SIZE		KB(64)
-#define HWCURSOR_SIZE			GTT_PAGE_SIZE
-#define HWCURSOR_SIZE_ARGB		GTT_PAGE_SIZE * 4
-
-/* Use a 64x64 HW cursor */
-#define I810_CURSOR_X			64
-#define I810_CURSOR_Y			I810_CURSOR_X
-
-#define PIPE_NAME(n)			('A' + (n))
-
-struct pci_device *
-intel_host_bridge (void);
-
 /**
  * Hints to CreatePixmap to tell the driver how the pixmap is going to be
  * used.
diff --git a/src/intel_display.c b/src/intel_display.c
index a974e34..0a80aa8 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -711,8 +711,7 @@ intel_crtc_init(ScrnInfoPtr scrn, struct intel_mode *mode, int num)
 							   crtc_id(intel_crtc));
 
 	intel_crtc->cursor = drm_intel_bo_alloc(intel->bufmgr, "ARGB cursor",
-						HWCURSOR_SIZE_ARGB,
-						GTT_PAGE_SIZE);
+						4*64*64, 4096);
 
 	intel_crtc->crtc = crtc;
 	list_add(&intel_crtc->link, &mode->crtcs);
diff --git a/src/intel_driver.c b/src/intel_driver.c
index f2770d6..78f7ce3 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -932,7 +932,7 @@ I830ScreenInit(SCREEN_INIT_ARGS_DECL)
 	miDCInitialize(screen, xf86GetPointerScreenFuncs());
 
 	xf86DrvMsg(scrn->scrnIndex, X_INFO, "Initializing HW Cursor\n");
-	if (!xf86_cursors_init(screen, I810_CURSOR_X, I810_CURSOR_Y,
+	if (!xf86_cursors_init(screen, 64, 64,
 			       (HARDWARE_CURSOR_TRUECOLOR_AT_8BPP |
 				HARDWARE_CURSOR_BIT_ORDER_MSBFIRST |
 				HARDWARE_CURSOR_INVERT_MASK |
diff --git a/src/intel_memory.c b/src/intel_memory.c
index bfc0e8c..bb7710f 100644
--- a/src/intel_memory.c
+++ b/src/intel_memory.c
@@ -96,7 +96,7 @@ unsigned long intel_get_fence_size(intel_screen_private *intel, unsigned long si
 
 	if (INTEL_INFO(intel)->gen >= 40 || intel->has_relaxed_fencing) {
 		/* The 965 can have fences at any page boundary. */
-		return ALIGN(size, GTT_PAGE_SIZE);
+		return ALIGN(size, 4096);
 	} else {
 		/* Align the size to a power of two greater than the smallest fence
 		 * size.
diff --git a/src/intel_module.c b/src/intel_module.c
index f8ba149..a39affb 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -28,9 +28,7 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
 #include <xf86_OSproc.h>
-#include <xf86cmap.h>
 #include <xf86Parser.h>
 #include <xf86drmMode.h>
 
@@ -159,10 +157,12 @@ SymTabRec *intel_chipsets = (SymTabRec *) _intel_chipsets;
     { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0, 0, (intptr_t)(i) }
 
 static const struct pci_id_match intel_device_match[] = {
+#if !KMS_ONLY
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810_DC100, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I810_E, &intel_i81x_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_I815, &intel_i81x_info ),
+#endif
 
 	INTEL_DEVICE_MATCH (PCI_CHIP_I830_M, &intel_i830_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_845_G, &intel_i845_info ),
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 6fc17bd..9821612 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -142,8 +142,6 @@ static int i810_pitches[] = {
 };
 #endif
 
-int I830EntityIndex = -1;
-
 /*
  * I810GetRec and I810FreeRec --
  *
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d7fa71b..03115c2 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -289,10 +289,6 @@ extern void sna_mode_wakeup(struct sna *sna);
 extern void sna_mode_redisplay(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
-extern int sna_crtc_id(xf86CrtcPtr crtc);
-extern bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc);
-extern int sna_output_dpms_status(xf86OutputPtr output);
-
 extern int sna_page_flip(struct sna *sna,
 			 struct kgem_bo *bo,
 			 void *data,
@@ -356,9 +352,9 @@ static inline void sna_dri_destroy_window(WindowPtr win) { }
 static inline void sna_dri_close(struct sna *sna, ScreenPtr pScreen) { }
 #endif
 
-extern bool sna_crtc_on(xf86CrtcPtr crtc);
-int sna_crtc_to_pipe(xf86CrtcPtr crtc);
-int sna_crtc_to_plane(xf86CrtcPtr crtc);
+extern int sna_crtc_to_pipe(xf86CrtcPtr crtc);
+extern int sna_crtc_to_plane(xf86CrtcPtr crtc);
+extern int sna_crtc_id(xf86CrtcPtr crtc);
 
 CARD32 sna_format_for_depth(int depth);
 CARD32 sna_render_format_for_depth(int depth);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0928f6a..87a69ba 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -113,21 +113,14 @@ int sna_crtc_id(xf86CrtcPtr crtc)
 	return to_sna_crtc(crtc)->id;
 }
 
-bool sna_crtc_on(xf86CrtcPtr crtc)
-{
-	return to_sna_crtc(crtc)->bo != NULL;
-}
-
 int sna_crtc_to_pipe(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	return sna_crtc->pipe;
+	return to_sna_crtc(crtc)->pipe;
 }
 
 int sna_crtc_to_plane(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	return sna_crtc->plane;
+	return to_sna_crtc(crtc)->plane;
 }
 
 static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
@@ -465,7 +458,7 @@ mode_to_kmode(struct drm_mode_modeinfo *kmode, DisplayModePtr mode)
 	kmode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
 
-bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
+static bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 {
 	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_crtc mode;
@@ -1676,13 +1669,6 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 	}
 }
 
-int
-sna_output_dpms_status(xf86OutputPtr output)
-{
-	struct sna_output *sna_output = output->driver_private;
-	return sna_output->dpms_mode;
-}
-
 static bool
 sna_property_ignore(drmModePropertyPtr prop)
 {
@@ -2579,7 +2565,7 @@ sna_wait_for_scanline(struct sna *sna,
 	int y1, y2, pipe;
 
 	assert(crtc);
-	assert(sna_crtc_on(crtc));
+	assert(to_sna_crtc(crtc)->bo != NULL);
 	assert(pixmap == sna->front);
 
 	/* XXX WAIT_EVENT is still causing hangs on SNB */
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6afeb51..5f237b0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -553,7 +553,6 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
-		assert(sna_crtc_is_bound(sna, crtc));
 		kgem_submit(&sna->kgem);
 		bo = kgem_get_last_request(&sna->kgem);
 	}
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 110bb00..2332ce2 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -295,10 +295,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	/* Push the frame to the GPU as soon as possible so
 	 * we can hit the next vsync.
 	 */
-	if (flush) {
-		assert(sna_crtc_is_bound(sna, crtc));
+	if (flush)
 		kgem_submit(&sna->kgem);
-	}
 
 	return ret;
 }
commit f517bdb12b909ef9d0897166bdabc537551a065b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 14 08:56:21 2012 +0100

    sna: Do not perform batch compaction on old kernels
    
    As they will reject the delta pointing outside of the target batch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e59811f..c528fe7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -697,12 +697,22 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->wedged = 1;
 	}
 
+	kgem->has_relaxed_delta =
+		gem_param(kgem, I915_PARAM_HAS_RELAXED_DELTA) > 0;
+	DBG(("%s: has relaxed delta? %d\n", __FUNCTION__,
+	     kgem->has_relaxed_delta));
+
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
 		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
 	if (gen == 70)
 		kgem->batch_size = 16*1024;
+	if (!kgem->has_relaxed_delta)
+		kgem->batch_size = 4*1024;
+
+	DBG(("%s: maximum batch size? %d\n", __FUNCTION__,
+	     kgem->batch_size));
 
 	kgem->min_alignment = 4;
 	if (gen < 40)
@@ -1877,6 +1887,9 @@ static int compact_batch_surface(struct kgem *kgem)
 {
 	int size, shrink, n;
 
+	if (!kgem->has_relaxed_delta)
+		return kgem->batch_size;
+
 	/* See if we can pack the contents into one or two pages */
 	n = ALIGN(kgem->batch_size, 1024);
 	size = n - kgem->surface + kgem->nbatch;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index ba110b6..7596326 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -154,6 +154,7 @@ struct kgem {
 
 	uint32_t has_vmap :1;
 	uint32_t has_relaxed_fencing :1;
+	uint32_t has_relaxed_delta :1;
 	uint32_t has_semaphores :1;
 	uint32_t has_cache_level :1;
 	uint32_t has_llc :1;
commit ce69a1e8686889f1eebb4cb3a39c41f473e58b93
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 23:25:02 2012 +0100

    sna: Add some DBG for selecting glyph path
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index f8959e1..6f9faf4 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1433,6 +1433,7 @@ sna_glyphs(CARD8 op,
 	if (mask && dst->pCompositeClip->data == NULL &&
 	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
 	    mask == glyphs_format(nlist, list, glyphs)) {
+		DBG(("%s: discarding mask\n", __FUNCTION__));
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
 				  src_x, src_y,
@@ -1441,8 +1442,10 @@ sna_glyphs(CARD8 op,
 	}
 
 	/* Otherwise see if we can substitute a mask */
-	if (!mask)
+	if (!mask) {
 		mask = glyphs_format(nlist, list, glyphs);
+		DBG(("%s: substituting mask? %d\n", __FUNCTION__, mask!=NULL));
+	}
 	if (mask) {
 		if (glyphs_via_mask(sna, op,
 				    src, dst, mask,
commit b68b2d90915f7a97e43a39e3117927544886fba0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 23:24:32 2012 +0100

    sna: Add some DBG for short-circuiting choice of source bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 546148d..02727a3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -510,6 +510,7 @@ sna_render_pixmap_bo(struct sna *sna,
 		if (priv->gpu_bo &&
 		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage ||
 		     priv->gpu_bo->proxy)) {
+			DBG(("%s: GPU all damaged\n", __FUNCTION__));
 			channel->bo = kgem_bo_reference(priv->gpu_bo);
 			return 1;
 		}
@@ -517,6 +518,7 @@ sna_render_pixmap_bo(struct sna *sna,
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
 		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
+			DBG(("%s: CPU all damaged\n", __FUNCTION__));
 			channel->bo = kgem_bo_reference(priv->cpu_bo);
 			return 1;
 		}
commit ad4786b285074b5bd70b3ad0bea6ec1b77ad6740
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 22:24:51 2012 +0100

    sna: Aim for consistency and use stdbool except for core X APIs
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 93880a8..622ba1f 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -57,8 +57,8 @@
 #define VERTEX(v) batch_emit_float(sna, v)
 
 static const struct blendinfo {
-	Bool dst_alpha;
-	Bool src_alpha;
+	bool dst_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen2_blend_op[] = {
@@ -148,7 +148,7 @@ gen2_get_dst_format(uint32_t format)
 #undef BIAS
 }
 
-static Bool
+static bool
 gen2_check_dst_format(uint32_t format)
 {
 	switch (format) {
@@ -160,9 +160,9 @@ gen2_check_dst_format(uint32_t format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -229,32 +229,32 @@ gen2_sampler_tiling_bits(uint32_t tiling)
 	return bits;
 }
 
-static Bool
+static bool
 gen2_check_filter(PicturePtr picture)
 {
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool
+static bool
 gen2_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -406,7 +406,7 @@ gen2_get_blend_factors(const struct sna_composite_op *op,
 }
 
 static uint32_t gen2_get_blend_cntl(int op,
-				    Bool has_component_alpha,
+				    bool has_component_alpha,
 				    uint32_t dst_format)
 {
 	uint32_t sblend, dblend;
@@ -503,7 +503,7 @@ static void gen2_emit_invariant(struct sna *sna)
 	      ENABLE_COLOR_WRITE |
 	      ENABLE_TEX_CACHE);
 
-	sna->render_state.gen2.need_invariant = FALSE;
+	sna->render_state.gen2.need_invariant = false;
 }
 
 static void
@@ -1104,14 +1104,14 @@ static void gen2_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen2_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_solid  = TRUE;
+	channel->is_solid  = true;
 	channel->width  = 1;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1121,12 +1121,12 @@ gen2_composite_solid_init(struct sna *sna,
 
 	channel->scale[0]  = channel->scale[1]  = 1;
 	channel->offset[0] = channel->offset[1] = 0;
-	return TRUE;
+	return true;
 }
 
 #define xFixedToDouble(f) pixman_fixed_to_double(f)
 
-static Bool
+static bool
 gen2_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1161,7 +1161,7 @@ gen2_composite_linear_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
-	channel->is_linear = TRUE;
+	channel->is_linear = true;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1229,17 +1229,17 @@ gen2_composite_linear_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool source_is_covered(PicturePtr picture,
+static bool source_is_covered(PicturePtr picture,
 			      int x, int y,
 			      int width, int height)
 {
 	int x1, y1, x2, y2;
 
 	if (picture->repeat && picture->repeatType != RepeatNone)
-		return TRUE;
+		return true;
 
 	if (picture->pDrawable == NULL)
-		return FALSE;
+		return false;
 
 	if (picture->transform) {
 		pixman_box16_t sample;
@@ -1268,7 +1268,7 @@ static Bool source_is_covered(PicturePtr picture,
 		y2 <= picture->pDrawable->height;
 }
 
-static Bool
+static bool
 gen2_check_card_format(struct sna *sna,
 		       PicturePtr picture,
 		       struct sna_composite_channel *channel,
@@ -1279,24 +1279,24 @@ gen2_check_card_format(struct sna *sna,
 
 	for (i = 0; i < ARRAY_SIZE(i8xx_tex_formats); i++) {
 		if (i8xx_tex_formats[i].fmt == format)
-			return TRUE;
+			return true;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(i85x_tex_formats); i++) {
 		if (i85x_tex_formats[i].fmt == format) {
 			if (sna->kgem.gen >= 21)
-				return TRUE;
+				return true;
 
 			if (source_is_covered(picture, x, y, w,h)) {
 				channel->is_opaque = true;
-				return TRUE;
+				return true;
 			}
 
-			return FALSE;
+			return false;
 		}
 	}
 
-	return FALSE;
+	return false;
 }
 
 static int
@@ -1315,10 +1315,10 @@ gen2_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
-	channel->is_linear = FALSE;
-	channel->is_opaque = FALSE;
-	channel->is_affine = TRUE;
+	channel->is_solid = false;
+	channel->is_linear = false;
+	channel->is_opaque = false;
+	channel->is_affine = true;
 	channel->transform = NULL;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -1398,7 +1398,7 @@ gen2_composite_picture(struct sna *sna,
 				    x, y, w, h, dst_x, dst_y);
 }
 
-static Bool
+static bool
 gen2_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -1412,19 +1412,19 @@ gen2_composite_set_target(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_WRITE | MOVE_READ);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	if (priv->gpu_bo->pitch < 8) {
 		struct kgem_bo *bo;
 
 		if (priv->pinned)
-			return FALSE;
+			return false;
 
 		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 				     op->dst.width, op->dst.height, 8,
 				     op->dst.pixmap->drawable.bitsPerPixel);
 		if (bo == NULL)
-			return FALSE;
+			return false;
 
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = bo;
@@ -1437,10 +1437,10 @@ gen2_composite_set_target(struct sna *sna,
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst,
 	PicturePtr src,
@@ -1450,34 +1450,34 @@ try_blt(struct sna *sna,
 
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: target too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	/* If it is a solid, try to use the BLT paths */
 	if (sna_picture_is_solid(src, &color))
-		return TRUE;
+		return true;
 
 	if (!src->pDrawable)
-		return FALSE;
+		return false;
 
 	if (too_large(src->pDrawable->width, src->pDrawable->height)) {
 		DBG(("%s: source too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     src->pDrawable->width, src->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 	return is_cpu(src->pDrawable);
 }
@@ -1486,14 +1486,14 @@ static bool
 is_unhandled_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -1564,7 +1564,7 @@ gen2_composite_fallback(struct sna *sna,
 	if (!gen2_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -1586,11 +1586,11 @@ gen2_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -1598,18 +1598,18 @@ gen2_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -1619,25 +1619,25 @@ gen2_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -1648,40 +1648,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen2_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen2_check_format(sna, mask))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -1691,10 +1691,10 @@ reuse_source(struct sna *sna,
 	mc->filter = mask->filter;
 	mc->pict_format = mask->format;
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen2_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -1711,12 +1711,12 @@ gen2_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -1736,10 +1736,10 @@ gen2_render_composite(struct sna *sna,
 			      dst_x, dst_y,
 			      width, height,
 			      tmp))
-		return TRUE;
+		return true;
 
 	if (gen2_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -1752,7 +1752,7 @@ gen2_render_composite(struct sna *sna,
 	if (!gen2_composite_set_target(sna, tmp, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
@@ -1761,7 +1761,7 @@ gen2_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
@@ -1770,7 +1770,7 @@ gen2_render_composite(struct sna *sna,
 	    tmp->dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen2_composite_picture(sna, src, &tmp->src,
@@ -1809,13 +1809,13 @@ gen2_render_composite(struct sna *sna,
 			 * and on the source value.  We can only get one of those
 			 * into the single source value that we get to blend with.
 			 */
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 			if (gen2_blend_op[op].src_alpha &&
 			    (gen2_blend_op[op].src_blend != BLENDFACTOR_ZERO)) {
 				if (op != PictOpOver)
-					return FALSE;
+					return false;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -1865,7 +1865,7 @@ gen2_render_composite(struct sna *sna,
 	}
 
 	gen2_emit_composite_state(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -1876,7 +1876,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 fastcall static void
@@ -2094,7 +2094,7 @@ static void gen2_emit_composite_spans_state(struct sna *sna,
 	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
 	BATCH(!op->base.src.is_solid << 12);
 	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY | S3_DIFFUSE_PRESENT);
-	BATCH(gen2_get_blend_cntl(op->base.op, FALSE, op->base.dst.format));
+	BATCH(gen2_get_blend_cntl(op->base.op, false, op->base.dst.format));
 	if (memcmp(sna->kgem.batch + sna->render_state.gen2.ls1 + 1,
 		   sna->kgem.batch + unwind + 1,
 		   3 * sizeof(uint32_t)) == 0)
@@ -2191,7 +2191,7 @@ gen2_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -2206,17 +2206,17 @@ gen2_render_composite_spans(struct sna *sna,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
 #if NO_COMPOSITE_SPANS
-	return FALSE;
+	return false;
 #endif
 
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (gen2_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -2225,7 +2225,7 @@ gen2_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -2236,7 +2236,7 @@ gen2_render_composite_spans(struct sna *sna,
 	if (!gen2_composite_set_target(sna, &tmp->base, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
@@ -2245,7 +2245,7 @@ gen2_render_composite_spans(struct sna *sna,
 	    tmp->base.dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen2_composite_picture(sna, src, &tmp->base.src,
@@ -2294,7 +2294,7 @@ gen2_render_composite_spans(struct sna *sna,
 	}
 
 	gen2_emit_composite_spans_state(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -2302,7 +2302,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2347,7 +2347,7 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 	      I1_LOAD_S(2) | I1_LOAD_S(3) | I1_LOAD_S(8) | 2);
 	BATCH(0);
 	BATCH(S3_CULLMODE_NONE | S3_VERTEXHAS_XY);
-	BATCH(gen2_get_blend_cntl(op->op, FALSE, op->dst.format));
+	BATCH(gen2_get_blend_cntl(op->op, false, op->dst.format));
 	if (memcmp(sna->kgem.batch + sna->render_state.gen2.ls1 + 1,
 		   sna->kgem.batch + ls1 + 1,
 		   3 * sizeof(uint32_t)) == 0)
@@ -2364,7 +2364,7 @@ static void gen2_emit_fill_composite_state(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 gen2_render_fill_boxes_try_blt(struct sna *sna,
 			       CARD8 op, PictFormat format,
 			       const xRenderColor *color,
@@ -2375,7 +2375,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 	uint32_t pixel;
 
 	if (op > PictOpSrc)
-		return FALSE;
+		return false;
 
 	if (op == PictOpClear) {
 		alu = GXclear;
@@ -2386,7 +2386,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 					    color->blue,
 					    color->alpha,
 					    format))
-		return FALSE;
+		return false;
 	else
 		alu = GXcopy;
 
@@ -2395,7 +2395,7 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 				  pixel, box, n);
 }
 
-static inline Bool prefer_blt_fill(struct sna *sna)
+static inline bool prefer_blt_fill(struct sna *sna)
 {
 #if PREFER_BLT_FILL
 	return true;
@@ -2404,7 +2404,7 @@ static inline Bool prefer_blt_fill(struct sna *sna)
 #endif
 }
 
-static inline Bool prefer_blt_copy(struct sna *sna, unsigned flags)
+static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
 {
 #if PREFER_BLT_COPY
 	return true;
@@ -2413,7 +2413,7 @@ static inline Bool prefer_blt_copy(struct sna *sna, unsigned flags)
 #endif
 }
 
-static Bool
+static bool
 gen2_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -2427,7 +2427,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen2_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_FILL_BOXES
@@ -2448,10 +2448,10 @@ gen2_render_fill_boxes(struct sna *sna,
 		if (gen2_render_fill_boxes_try_blt(sna, op, format, color,
 						   dst, dst_bo,
 						   box, n))
-			return TRUE;
+			return true;
 
 		if (!gen2_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		assert(dst_bo->pitch >= 8);
 		return sna_tiling_fill_boxes(sna, op, format, color,
@@ -2462,7 +2462,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	    gen2_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
-		return TRUE;
+		return true;
 
 	if (op == PictOpClear)
 		pixel = 0;
@@ -2472,7 +2472,7 @@ gen2_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s: using shader for op=%d, format=%x, pixel=%x\n",
 	     __FUNCTION__, op, (int)format, pixel));
@@ -2516,7 +2516,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	} while (n);
 
 	gen2_vertex_flush(sna, &tmp);
-	return TRUE;
+	return true;
 }
 
 static void gen2_emit_fill_state(struct sna *sna,
@@ -2621,7 +2621,7 @@ gen2_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	gen2_vertex_flush(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -2640,7 +2640,7 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
@@ -2677,10 +2677,10 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 	tmp->done  = gen2_render_fill_op_done;
 
 	gen2_emit_fill_state(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen2_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -2698,7 +2698,7 @@ gen2_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -2716,7 +2716,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna) &&
 	    gen2_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
@@ -2728,7 +2728,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		kgem_submit(&sna->kgem);
 		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
 						 x1, y1, x2, y2, alu))
-			return TRUE;
+			return true;
 		assert(kgem_check_bo(&sna->kgem, bo, NULL));
 	}
 
@@ -2757,7 +2757,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	VERTEX(y1);
 	gen2_vertex_flush(sna, &tmp);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2855,7 +2855,7 @@ static void gen2_emit_copy_state(struct sna *sna, const struct sna_composite_op
 	gen2_emit_texture(sna, &op->src, 0);
 }
 
-static Bool
+static bool
 gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2865,7 +2865,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -2884,7 +2884,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (src_bo == dst_bo || /* XXX handle overlap using 3D ? */
 	    too_large(src->drawable.width, src->drawable.height) ||
@@ -2983,7 +2983,7 @@ fallback:
 
 	gen2_vertex_flush(sna, &tmp);
 	sna_render_composite_redirect_done(sna, &tmp);
-	return TRUE;
+	return true;
 
 fallback_tiled:
 	return sna_tiling_copy_boxes(sna, alu,
@@ -3026,7 +3026,7 @@ gen2_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 	gen2_vertex_flush(sna, &op->base);
 }
 
-static Bool
+static bool
 gen2_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3034,7 +3034,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3049,7 +3049,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(src->drawable.width, src->drawable.height) ||
@@ -3058,7 +3058,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3089,13 +3089,13 @@ fallback:
 	tmp->done = gen2_render_copy_done;
 
 	gen2_emit_composite_state(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
 static void
 gen2_render_reset(struct sna *sna)
 {
-	sna->render_state.gen2.need_invariant = TRUE;
+	sna->render_state.gen2.need_invariant = true;
 	sna->render_state.gen2.logic_op_enabled = 0;
 	sna->render_state.gen2.vertex_offset = 0;
 	sna->render_state.gen2.target = 0;
@@ -3124,7 +3124,7 @@ gen2_render_context_switch(struct kgem *kgem,
 	sna->blt_state.fill_bo = 0;
 }
 
-Bool gen2_render_init(struct sna *sna)
+bool gen2_render_init(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 
@@ -3148,5 +3148,5 @@ Bool gen2_render_init(struct sna *sna)
 
 	render->max_3d_size = MAX_3D_SIZE;
 	render->max_3d_pitch = MAX_3D_PITCH;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 20286fc..e02eb89 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -72,8 +72,8 @@ enum gen3_radial_mode {
 };
 
 static const struct blendinfo {
-	Bool dst_alpha;
-	Bool src_alpha;
+	bool dst_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen3_blend_op[] = {
@@ -101,21 +101,21 @@ static const struct blendinfo {
 static const struct formatinfo {
 	unsigned int fmt, xfmt;
 	uint32_t card_fmt;
-	Bool rb_reversed;
+	bool rb_reversed;
 } gen3_tex_formats[] = {
-	{PICT_a8, 0, MAPSURF_8BIT | MT_8BIT_A8, FALSE},
-	{PICT_a8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_ARGB8888, FALSE},
-	{PICT_x8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_XRGB8888, FALSE},
-	{PICT_a8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_ABGR8888, FALSE},
-	{PICT_x8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_XBGR8888, FALSE},
-	{PICT_a2r10g10b10, PICT_x2r10g10b10, MAPSURF_32BIT | MT_32BIT_ARGB2101010, FALSE},
-	{PICT_a2b10g10r10, PICT_x2b10g10r10, MAPSURF_32BIT | MT_32BIT_ABGR2101010, FALSE},
-	{PICT_r5g6b5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, FALSE},
-	{PICT_b5g6r5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, TRUE},
-	{PICT_a1r5g5b5, PICT_x1r5g5b5, MAPSURF_16BIT | MT_16BIT_ARGB1555, FALSE},
-	{PICT_a1b5g5r5, PICT_x1b5g5r5, MAPSURF_16BIT | MT_16BIT_ARGB1555, TRUE},
-	{PICT_a4r4g4b4, PICT_x4r4g4b4, MAPSURF_16BIT | MT_16BIT_ARGB4444, FALSE},
-	{PICT_a4b4g4r4, PICT_x4b4g4r4, MAPSURF_16BIT | MT_16BIT_ARGB4444, TRUE},
+	{PICT_a8, 0, MAPSURF_8BIT | MT_8BIT_A8, false},
+	{PICT_a8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_ARGB8888, false},
+	{PICT_x8r8g8b8, 0, MAPSURF_32BIT | MT_32BIT_XRGB8888, false},
+	{PICT_a8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_ABGR8888, false},
+	{PICT_x8b8g8r8, 0, MAPSURF_32BIT | MT_32BIT_XBGR8888, false},
+	{PICT_a2r10g10b10, PICT_x2r10g10b10, MAPSURF_32BIT | MT_32BIT_ARGB2101010, false},
+	{PICT_a2b10g10r10, PICT_x2b10g10r10, MAPSURF_32BIT | MT_32BIT_ABGR2101010, false},
+	{PICT_r5g6b5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, false},
+	{PICT_b5g6r5, 0, MAPSURF_16BIT | MT_16BIT_RGB565, true},
+	{PICT_a1r5g5b5, PICT_x1r5g5b5, MAPSURF_16BIT | MT_16BIT_ARGB1555, false},
+	{PICT_a1b5g5r5, PICT_x1b5g5r5, MAPSURF_16BIT | MT_16BIT_ARGB1555, true},
+	{PICT_a4r4g4b4, PICT_x4r4g4b4, MAPSURF_16BIT | MT_16BIT_ARGB4444, false},
+	{PICT_a4b4g4r4, PICT_x4b4g4r4, MAPSURF_16BIT | MT_16BIT_ARGB4444, true},
 };
 
 #define xFixedToDouble(f) pixman_fixed_to_double(f)
@@ -136,14 +136,14 @@ static inline uint32_t gen3_buf_tiling(uint32_t tiling)
 	return v;
 }
 
-static inline Bool
+static inline bool
 gen3_check_pitch_3d(struct kgem_bo *bo)
 {
 	return bo->pitch <= MAX_3D_PITCH;
 }
 
 static uint32_t gen3_get_blend_cntl(int op,
-				    Bool has_component_alpha,
+				    bool has_component_alpha,
 				    uint32_t dst_format)
 {
 	uint32_t sblend = gen3_blend_op[op].src_blend;
@@ -193,7 +193,7 @@ static uint32_t gen3_get_blend_cntl(int op,
 		dblend << S6_CBUF_DST_BLEND_FACT_SHIFT);
 }
 
-static Bool gen3_check_dst_format(uint32_t format)
+static bool gen3_check_dst_format(uint32_t format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -215,13 +215,13 @@ static Bool gen3_check_dst_format(uint32_t format)
 	case PICT_x4r4g4b4:
 	case PICT_a4b4g4r4:
 	case PICT_x4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool gen3_dst_rb_reversed(uint32_t format)
+static bool gen3_dst_rb_reversed(uint32_t format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -234,9 +234,9 @@ static Bool gen3_dst_rb_reversed(uint32_t format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -277,7 +277,7 @@ static uint32_t gen3_get_dst_format(uint32_t format)
 #undef BIAS
 }
 
-static Bool gen3_check_format(PicturePtr p)
+static bool gen3_check_format(PicturePtr p)
 {
 	switch (p->format) {
 	case PICT_a8:
@@ -293,13 +293,13 @@ static Bool gen3_check_format(PicturePtr p)
 	case PICT_a1b5g5r5:
 	case PICT_a4r4g4b4:
 	case PICT_a4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool gen3_check_xformat(PicturePtr p)
+static bool gen3_check_xformat(PicturePtr p)
 {
 	switch (p->format) {
 	case PICT_a8r8g8b8:
@@ -321,9 +321,9 @@ static Bool gen3_check_xformat(PicturePtr p)
 	case PICT_x4r4g4b4:
 	case PICT_a4b4g4r4:
 	case PICT_x4b4g4r4:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -367,19 +367,19 @@ static uint32_t gen3_gradient_repeat(uint32_t repeat)
 #undef REPEAT
 }
 
-static Bool gen3_check_repeat(PicturePtr p)
+static bool gen3_check_repeat(PicturePtr p)
 {
 	if (!p->repeat)
-		return TRUE;
+		return true;
 
 	switch (p->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -404,9 +404,9 @@ static bool gen3_check_filter(PicturePtr p)
 	switch (p->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -917,7 +917,7 @@ gen3_composite_emit_shader(struct sna *sna,
 			   const struct sna_composite_op *op,
 			   uint8_t blend)
 {
-	Bool dst_is_alpha = PIXMAN_FORMAT_RGB(op->dst.format) == 0;
+	bool dst_is_alpha = PIXMAN_FORMAT_RGB(op->dst.format) == 0;
 	const struct sna_composite_channel *src, *mask;
 	struct gen3_render_state *state = &sna->render_state.gen3;
 	uint32_t shader_offset, id;
@@ -1292,7 +1292,7 @@ static void gen3_emit_invariant(struct sna *sna)
 	OUT_BATCH(_3DSTATE_STIPPLE);
 	OUT_BATCH(0x00000000);
 
-	sna->render_state.gen3.need_invariant = FALSE;
+	sna->render_state.gen3.need_invariant = false;
 }
 
 #define MAX_OBJECTS 3 /* worst case: dst + src + mask  */
@@ -1585,7 +1585,7 @@ static void gen3_magic_ca_pass(struct sna *sna,
 	     sna->render.vertex_index - sna->render.vertex_start));
 
 	OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(6) | 0);
-	OUT_BATCH(gen3_get_blend_cntl(PictOpAdd, TRUE, op->dst.format));
+	OUT_BATCH(gen3_get_blend_cntl(PictOpAdd, true, op->dst.format));
 	gen3_composite_emit_shader(sna, op, PictOpAdd);
 
 	OUT_BATCH(PRIM3D_RECTLIST | PRIM3D_INDIRECT_SEQUENTIAL |
@@ -1950,7 +1950,7 @@ gen3_render_reset(struct sna *sna)
 {
 	struct gen3_render_state *state = &sna->render_state.gen3;
 
-	state->need_invariant = TRUE;
+	state->need_invariant = true;
 	state->current_dst = 0;
 	state->tex_count = 0;
 	state->last_drawrect_limit = ~0U;
@@ -1999,7 +1999,7 @@ gen3_render_expire(struct kgem *kgem)
 	}
 }
 
-static Bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
+static bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
 					      CARD32 format)
 {
 	unsigned int i;
@@ -2008,23 +2008,23 @@ static Bool gen3_composite_channel_set_format(struct sna_composite_channel *chan
 		if (gen3_tex_formats[i].fmt == format) {
 			channel->card_format = gen3_tex_formats[i].card_fmt;
 			channel->rb_reversed = gen3_tex_formats[i].rb_reversed;
-			return TRUE;
+			return true;
 		}
 	}
-	return FALSE;
+	return false;
 }
 
-static Bool source_is_covered(PicturePtr picture,
+static bool source_is_covered(PicturePtr picture,
 			      int x, int y,
 			      int width, int height)
 {
 	int x1, y1, x2, y2;
 
 	if (picture->repeat && picture->repeatType != RepeatNone)
-		return TRUE;
+		return true;
 
 	if (picture->pDrawable == NULL)
-		return FALSE;
+		return false;
 
 	if (picture->transform) {
 		pixman_box16_t sample;
@@ -2053,7 +2053,7 @@ static Bool source_is_covered(PicturePtr picture,
 		y2 <= picture->pDrawable->height;
 }
 
-static Bool gen3_composite_channel_set_xformat(PicturePtr picture,
+static bool gen3_composite_channel_set_xformat(PicturePtr picture,
 					       struct sna_composite_channel *channel,
 					       int x, int y,
 					       int width, int height)
@@ -2061,24 +2061,24 @@ static Bool gen3_composite_channel_set_xformat(PicturePtr picture,
 	unsigned int i;
 
 	if (PICT_FORMAT_A(picture->format) != 0)
-		return FALSE;
+		return false;
 
 	if (width == 0 || height == 0)
-		return FALSE;
+		return false;
 
 	if (!source_is_covered(picture, x, y, width, height))
-		return FALSE;
+		return false;
 
 	for (i = 0; i < ARRAY_SIZE(gen3_tex_formats); i++) {
 		if (gen3_tex_formats[i].xfmt == picture->format) {
 			channel->card_format = gen3_tex_formats[i].card_fmt;
 			channel->rb_reversed = gen3_tex_formats[i].rb_reversed;
 			channel->alpha_fixup = true;
-			return TRUE;
+			return true;
 		}
 	}
 
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2123,7 +2123,7 @@ static void gen3_composite_channel_convert(struct sna_composite_channel *channel
 		gen3_composite_channel_set_format(channel, channel->pict_format);
 }
 
-static Bool gen3_gradient_setup(struct sna *sna,
+static bool gen3_gradient_setup(struct sna *sna,
 				PicturePtr picture,
 				struct sna_composite_channel *channel,
 				int16_t ox, int16_t oy)
@@ -2140,14 +2140,14 @@ static Bool gen3_gradient_setup(struct sna *sna,
 		channel->repeat = picture->repeatType;
 		break;
 	default:
-		return FALSE;
+		return false;
 	}
 
 	channel->bo =
 		sna_render_get_gradient(sna,
 					(PictGradient *)picture->pSourcePict);
 	if (channel->bo == NULL)
-		return FALSE;
+		return false;
 
 	channel->pict_format = PICT_a8r8g8b8;
 	channel->card_format = MAPSURF_32BIT | MT_32BIT_ARGB8888;
@@ -2166,7 +2166,7 @@ static Bool gen3_gradient_setup(struct sna *sna,
 	channel->offset[0] = ox;
 	channel->offset[1] = oy;
 	channel->scale[0] = channel->scale[1] = 1;
-	return TRUE;
+	return true;
 }
 
 static int
@@ -2271,7 +2271,7 @@ gen3_init_radial(struct sna *sna,
 	return 1;
 }
 
-static Bool
+static bool
 sna_picture_is_clear(PicturePtr picture,
 		     int x, int y, int w, int h,
 		     uint32_t *color)
@@ -2279,20 +2279,20 @@ sna_picture_is_clear(PicturePtr picture,
 	struct sna_pixmap *priv;
 
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	priv = sna_pixmap(get_drawable_pixmap(picture->pDrawable));
 	if (priv == NULL || !priv->clear)
-		return FALSE;
+		return false;
 
 	if (!source_is_covered(picture, x, y, w, h))
-		return FALSE;
+		return false;
 
 	*color = priv->clear_color;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static int
 gen3_composite_picture(struct sna *sna,
 		       PicturePtr picture,
 		       struct sna_composite_op *op,
@@ -2426,7 +2426,7 @@ source_use_blt(struct sna *sna, PicturePtr picture)
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst,
 	PicturePtr src,
@@ -2434,20 +2434,20 @@ try_blt(struct sna *sna,
 {
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: target too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
@@ -2474,7 +2474,7 @@ gen3_align_vertex(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 gen3_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -2497,7 +2497,7 @@ gen3_composite_set_target(struct sna *sna,
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		/* For single-stream mode there should be no minimum alignment
 		 * required, except that the width must be at least 2 elements.
@@ -2506,14 +2506,14 @@ gen3_composite_set_target(struct sna *sna,
 			struct kgem_bo *bo;
 
 			if (priv->pinned)
-				return FALSE;
+				return false;
 
 			bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
 					     op->dst.width, op->dst.height,
 					     2*op->dst.pixmap->drawable.bitsPerPixel,
 					     op->dst.pixmap->drawable.bitsPerPixel);
 			if (bo == NULL)
-				return FALSE;
+				return false;
 
 			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 			priv->gpu_bo = bo;
@@ -2537,7 +2537,7 @@ gen3_composite_set_target(struct sna *sna,
 	     op->dst.x, op->dst.y,
 	     op->damage ? *op->damage : (void *)-1));
 
-	return TRUE;
+	return true;
 }
 
 static inline uint8_t
@@ -2634,7 +2634,7 @@ gen3_composite_fallback(struct sna *sna,
 	if (!gen3_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2656,11 +2656,11 @@ gen3_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask &&
@@ -2670,7 +2670,7 @@ gen3_composite_fallback(struct sna *sna,
 	{
 		DBG(("%s: component-alpha mask with op=%d, should fallback\n",
 		     __FUNCTION__, op));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2678,18 +2678,18 @@ gen3_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2699,25 +2699,25 @@ gen3_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2726,37 +2726,37 @@ reuse_source(struct sna *sna,
 	     PicturePtr mask, struct sna_composite_channel *mc, int msk_x, int msk_y)
 {
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (mask == src) {
 		*mc = *sc;
 		if (mc->bo)
 			kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if ((src->pDrawable == NULL || mask->pDrawable != src->pDrawable))
-		return FALSE;
+		return false;
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen3_check_format(mask))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2768,10 +2768,10 @@ reuse_source(struct sna *sna,
 	gen3_composite_channel_set_format(mc, mask->format);
 	if (mc->bo)
 		kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2788,12 +2788,12 @@ gen3_render_composite(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2813,10 +2813,10 @@ gen3_render_composite(struct sna *sna,
 			      dst_x, dst_y,
 			      width, height,
 			      tmp))
-		return TRUE;
+		return true;
 
 	if (gen3_composite_fallback(sna, op, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2829,7 +2829,7 @@ gen3_render_composite(struct sna *sna,
 	if (!gen3_composite_set_target(sna, tmp, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode != KGEM_RENDER &&
@@ -2838,7 +2838,7 @@ gen3_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
@@ -2848,12 +2848,12 @@ gen3_render_composite(struct sna *sna,
 	    !gen3_check_pitch_3d(tmp->dst.bo)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	tmp->u.gen3.num_constants = 0;
 	tmp->src.u.gen3.type = SHADER_TEXTURE;
-	tmp->src.is_affine = TRUE;
+	tmp->src.is_affine = true;
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen3_composite_picture(sna, src, tmp, &tmp->src,
 				       src_x, src_y,
@@ -2872,9 +2872,9 @@ gen3_render_composite(struct sna *sna,
 	DBG(("%s: source type=%d\n", __FUNCTION__, tmp->src.u.gen3.type));
 
 	tmp->mask.u.gen3.type = SHADER_NONE;
-	tmp->mask.is_affine = TRUE;
-	tmp->need_magic_ca_pass = FALSE;
-	tmp->has_component_alpha = FALSE;
+	tmp->mask.is_affine = true;
+	tmp->need_magic_ca_pass = false;
+	tmp->has_component_alpha = false;
 	if (mask && tmp->src.u.gen3.type != SHADER_ZERO) {
 		if (!reuse_source(sna,
 				  src, &tmp->src, src_x, src_y,
@@ -2915,16 +2915,16 @@ gen3_render_composite(struct sna *sna,
 				 */
 				DBG(("%s: component-alpha mask: %d\n",
 				     __FUNCTION__, tmp->mask.u.gen3.type));
-				tmp->has_component_alpha = TRUE;
+				tmp->has_component_alpha = true;
 				if (tmp->mask.u.gen3.type == SHADER_WHITE) {
 					tmp->mask.u.gen3.type = SHADER_NONE;
-					tmp->has_component_alpha = FALSE;
+					tmp->has_component_alpha = false;
 				} else if (gen3_blend_op[op].src_alpha &&
 					   (gen3_blend_op[op].src_blend != BLENDFACT_ZERO)) {
 					if (op != PictOpOver)
 						goto cleanup_mask;
 
-					tmp->need_magic_ca_pass = TRUE;
+					tmp->need_magic_ca_pass = true;
 					tmp->op = PictOpOutReverse;
 					sna->render.vertex_start = sna->render.vertex_index;
 				}
@@ -3037,7 +3037,7 @@ gen3_render_composite(struct sna *sna,
 
 	gen3_emit_composite_state(sna, tmp);
 	gen3_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -3048,7 +3048,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -3367,7 +3367,7 @@ gen3_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen3_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3384,17 +3384,17 @@ gen3_render_composite_spans(struct sna *sna,
 	     src_x, src_y, dst_x, dst_y, width, height));
 
 #if NO_COMPOSITE_SPANS
-	return FALSE;
+	return false;
 #endif
 
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (gen3_composite_fallback(sna, op, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3403,7 +3403,7 @@ gen3_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3414,7 +3414,7 @@ gen3_render_composite_spans(struct sna *sna,
 	if (!gen3_composite_set_target(sna, &tmp->base, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
@@ -3424,11 +3424,11 @@ gen3_render_composite_spans(struct sna *sna,
 	    !gen3_check_pitch_3d(tmp->base.dst.bo)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	tmp->base.src.u.gen3.type = SHADER_TEXTURE;
-	tmp->base.src.is_affine = TRUE;
+	tmp->base.src.is_affine = true;
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen3_composite_picture(sna, src, &tmp->base, &tmp->base.src,
 				       src_x, src_y,
@@ -3505,7 +3505,7 @@ gen3_render_composite_spans(struct sna *sna,
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3513,7 +3513,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -3862,7 +3862,7 @@ gen3_get_inline_rectangles(struct sna *sna, int want, int floats_per_vertex)
 	return want;
 }
 
-static Bool
+static bool
 gen3_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -3887,7 +3887,7 @@ gen3_render_video(struct sna *sna,
 
 	dst_bo = priv->gpu_bo;
 	if (dst_bo == NULL)
-		return FALSE;
+		return false;
 
 	if (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
 	    !gen3_check_pitch_3d(dst_bo)) {
@@ -3900,7 +3900,7 @@ gen3_render_video(struct sna *sna,
 							   width, height, bpp),
 					0);
 		if (!dst_bo)
-			return FALSE;
+			return false;
 
 		pix_xoff = -dxo;
 		pix_yoff = -dyo;
@@ -4008,7 +4008,7 @@ gen3_render_video(struct sna *sna,
 	}
 	priv->clear = false;
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -4031,7 +4031,7 @@ gen3_render_copy_setup_source(struct sna_composite_channel *channel,
 	channel->is_affine = 1;
 }
 
-static Bool
+static bool
 gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -4041,7 +4041,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -4059,7 +4059,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    src_bo == dst_bo || /* XXX handle overlap using 3D ? */
@@ -4068,7 +4068,7 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 fallback_blt:
 		if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
 		    !kgem_bo_can_blt(&sna->kgem, dst_bo))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -4164,7 +4164,7 @@ fallback_blt:
 
 	gen3_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
-	return TRUE;
+	return true;
 
 fallback_tiled:
 	return sna_tiling_copy_boxes(sna, alu,
@@ -4205,7 +4205,7 @@ gen3_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen3_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen3_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -4213,7 +4213,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -4228,7 +4228,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4237,7 +4237,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	    src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch > MAX_3D_PITCH) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -4270,10 +4270,10 @@ fallback:
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_fill_boxes_try_blt(struct sna *sna,
 			       CARD8 op, PictFormat format,
 			       const xRenderColor *color,
@@ -4286,11 +4286,11 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 	if (dst_bo->tiling == I915_TILING_Y) {
 		DBG(("%s: y-tiling, can't blit\n", __FUNCTION__));
 		assert(!too_large(dst->drawable.width, dst->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	if (op > PictOpSrc)
-		return FALSE;
+		return false;
 
 	if (op == PictOpClear) {
 		alu = GXclear;
@@ -4301,7 +4301,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 					    color->blue,
 					    color->alpha,
 					    format))
-		return FALSE;
+		return false;
 	else
 		alu = GXcopy;
 
@@ -4310,7 +4310,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 				  pixel, box, n);
 }
 
-static inline Bool prefer_fill_blt(struct sna *sna)
+static inline bool prefer_fill_blt(struct sna *sna)
 {
 #if PREFER_BLT_FILL
 	return true;
@@ -4319,7 +4319,7 @@ static inline Bool prefer_fill_blt(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 gen3_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -4333,7 +4333,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen3_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 #if NO_FILL_BOXES
@@ -4354,10 +4354,10 @@ gen3_render_fill_boxes(struct sna *sna,
 		if (gen3_render_fill_boxes_try_blt(sna, op, format, color,
 						   dst, dst_bo,
 						   box, n))
-			return TRUE;
+			return true;
 
 		if (!gen3_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		return sna_tiling_fill_boxes(sna, op, format, color,
 					     dst, dst_bo, box, n);
@@ -4367,7 +4367,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	    gen3_render_fill_boxes_try_blt(sna, op, format, color,
 					   dst, dst_bo,
 					   box, n))
-		return TRUE;
+		return true;
 
 	if (op == PictOpClear) {
 		pixel = 0;
@@ -4379,7 +4379,7 @@ gen3_render_fill_boxes(struct sna *sna,
 					     color->alpha,
 					     PICT_a8r8g8b8)) {
 			assert(0);
-			return FALSE;
+			return false;
 		}
 	}
 	DBG(("%s: using shader for op=%d, format=%x, pixel=%x\n",
@@ -4430,7 +4430,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	} while (n);
 
 	gen3_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -4497,7 +4497,7 @@ gen3_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 		gen3_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen3_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -4516,7 +4516,7 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 tmp))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4560,10 +4560,10 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen3_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -4581,7 +4581,7 @@ gen3_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -4599,7 +4599,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_fill_blt(sna) &&
 	    gen3_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4633,7 +4633,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		kgem_submit(&sna->kgem);
 		if (gen3_render_fill_one_try_blt(sna, dst, bo, color,
 						 x1, y1, x2, y2, alu))
-			return TRUE;
+			return true;
 	}
 
 	gen3_emit_composite_state(sna, &tmp);
@@ -4648,7 +4648,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	OUT_VERTEX(y1);
 	gen3_vertex_flush(sna);
 
-	return TRUE;
+	return true;
 }
 
 static void gen3_render_flush(struct sna *sna)
@@ -4661,7 +4661,7 @@ gen3_render_fini(struct sna *sna)
 {
 }
 
-Bool gen3_render_init(struct sna *sna)
+bool gen3_render_init(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 
@@ -4686,5 +4686,5 @@ Bool gen3_render_init(struct sna *sna)
 
 	sna->kgem.retire = gen3_render_retire;
 	sna->kgem.expire = gen3_render_expire;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 44504c5..2edfbd0 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -199,29 +199,29 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 static const struct wm_kernel_info {
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, FALSE),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
+	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, TRUE),
+	       ps_kernel_maskca_srcalpha_affine, true),
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, TRUE),
+	       ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, FALSE),
-	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, FALSE),
+	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
+	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen4_blend_op[] = {
@@ -291,7 +291,7 @@ static inline bool too_large(int width, int height)
 }
 
 static int
-gen4_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen4_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -326,7 +326,7 @@ static void gen4_magic_ca_pass(struct sna *sna,
 
 	gen4_emit_pipelined_pointers(sna, op, PictOpAdd,
 				     gen4_choose_composite_kernel(PictOpAdd,
-								  TRUE, TRUE, op->is_affine));
+								  true, true, op->is_affine));
 
 	OUT_BATCH(GEN4_3DPRIMITIVE |
 		  GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -494,7 +494,7 @@ static void gen4_vertex_close(struct sna *sna)
 
 
 static uint32_t gen4_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -556,7 +556,7 @@ static uint32_t gen4_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen4_check_dst_format(PictFormat format)
+static bool gen4_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -571,10 +571,10 @@ static Bool gen4_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -687,13 +687,13 @@ static uint32_t gen4_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown filter: %s [%d]\n",
 		     __FUNCTION__,
 		     PictureGetFilterName(picture->filter),
 		     picture->filter));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -716,18 +716,18 @@ static uint32_t gen4_repeat(uint32_t repeat)
 static bool gen4_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown repeat: %d\n",
 		     __FUNCTION__, picture->repeatType));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -741,7 +741,7 @@ gen4_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	struct gen4_surface_state *ss;
 	uint32_t domains;
@@ -948,7 +948,7 @@ gen4_emit_composite_primitive(struct sna *sna,
 			      const struct sna_composite_rectangles *r)
 {
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
+	bool is_affine = op->is_affine;
 	const float *src_sf = op->src.scale;
 	const float *mask_sf = op->mask.scale;
 
@@ -1274,7 +1274,7 @@ gen4_emit_invariant(struct sna *sna)
 
 	gen4_emit_state_base_address(sna);
 
-	sna->render_state.gen4.needs_invariant = FALSE;
+	sna->render_state.gen4.needs_invariant = false;
 }
 
 static void
@@ -1393,11 +1393,11 @@ gen4_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen4_render_state *render = &sna->render_state.gen4;
-	Bool has_mask = op->mask.bo != NULL;
+	bool has_mask = op->mask.bo != NULL;
 	int nelem = has_mask ? 2 : 1;
 	int selem;
 	uint32_t w_component;
@@ -1494,12 +1494,12 @@ gen4_bind_surfaces(struct sna *sna,
 		gen4_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen4_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo)
 		binding_table[2] =
 			gen4_bind_bo(sna,
@@ -1507,7 +1507,7 @@ gen4_bind_surfaces(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table &&
@@ -1676,7 +1676,7 @@ static void gen4_video_bind_surfaces(struct sna *sna,
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen4_bind_video_source(sna,
@@ -1691,7 +1691,7 @@ static void gen4_video_bind_surfaces(struct sna *sna,
 	gen4_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen4_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1710,7 +1710,7 @@ gen4_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1725,7 +1725,7 @@ gen4_render_video(struct sna *sna,
 	tmp.src.repeat = SAMPLER_EXTEND_PAD;
 	tmp.u.gen4.wm_kernel =
 		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.ve_id = 1;
 	tmp.priv = frame;
@@ -1792,18 +1792,18 @@ gen4_render_video(struct sna *sna,
 
 	if (sna->render_state.gen4.vertex_offset)
 		gen4_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -1816,7 +1816,7 @@ gen4_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen4_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1946,7 +1946,7 @@ gen4_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2050,7 +2050,7 @@ gen4_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 {
 	struct sna_pixmap *priv;
@@ -2058,7 +2058,7 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	if (!gen4_check_dst_format(dst->format)) {
 		DBG(("%s: incompatible render target format %08x\n",
 		     __FUNCTION__, dst->format));
-		return FALSE;
+		return false;
 	}
 
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2067,7 +2067,7 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	op->dst.format = dst->format;
 	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	op->dst.bo = priv->gpu_bo;
 	op->damage = &priv->gpu_damage;
@@ -2079,20 +2079,20 @@ gen4_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static inline Bool
+static inline bool
 picture_is_cpu(PicturePtr picture)
 {
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	/* If it is a solid, try to use the render paths */
 	if (picture->pDrawable->width == 1 &&
 	    picture->pDrawable->height == 1 &&
 	    picture->repeat)
-		return FALSE;
+		return false;
 
 	return is_cpu(picture->pDrawable);
 }
@@ -2107,20 +2107,20 @@ static inline bool prefer_blt(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr source,
 	int width, int height)
 {
 	if (prefer_blt(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
@@ -2133,9 +2133,9 @@ check_gradient(PicturePtr picture)
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2207,7 +2207,7 @@ gen4_composite_fallback(struct sna *sna,
 	if (!gen4_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2229,11 +2229,11 @@ gen4_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2241,18 +2241,18 @@ gen4_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2262,25 +2262,25 @@ gen4_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2291,40 +2291,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen4_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen4_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2335,10 +2335,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen4_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2354,11 +2354,11 @@ gen4_render_composite(struct sna *sna,
 	     width, height, sna->kgem.mode));
 
 	if (op >= ARRAY_SIZE(gen4_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2374,10 +2374,10 @@ gen4_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen4_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2388,13 +2388,13 @@ gen4_render_composite(struct sna *sna,
 					    tmp);
 
 	if (!gen4_composite_set_target(dst, tmp))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
 	    !sna_render_composite_redirect(sna, tmp,
 					   dst_x, dst_y, width, height))
-		return FALSE;
+		return false;
 
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
@@ -2414,13 +2414,13 @@ gen4_render_composite(struct sna *sna,
 
 	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->prim_emit = gen4_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2435,7 +2435,7 @@ gen4_render_composite(struct sna *sna,
 					goto cleanup_src;
 				}
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2504,7 +2504,7 @@ gen4_render_composite(struct sna *sna,
 
 	gen4_bind_surfaces(sna, tmp);
 	gen4_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2515,7 +2515,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2532,12 +2532,12 @@ gen4_copy_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table) {
@@ -2583,7 +2583,7 @@ static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
 	(void)flags;
 }
 
-static Bool
+static bool
 gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2595,7 +2595,7 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -2611,12 +2611,12 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
 fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -2709,7 +2709,7 @@ fallback_blt:
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -2739,7 +2739,7 @@ fallback_blt:
 	} while (--n);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -2770,7 +2770,7 @@ gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen4_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen4_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -2784,7 +2784,7 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -2798,14 +2798,14 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -2856,7 +2856,7 @@ fallback:
 
 	op->blt  = gen4_render_copy_blt;
 	op->done = gen4_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2873,12 +2873,12 @@ gen4_fill_bind_surfaces(struct sna *sna, const struct sna_composite_op *op)
 		gen4_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen4_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen4_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN4_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen4.surface_table) == *(uint64_t*)binding_table) {
@@ -2912,7 +2912,7 @@ gen4_render_fill_rectangle(struct sna *sna,
 	FLUSH(op);
 }
 
-static Bool
+static bool
 gen4_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -2926,7 +2926,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen4_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -2950,10 +2950,10 @@ gen4_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen4_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -2961,7 +2961,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear)
@@ -2972,7 +2972,7 @@ gen4_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));
 
@@ -2990,7 +2990,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -3012,7 +3012,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	} while (--n);
 
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3054,7 +3054,7 @@ gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen4_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3072,7 +3072,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3104,7 +3104,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
+	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
 	op->base.need_magic_ca_pass = 0;
 	op->base.has_component_alpha = 0;
@@ -3123,10 +3123,10 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen4_render_fill_op_box;
 	op->boxes = gen4_render_fill_op_boxes;
 	op->done  = gen4_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen4_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3144,7 +3144,7 @@ gen4_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3162,12 +3162,12 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	if (gen4_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
-		return FALSE;
+		return false;
 
 	if (alu == GXclear)
 		color = 0;
@@ -3192,10 +3192,10 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
@@ -3214,7 +3214,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3236,8 +3236,8 @@ discard_vbo(struct sna *sna)
 
 static void gen4_render_reset(struct sna *sna)
 {
-	sna->render_state.gen4.needs_invariant = TRUE;
-	sna->render_state.gen4.needs_urb = TRUE;
+	sna->render_state.gen4.needs_invariant = true;
+	sna->render_state.gen4.needs_urb = true;
 	sna->render_state.gen4.vb_id = 0;
 	sna->render_state.gen4.ve_id = -1;
 	sna->render_state.gen4.last_primitive = -1;
@@ -3293,7 +3293,7 @@ static uint32_t gen4_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
+	sf_state->sf5.viewport_transform = false;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN4_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
 	sf_state->sf7.trifan_pv = 2;
@@ -3321,7 +3321,7 @@ static uint32_t gen4_create_sampler_state(struct sna_static_stream *stream,
 }
 
 static void gen4_init_wm_state(struct gen4_wm_unit_state *state,
-			       Bool has_mask,
+			       bool has_mask,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
@@ -3415,7 +3415,7 @@ static uint32_t gen4_create_cc_unit_state(struct sna_static_stream *stream)
 	return sna_static_stream_offsetof(stream, base);
 }
 
-static Bool gen4_render_setup(struct sna *sna)
+static bool gen4_render_setup(struct sna *sna)
 {
 	struct gen4_render_state *state = &sna->render_state.gen4;
 	struct sna_static_stream general;
@@ -3490,10 +3490,10 @@ static Bool gen4_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen4_render_init(struct sna *sna)
+bool gen4_render_init(struct sna *sna)
 {
 	if (!gen4_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->render.composite = gen4_render_composite;
 	sna->render.video = gen4_render_video;
@@ -3511,5 +3511,5 @@ Bool gen4_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN4_MAX_3D_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 7a20303..c4b1ecf 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -185,29 +185,29 @@ static const uint32_t ps_kernel_planar_static[][4] = {
 static const struct wm_kernel_info {
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, FALSE),
-	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(WM_KERNEL, ps_kernel_nomask_affine, false),
+	KERNEL(WM_KERNEL_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(WM_KERNEL_MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(WM_KERNEL_MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(WM_KERNEL_MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA,
-	       ps_kernel_maskca_srcalpha_affine, TRUE),
+	       ps_kernel_maskca_srcalpha_affine, true),
 	KERNEL(WM_KERNEL_MASKCA_SRCALPHA_PROJECTIVE,
-	       ps_kernel_maskca_srcalpha_projective, TRUE),
+	       ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, FALSE),
-	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, FALSE),
+	KERNEL(WM_KERNEL_VIDEO_PLANAR, ps_kernel_planar_static, false),
+	KERNEL(WM_KERNEL_VIDEO_PACKED, ps_kernel_packed_static, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen5_blend_op[] = {
@@ -278,7 +278,7 @@ static inline bool too_large(int width, int height)
 }
 
 static int
-gen5_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen5_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -311,7 +311,7 @@ static void gen5_magic_ca_pass(struct sna *sna,
 	gen5_emit_pipelined_pointers
 		(sna, op, PictOpAdd,
 		 gen5_choose_composite_kernel(PictOpAdd,
-					      TRUE, TRUE, op->is_affine));
+					      true, true, op->is_affine));
 
 	OUT_BATCH(GEN5_3DPRIMITIVE |
 		  GEN5_3DPRIMITIVE_VERTEX_SEQUENTIAL |
@@ -491,7 +491,7 @@ static void gen5_vertex_close(struct sna *sna)
 }
 
 static uint32_t gen5_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -552,7 +552,7 @@ static uint32_t gen5_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen5_check_dst_format(PictFormat format)
+static bool gen5_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -567,10 +567,10 @@ static Bool gen5_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -683,10 +683,10 @@ static uint32_t gen5_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown filter: %x\n", __FUNCTION__, picture->filter));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -709,18 +709,18 @@ static uint32_t gen5_repeat(uint32_t repeat)
 static bool gen5_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
 		DBG(("%s: unknown repeat: %x\n",
 		     __FUNCTION__, picture->repeatType));
-		return FALSE;
+		return false;
 	}
 }
 
@@ -745,7 +745,7 @@ gen5_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t domains;
 	uint16_t offset;
@@ -951,7 +951,7 @@ gen5_emit_composite_primitive(struct sna *sna,
 			      const struct sna_composite_rectangles *r)
 {
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
+	bool is_affine = op->is_affine;
 	const float *src_sf = op->src.scale;
 	const float *mask_sf = op->mask.scale;
 
@@ -1289,7 +1289,7 @@ gen5_emit_invariant(struct sna *sna)
 
 	gen5_emit_state_base_address(sna);
 
-	sna->render_state.gen5.needs_invariant = FALSE;
+	sna->render_state.gen5.needs_invariant = false;
 }
 
 static void
@@ -1409,12 +1409,12 @@ gen5_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen5_render_state *render = &sna->render_state.gen5;
-	Bool has_mask = op->mask.bo != NULL;
-	Bool is_affine = op->is_affine;
+	bool has_mask = op->mask.bo != NULL;
+	bool is_affine = op->is_affine;
 	int nelem = has_mask ? 2 : 1;
 	int selem = is_affine ? 2 : 3;
 	uint32_t w_component;
@@ -1518,12 +1518,12 @@ static void gen5_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen5_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo)
 		binding_table[2] =
 			gen5_bind_bo(sna,
@@ -1531,7 +1531,7 @@ static void gen5_bind_surfaces(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table &&
@@ -1706,7 +1706,7 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen5_bind_video_source(sna,
@@ -1721,7 +1721,7 @@ static void gen5_video_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen5_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1740,7 +1740,7 @@ gen5_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1758,7 +1758,7 @@ gen5_render_video(struct sna *sna,
 	tmp.u.gen5.wm_kernel =
 		is_planar_fourcc(frame->id) ? WM_KERNEL_VIDEO_PLANAR : WM_KERNEL_VIDEO_PACKED;
 	tmp.u.gen5.ve_id = 1;
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.priv = frame;
@@ -1822,7 +1822,7 @@ gen5_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen5_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
 static int
@@ -1832,8 +1832,8 @@ gen5_composite_solid_init(struct sna *sna,
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -1846,7 +1846,7 @@ gen5_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen5_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -1976,7 +1976,7 @@ gen5_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2076,7 +2076,7 @@ gen5_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 {
 	struct sna_pixmap *priv;
@@ -2103,7 +2103,7 @@ gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2115,43 +2115,43 @@ gen5_composite_set_target(PicturePtr dst, struct sna_composite_op *op)
 
 	get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
 			    &op->dst.x, &op->dst.y);
-	return TRUE;
+	return true;
 }
 
-static inline Bool
+static inline bool
 picture_is_cpu(PicturePtr picture)
 {
 	if (!picture->pDrawable)
-		return FALSE;
+		return false;
 
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
-		return TRUE;
+		return true;
 
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height))
-		return TRUE;
+		return true;
 
 	/* The blitter is much faster for solids */
 	if (sna_picture_is_solid(src, NULL))
-		return TRUE;
+		return true;
 
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
 	return picture_is_cpu(src);
@@ -2161,14 +2161,14 @@ static bool
 is_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2241,7 +2241,7 @@ gen5_composite_fallback(struct sna *sna,
 	if (!gen5_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2263,11 +2263,11 @@ gen5_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2275,18 +2275,18 @@ gen5_composite_fallback(struct sna *sna,
 	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2296,25 +2296,25 @@ gen5_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2325,40 +2325,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen5_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen5_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2369,10 +2369,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen5_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen5_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2389,7 +2389,7 @@ gen5_render_composite(struct sna *sna,
 
 	if (op >= ARRAY_SIZE(gen5_blend_op)) {
 		DBG(("%s: unhandled blend op %d\n", __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL &&
@@ -2399,10 +2399,10 @@ gen5_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen5_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2414,7 +2414,7 @@ gen5_render_composite(struct sna *sna,
 
 	if (!gen5_composite_set_target(dst, tmp)) {
 		DBG(("%s: failed to set composite target\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
@@ -2423,14 +2423,14 @@ gen5_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
 	    !sna_render_composite_redirect(sna, tmp,
 					   dst_x, dst_y, width, height))
-		return FALSE;
+		return false;
 
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen5_composite_picture(sna, src, &tmp->src,
@@ -2451,13 +2451,13 @@ gen5_render_composite(struct sna *sna,
 
 	tmp->op = op;
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->prim_emit = gen5_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2470,7 +2470,7 @@ gen5_render_composite(struct sna *sna,
 					goto cleanup_src;
 				}
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2536,7 +2536,7 @@ gen5_render_composite(struct sna *sna,
 
 	gen5_bind_surfaces(sna, tmp);
 	gen5_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2547,12 +2547,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen5_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2560,8 +2560,8 @@ gen5_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -2756,7 +2756,7 @@ gen5_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen5_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -2771,13 +2771,13 @@ gen5_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
-		return FALSE;
+		return false;
 
 	if (op >= ARRAY_SIZE(gen5_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen5_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -2786,7 +2786,7 @@ gen5_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -2796,13 +2796,13 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen5_composite_set_target(dst, &tmp->base))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen5_composite_picture(sna, src, &tmp->base.src,
@@ -2822,8 +2822,8 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.mask.bo = NULL;
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen5_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -2837,7 +2837,7 @@ gen5_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen5.wm_kernel =
 		gen5_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen5.ve_id = 1 << 1 | tmp->base.is_affine;
 
@@ -2857,7 +2857,7 @@ gen5_render_composite_spans(struct sna *sna,
 
 	gen5_bind_surfaces(sna, &tmp->base);
 	gen5_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -2865,7 +2865,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -2884,12 +2884,12 @@ gen5_copy_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
@@ -2900,7 +2900,7 @@ gen5_copy_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -2914,12 +2914,12 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
 fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -3013,7 +3013,7 @@ fallback_blt:
 		tmp.src.scale[1] = 1.f/src->drawable.height;
 	}
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
@@ -3066,7 +3066,7 @@ fallback_blt:
 	gen5_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3115,7 +3115,7 @@ gen5_render_copy_done(struct sna *sna,
 	DBG(("%s()\n", __FUNCTION__));
 }
 
-static Bool
+static bool
 gen5_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3128,14 +3128,14 @@ gen5_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3187,7 +3187,7 @@ fallback:
 				 src_bo, dst_bo,
 				 dst->drawable.bitsPerPixel,
 				 op))
-			return TRUE;
+			return true;
 	}
 
 	gen5_copy_bind_surfaces(sna, &op->base);
@@ -3195,7 +3195,7 @@ fallback:
 
 	op->blt  = gen5_render_copy_blt;
 	op->done = gen5_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3213,12 +3213,12 @@ gen5_fill_bind_surfaces(struct sna *sna,
 		gen5_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen5_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen5_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN5_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen5.surface_table) == *(uint64_t*)binding_table) {
@@ -3239,7 +3239,7 @@ static inline bool prefer_blt_fill(struct sna *sna)
 #endif
 }
 
-static Bool
+static bool
 gen5_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3256,7 +3256,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen5_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3280,10 +3280,10 @@ gen5_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen5_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3298,7 +3298,7 @@ gen5_render_fill_boxes(struct sna *sna,
 					  color->blue,
 					  color->alpha,
 					  PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -3314,7 +3314,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
@@ -3356,7 +3356,7 @@ gen5_render_fill_boxes(struct sna *sna,
 
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3448,7 +3448,7 @@ gen5_render_fill_op_done(struct sna *sna,
 	DBG(("%s()\n", __FUNCTION__));
 }
 
-static Bool
+static bool
 gen5_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3461,7 +3461,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3496,7 +3496,7 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
+	op->base.is_affine = true;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
@@ -3514,10 +3514,10 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen5_render_fill_op_box;
 	op->boxes = gen5_render_fill_op_boxes;
 	op->done  = gen5_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen5_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3535,7 +3535,7 @@ gen5_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3553,7 +3553,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna) &&
 	    gen5_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -3584,11 +3584,11 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
@@ -3619,7 +3619,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen5_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3683,7 +3683,7 @@ gen5_render_expire(struct kgem *kgem)
 
 static void gen5_render_reset(struct sna *sna)
 {
-	sna->render_state.gen5.needs_invariant = TRUE;
+	sna->render_state.gen5.needs_invariant = true;
 	sna->render_state.gen5.vb_id = 0;
 	sna->render_state.gen5.ve_id = -1;
 	sna->render_state.gen5.last_primitive = -1;
@@ -3739,7 +3739,7 @@ static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
+	sf_state->sf5.viewport_transform = false;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
 	sf_state->sf7.trifan_pv = 2;
@@ -3767,7 +3767,7 @@ static uint32_t gen5_create_sampler_state(struct sna_static_stream *stream,
 }
 
 static void gen5_init_wm_state(struct gen5_wm_unit_state *state,
-			       Bool has_mask,
+			       bool has_mask,
 			       uint32_t kernel,
 			       uint32_t sampler)
 {
@@ -3866,7 +3866,7 @@ static uint32_t gen5_create_cc_unit_state(struct sna_static_stream *stream)
 	return sna_static_stream_offsetof(stream, base);
 }
 
-static Bool gen5_render_setup(struct sna *sna)
+static bool gen5_render_setup(struct sna *sna)
 {
 	struct gen5_render_state *state = &sna->render_state.gen5;
 	struct sna_static_stream general;
@@ -3941,10 +3941,10 @@ static Bool gen5_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen5_render_init(struct sna *sna)
+bool gen5_render_init(struct sna *sna)
 {
 	if (!gen5_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen5_render_context_switch;
 	sna->kgem.retire = gen5_render_retire;
@@ -3969,5 +3969,5 @@ Bool gen5_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = MAX_3D_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6d8fbfd..ccf27be 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -140,27 +140,27 @@ static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, TRUE),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, TRUE),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, FALSE),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, FALSE),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen6_blend_op[] = {
@@ -290,7 +290,7 @@ static uint32_t gen6_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen6_check_dst_format(PictFormat format)
+static bool gen6_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -305,9 +305,9 @@ static Bool gen6_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	}
-	return FALSE;
+	return false;
 }
 
 static bool gen6_check_format(uint32_t format)
@@ -349,9 +349,9 @@ static uint32_t gen6_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -374,21 +374,21 @@ static uint32_t gen6_repeat(uint32_t repeat)
 static bool gen6_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
 static int
-gen6_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen6_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -553,7 +553,7 @@ gen6_emit_invariant(struct sna *sna)
 	gen6_emit_wm_constants(sna);
 	gen6_emit_null_depth_buffer(sna);
 
-	sna->render_state.gen6.needs_invariant = FALSE;
+	sna->render_state.gen6.needs_invariant = false;
 }
 
 static bool
@@ -608,7 +608,7 @@ gen6_emit_sampler(struct sna *sna, uint32_t state)
 }
 
 static void
-gen6_emit_sf(struct sna *sna, Bool has_mask)
+gen6_emit_sf(struct sna *sna, bool has_mask)
 {
 	int num_sf_outputs = has_mask ? 2 : 1;
 
@@ -745,8 +745,8 @@ gen6_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen6_render_state *render = &sna->render_state.gen6;
 	int nelem = op->mask.bo ? 2 : 1;
@@ -879,10 +879,10 @@ static void gen6_magic_ca_pass(struct sna *sna,
 
 	gen6_emit_flush(sna);
 
-	gen6_emit_cc(sna, PictOpAdd, TRUE, op->dst.format);
+	gen6_emit_cc(sna, PictOpAdd, true, op->dst.format);
 	gen6_emit_wm(sna,
 		     gen6_choose_composite_kernel(PictOpAdd,
-						  TRUE, TRUE,
+						  true, true,
 						  op->is_affine),
 		     3, 2);
 
@@ -1170,7 +1170,7 @@ gen6_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t *ss;
 	uint32_t domains;
@@ -1640,12 +1640,12 @@ static void gen6_emit_composite_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen6_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo) {
 		binding_table[2] =
 			gen6_bind_bo(sna,
@@ -1653,7 +1653,7 @@ static void gen6_emit_composite_state(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 	}
 
 	if (sna->kgem.surface == offset &&
@@ -1871,7 +1871,7 @@ static void gen6_emit_video_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen6_bind_video_source(sna,
@@ -1886,7 +1886,7 @@ static void gen6_emit_video_state(struct sna *sna,
 	gen6_emit_state(sna, op, offset | dirty);
 }
 
-static Bool
+static bool
 gen6_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -1911,7 +1911,7 @@ gen6_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -1928,7 +1928,7 @@ gen6_render_video(struct sna *sna,
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
@@ -2004,10 +2004,10 @@ gen6_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen6_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
@@ -2016,8 +2016,8 @@ gen6_composite_solid_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
@@ -2031,7 +2031,7 @@ gen6_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen6_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -2161,7 +2161,7 @@ gen6_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2264,7 +2264,7 @@ static void gen6_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen6_composite_set_target(struct sna *sna,
 			  struct sna_composite_op *op,
 			  PicturePtr dst)
@@ -2290,7 +2290,7 @@ gen6_composite_set_target(struct sna *sna,
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2307,7 +2307,7 @@ gen6_composite_set_target(struct sna *sna,
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
 	     op->dst.x, op->dst.y));
-	return TRUE;
+	return true;
 }
 
 static bool prefer_blt_ring(struct sna *sna)
@@ -2320,27 +2320,27 @@ static bool can_switch_rings(struct sna *sna)
 	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (prefer_blt_ring(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
 		DBG(("%s: dst too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	if (src->pDrawable &&
@@ -2348,31 +2348,31 @@ try_blt(struct sna *sna,
 		DBG(("%s: src too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__,
 		     src->pDrawable->width, src->pDrawable->height));
-		return TRUE;
+		return true;
 	}
 
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
-			return TRUE;
+			return true;
 		if (src->pDrawable)
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 static bool
 check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2443,7 +2443,7 @@ gen6_composite_fallback(struct sna *sna,
 	if (!gen6_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2465,11 +2465,11 @@ gen6_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2479,18 +2479,18 @@ gen6_composite_fallback(struct sna *sna,
 	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2500,25 +2500,25 @@ gen6_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2529,40 +2529,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen6_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen6_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2573,10 +2573,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen6_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2589,11 +2589,11 @@ gen6_render_composite(struct sna *sna,
 		      struct sna_composite_op *tmp)
 {
 	if (op >= ARRAY_SIZE(gen6_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2612,10 +2612,10 @@ gen6_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen6_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2629,7 +2629,7 @@ gen6_render_composite(struct sna *sna,
 		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen6_composite_set_target(sna, tmp, dst))
-		return FALSE;
+		return false;
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
@@ -2637,14 +2637,14 @@ gen6_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen6_composite_picture(sna, src, &tmp->src,
@@ -2672,12 +2672,12 @@ gen6_render_composite(struct sna *sna,
 		if (tmp->redirect.real_bo)
 			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return TRUE;
+		return true;
 	}
 
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->mask.bo = NULL;
 	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
@@ -2686,7 +2686,7 @@ gen6_render_composite(struct sna *sna,
 	tmp->prim_emit = gen6_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2697,7 +2697,7 @@ gen6_render_composite(struct sna *sna,
 				if (op != PictOpOver)
 					goto cleanup_src;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2785,7 +2785,7 @@ gen6_render_composite(struct sna *sna,
 
 	gen6_emit_composite_state(sna, tmp);
 	gen6_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2796,12 +2796,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen6_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2809,8 +2809,8 @@ gen6_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -3059,7 +3059,7 @@ gen6_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen6_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3074,13 +3074,13 @@ gen6_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
-		return FALSE;
+		return false;
 
 	if (op >= ARRAY_SIZE(gen6_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen6_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3089,7 +3089,7 @@ gen6_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3099,13 +3099,13 @@ gen6_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen6_composite_set_target(sna, &tmp->base, dst))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen6_composite_picture(sna, src, &tmp->base.src,
@@ -3126,8 +3126,8 @@ gen6_render_composite_spans(struct sna *sna,
 	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen6_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -3150,7 +3150,7 @@ gen6_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen6.wm_kernel =
 		gen6_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen6.nr_surfaces = 3;
 	tmp->base.u.gen6.nr_inputs = 2;
@@ -3174,7 +3174,7 @@ gen6_render_composite_spans(struct sna *sna,
 
 	gen6_emit_composite_state(sna, &tmp->base);
 	gen6_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3182,7 +3182,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -3203,12 +3203,12 @@ gen6_emit_copy_state(struct sna *sna,
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) {
@@ -3273,7 +3273,7 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		extents.y1 + src_dy < extents.y2 + dst_dy);
 }
 
-static Bool
+static bool
 gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -3283,7 +3283,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -3306,7 +3306,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    overlaps(src_bo, src_dx, src_dy,
@@ -3404,7 +3404,7 @@ fallback_blt:
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
@@ -3468,7 +3468,7 @@ fallback_blt:
 	gen6_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3513,7 +3513,7 @@ gen6_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen6_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen6_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3521,7 +3521,7 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3540,14 +3540,14 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3605,7 +3605,7 @@ fallback:
 
 	op->blt  = gen6_render_copy_blt;
 	op->done = gen6_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3624,12 +3624,12 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		gen6_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen6_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen6_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN6_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) {
@@ -3649,7 +3649,7 @@ static inline bool prefer_blt_fill(struct sna *sna,
 		untiled_tlb_miss(bo));
 }
 
-static Bool
+static bool
 gen6_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3667,7 +3667,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen6_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3691,10 +3691,10 @@ gen6_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen6_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3702,7 +3702,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear) {
@@ -3714,7 +3714,7 @@ gen6_render_fill_boxes(struct sna *sna,
 				     color->blue,
 				     color->alpha,
 				     PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
@@ -3737,11 +3737,11 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = FALSE;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.has_component_alpha = false;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -3781,7 +3781,7 @@ gen6_render_fill_boxes(struct sna *sna,
 
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3872,7 +3872,7 @@ gen6_render_op_fill_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen6_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -3892,7 +3892,7 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -3924,9 +3924,9 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
-	op->base.has_component_alpha = FALSE;
-	op->base.need_magic_ca_pass = FALSE;
+	op->base.is_affine = true;
+	op->base.has_component_alpha = false;
+	op->base.need_magic_ca_pass = false;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 
@@ -3947,10 +3947,10 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	op->box  = gen6_render_op_fill_box;
 	op->boxes = gen6_render_op_fill_boxes;
 	op->done = gen6_render_op_fill_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -3968,7 +3968,7 @@ gen6_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -3986,7 +3986,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna, bo) &&
 	    gen6_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4017,11 +4017,11 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -4054,10 +4054,10 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen6_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	BoxRec box;
@@ -4072,7 +4072,7 @@ gen6_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 				  0, &box, 1);
 }
 
-static Bool
+static bool
 gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
@@ -4089,7 +4089,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	/* Prefer to use the BLT if, and only if, already engaged */
 	if (sna->kgem.ring == KGEM_BLT &&
 	    gen6_render_clear_try_blt(sna, dst, bo))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height))
@@ -4112,11 +4112,11 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
@@ -4148,7 +4148,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void gen6_render_flush(struct sna *sna)
@@ -4206,7 +4206,7 @@ gen6_render_expire(struct kgem *kgem)
 
 static void gen6_render_reset(struct sna *sna)
 {
-	sna->render_state.gen6.needs_invariant = TRUE;
+	sna->render_state.gen6.needs_invariant = true;
 	sna->render_state.gen6.first_state_packet = true;
 	sna->render_state.gen6.vb_id = 0;
 	sna->render_state.gen6.ve_id = -1;
@@ -4226,7 +4226,7 @@ static void gen6_render_fini(struct sna *sna)
 	kgem_bo_destroy(&sna->kgem, sna->render_state.gen6.general_bo);
 }
 
-static Bool gen6_render_setup(struct sna *sna)
+static bool gen6_render_setup(struct sna *sna)
 {
 	struct gen6_render_state *state = &sna->render_state.gen6;
 	struct sna_static_stream general;
@@ -4272,10 +4272,10 @@ static Bool gen6_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen6_render_init(struct sna *sna)
+bool gen6_render_init(struct sna *sna)
 {
 	if (!gen6_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen6_render_context_switch;
 	sna->kgem.retire = gen6_render_retire;
@@ -4301,5 +4301,5 @@ Bool gen6_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN6_MAX_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index afb4b9b..e76acd8 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -173,27 +173,27 @@ static const struct wm_kernel_info {
 	const char *name;
 	const void *data;
 	unsigned int size;
-	Bool has_mask;
+	bool has_mask;
 } wm_kernels[] = {
-	KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE),
-	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, FALSE),
+	KERNEL(NOMASK, ps_kernel_nomask_affine, false),
+	KERNEL(NOMASK_PROJECTIVE, ps_kernel_nomask_projective, false),
 
-	KERNEL(MASK, ps_kernel_masknoca_affine, TRUE),
-	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, TRUE),
+	KERNEL(MASK, ps_kernel_masknoca_affine, true),
+	KERNEL(MASK_PROJECTIVE, ps_kernel_masknoca_projective, true),
 
-	KERNEL(MASKCA, ps_kernel_maskca_affine, TRUE),
-	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, TRUE),
+	KERNEL(MASKCA, ps_kernel_maskca_affine, true),
+	KERNEL(MASKCA_PROJECTIVE, ps_kernel_maskca_projective, true),
 
-	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, TRUE),
-	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, TRUE),
+	KERNEL(MASKCA_SRCALPHA, ps_kernel_maskca_srcalpha_affine, true),
+	KERNEL(MASKCA_SRCALPHA_PROJECTIVE, ps_kernel_maskca_srcalpha_projective, true),
 
-	KERNEL(VIDEO_PLANAR, ps_kernel_planar, FALSE),
-	KERNEL(VIDEO_PACKED, ps_kernel_packed, FALSE),
+	KERNEL(VIDEO_PLANAR, ps_kernel_planar, false),
+	KERNEL(VIDEO_PACKED, ps_kernel_packed, false),
 };
 #undef KERNEL
 
 static const struct blendinfo {
-	Bool src_alpha;
+	bool src_alpha;
 	uint32_t src_blend;
 	uint32_t dst_blend;
 } gen7_blend_op[] = {
@@ -261,7 +261,7 @@ static inline bool too_large(int width, int height)
 }
 
 static uint32_t gen7_get_blend(int op,
-			       Bool has_component_alpha,
+			       bool has_component_alpha,
 			       uint32_t dst_format)
 {
 	uint32_t src, dst;
@@ -323,7 +323,7 @@ static uint32_t gen7_get_dest_format(PictFormat format)
 	}
 }
 
-static Bool gen7_check_dst_format(PictFormat format)
+static bool gen7_check_dst_format(PictFormat format)
 {
 	switch (format) {
 	case PICT_a8r8g8b8:
@@ -338,9 +338,9 @@ static Bool gen7_check_dst_format(PictFormat format)
 	case PICT_a8:
 	case PICT_a4r4g4b4:
 	case PICT_x4r4g4b4:
-		return TRUE;
+		return true;
 	}
-	return FALSE;
+	return false;
 }
 
 static bool gen7_check_format(uint32_t format)
@@ -382,9 +382,9 @@ static uint32_t gen7_check_filter(PicturePtr picture)
 	switch (picture->filter) {
 	case PictFilterNearest:
 	case PictFilterBilinear:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -407,21 +407,21 @@ static uint32_t gen7_repeat(uint32_t repeat)
 static bool gen7_check_repeat(PicturePtr picture)
 {
 	if (!picture->repeat)
-		return TRUE;
+		return true;
 
 	switch (picture->repeatType) {
 	case RepeatNone:
 	case RepeatNormal:
 	case RepeatPad:
 	case RepeatReflect:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
 static int
-gen7_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
+gen7_choose_composite_kernel(int op, bool has_mask, bool is_ca, bool is_affine)
 {
 	int base;
 
@@ -736,7 +736,7 @@ gen7_emit_invariant(struct sna *sna)
 	gen7_disable_streamout(sna);
 	gen7_emit_null_depth_buffer(sna);
 
-	sna->render_state.gen7.needs_invariant = FALSE;
+	sna->render_state.gen7.needs_invariant = false;
 }
 
 static void
@@ -777,7 +777,7 @@ gen7_emit_sampler(struct sna *sna, uint32_t state)
 }
 
 static void
-gen7_emit_sf(struct sna *sna, Bool has_mask)
+gen7_emit_sf(struct sna *sna, bool has_mask)
 {
 	int num_sf_outputs = has_mask ? 2 : 1;
 
@@ -876,8 +876,8 @@ gen7_emit_vertex_elements(struct sna *sna,
 	/*
 	 * vertex data in vertex buffer
 	 *    position: (x, y)
-	 *    texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0)
-	 *    texture coordinate 1 if (has_mask is TRUE): same as above
+	 *    texture coordinate 0: (u0, v0) if (is_affine is true) else (u0, v0, w0)
+	 *    texture coordinate 1 if (has_mask is true): same as above
 	 */
 	struct gen7_render_state *render = &sna->render_state.gen7;
 	int nelem = op->mask.bo ? 2 : 1;
@@ -1039,10 +1039,10 @@ static void gen7_magic_ca_pass(struct sna *sna,
 
 	gen7_emit_pipe_invalidate(sna, true);
 
-	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, TRUE, op->dst.format));
+	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, true, op->dst.format));
 	gen7_emit_wm(sna,
 		     gen7_choose_composite_kernel(PictOpAdd,
-						  TRUE, TRUE,
+						  true, true,
 						  op->is_affine),
 		     3, 2);
 
@@ -1311,7 +1311,7 @@ gen7_bind_bo(struct sna *sna,
 	     uint32_t width,
 	     uint32_t height,
 	     uint32_t format,
-	     Bool is_dst)
+	     bool is_dst)
 {
 	uint32_t *ss;
 	uint32_t domains;
@@ -1763,12 +1763,12 @@ static void gen7_emit_composite_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			    op->dst.bo, op->dst.width, op->dst.height,
 			    gen7_get_dest_format(op->dst.format),
-			    TRUE);
+			    true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 	if (op->mask.bo) {
 		binding_table[2] =
 			gen7_bind_bo(sna,
@@ -1776,7 +1776,7 @@ static void gen7_emit_composite_state(struct sna *sna,
 				     op->mask.width,
 				     op->mask.height,
 				     op->mask.card_format,
-				     FALSE);
+				     false);
 	}
 
 	if (sna->kgem.surface == offset &&
@@ -1990,7 +1990,7 @@ static void gen7_emit_video_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	for (n = 0; n < n_src; n++) {
 		binding_table[1+n] =
 			gen7_bind_video_source(sna,
@@ -2005,7 +2005,7 @@ static void gen7_emit_video_state(struct sna *sna,
 	gen7_emit_state(sna, op, offset);
 }
 
-static Bool
+static bool
 gen7_render_video(struct sna *sna,
 		  struct sna_video *video,
 		  struct sna_video_frame *frame,
@@ -2030,7 +2030,7 @@ gen7_render_video(struct sna *sna,
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
-		return FALSE;
+		return false;
 
 	memset(&tmp, 0, sizeof(tmp));
 
@@ -2047,7 +2047,7 @@ gen7_render_video(struct sna *sna,
 
 	tmp.mask.bo = NULL;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
@@ -2123,10 +2123,10 @@ gen7_render_video(struct sna *sna,
 	priv->clear = false;
 
 	gen7_vertex_flush(sna);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_composite_solid_init(struct sna *sna,
 			  struct sna_composite_channel *channel,
 			  uint32_t color)
@@ -2135,8 +2135,8 @@ gen7_composite_solid_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
-	channel->is_solid  = TRUE;
+	channel->is_affine = true;
+	channel->is_solid  = true;
 	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
@@ -2150,7 +2150,7 @@ gen7_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static Bool
+static bool
 gen7_composite_linear_init(struct sna *sna,
 			   PicturePtr picture,
 			   struct sna_composite_channel *channel,
@@ -2280,7 +2280,7 @@ gen7_composite_picture(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d), dst=(%d, %d)\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	channel->is_solid = FALSE;
+	channel->is_solid = false;
 	channel->card_format = -1;
 
 	if (sna_picture_is_solid(picture, &color))
@@ -2381,7 +2381,7 @@ static void gen7_render_composite_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, op);
 }
 
-static Bool
+static bool
 gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PicturePtr dst)
 {
 	struct sna_pixmap *priv;
@@ -2405,7 +2405,7 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	if (op->dst.bo == NULL) {
 		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
 		if (priv == NULL)
-			return FALSE;
+			return false;
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
@@ -2422,7 +2422,7 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	     op->dst.width, op->dst.height,
 	     op->dst.bo->pitch,
 	     op->dst.x, op->dst.y));
-	return TRUE;
+	return true;
 }
 
 inline static bool can_switch_rings(struct sna *sna)
@@ -2435,42 +2435,42 @@ inline static bool prefer_blt_ring(struct sna *sna)
 	return sna->kgem.ring != KGEM_RENDER || can_switch_rings(sna);
 }
 
-static Bool
+static bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
 	if (sna->kgem.ring == KGEM_BLT) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
-		return TRUE;
+		return true;
 	}
 
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
 static bool
 check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
-		return FALSE;
+		return false;
 
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
-		return FALSE;
+		return false;
 	default:
-		return TRUE;
+		return true;
 	}
 }
 
@@ -2541,7 +2541,7 @@ gen7_composite_fallback(struct sna *sna,
 	if (!gen7_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
 		     __FUNCTION__, dst->format));
-		return TRUE;
+		return true;
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
@@ -2563,11 +2563,11 @@ gen7_composite_fallback(struct sna *sna,
 	 */
 	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	/* If anything is on the GPU, push everything out to the GPU */
@@ -2577,18 +2577,18 @@ gen7_composite_fallback(struct sna *sna,
 	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src_pixmap && !src_fallback) {
 		DBG(("%s: src is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (mask_pixmap && !mask_fallback) {
 		DBG(("%s: mask is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	/* However if the dst is not on the GPU and we need to
@@ -2598,25 +2598,25 @@ gen7_composite_fallback(struct sna *sna,
 	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	if (too_large(dst_pixmap->drawable.width,
 		      dst_pixmap->drawable.height) &&
 	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
 		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
-		return TRUE;
+		return true;
 	}
 
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
-	return FALSE;
+	return false;
 }
 
 static int
@@ -2627,40 +2627,40 @@ reuse_source(struct sna *sna,
 	uint32_t color;
 
 	if (src_x != msk_x || src_y != msk_y)
-		return FALSE;
+		return false;
 
 	if (src == mask) {
 		DBG(("%s: mask is source\n", __FUNCTION__));
 		*mc = *sc;
 		mc->bo = kgem_bo_reference(mc->bo);
-		return TRUE;
+		return true;
 	}
 
 	if (sna_picture_is_solid(mask, &color))
 		return gen7_composite_solid_init(sna, mc, color);
 
 	if (sc->is_solid)
-		return FALSE;
+		return false;
 
 	if (src->pDrawable == NULL || mask->pDrawable != src->pDrawable)
-		return FALSE;
+		return false;
 
 	DBG(("%s: mask reuses source drawable\n", __FUNCTION__));
 
 	if (!sna_transform_equal(src->transform, mask->transform))
-		return FALSE;
+		return false;
 
 	if (!sna_picture_alphamap_equal(src, mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_repeat(mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_filter(mask))
-		return FALSE;
+		return false;
 
 	if (!gen7_check_format(mask->format))
-		return FALSE;
+		return false;
 
 	DBG(("%s: reusing source channel for mask with a twist\n",
 	     __FUNCTION__));
@@ -2671,10 +2671,10 @@ reuse_source(struct sna *sna,
 	mc->pict_format = mask->format;
 	mc->card_format = gen7_get_card_format(mask->format);
 	mc->bo = kgem_bo_reference(mc->bo);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_composite(struct sna *sna,
 		      uint8_t op,
 		      PicturePtr src,
@@ -2687,11 +2687,11 @@ gen7_render_composite(struct sna *sna,
 		      struct sna_composite_op *tmp)
 {
 	if (op >= ARRAY_SIZE(gen7_blend_op))
-		return FALSE;
+		return false;
 
 #if NO_COMPOSITE
 	if (mask)
-		return FALSE;
+		return false;
 
 	return sna_blt_composite(sna, op,
 				 src, dst,
@@ -2710,10 +2710,10 @@ gen7_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	if (gen7_composite_fallback(sna, src, mask, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height))
 		return sna_tiling_composite(op, src, mask, dst,
@@ -2727,7 +2727,7 @@ gen7_render_composite(struct sna *sna,
 		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen7_composite_set_target(sna, tmp, dst))
-		return FALSE;
+		return false;
 
 	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
@@ -2735,14 +2735,14 @@ gen7_render_composite(struct sna *sna,
 			      src_x, src_y,
 			      dst_x, dst_y,
 			      width, height, tmp))
-		return TRUE;
+		return true;
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->dst.width, tmp->dst.height)) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen7_composite_picture(sna, src, &tmp->src,
@@ -2770,12 +2770,12 @@ gen7_render_composite(struct sna *sna,
 		if (tmp->redirect.real_bo)
 			kgem_bo_destroy(&sna->kgem, tmp->redirect.real_bo);
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-		return TRUE;
+		return true;
 	}
 
 	tmp->is_affine = tmp->src.is_affine;
-	tmp->has_component_alpha = FALSE;
-	tmp->need_magic_ca_pass = FALSE;
+	tmp->has_component_alpha = false;
+	tmp->need_magic_ca_pass = false;
 
 	tmp->mask.bo = NULL;
 	tmp->mask.filter = SAMPLER_FILTER_NEAREST;
@@ -2784,7 +2784,7 @@ gen7_render_composite(struct sna *sna,
 	tmp->prim_emit = gen7_emit_composite_primitive;
 	if (mask) {
 		if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format)) {
-			tmp->has_component_alpha = TRUE;
+			tmp->has_component_alpha = true;
 
 			/* Check if it's component alpha that relies on a source alpha and on
 			 * the source value.  We can only get one of those into the single
@@ -2795,7 +2795,7 @@ gen7_render_composite(struct sna *sna,
 				if (op != PictOpOver)
 					goto cleanup_src;
 
-				tmp->need_magic_ca_pass = TRUE;
+				tmp->need_magic_ca_pass = true;
 				tmp->op = PictOpOutReverse;
 			}
 		}
@@ -2874,7 +2874,7 @@ gen7_render_composite(struct sna *sna,
 
 	gen7_emit_composite_state(sna, tmp);
 	gen7_align_vertex(sna, tmp);
-	return TRUE;
+	return true;
 
 cleanup_mask:
 	if (tmp->mask.bo)
@@ -2885,12 +2885,12 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->dst.bo);
-	return FALSE;
+	return false;
 }
 
 /* A poor man's span interface. But better than nothing? */
 #if !NO_COMPOSITE_SPANS
-static Bool
+static bool
 gen7_composite_alpha_gradient_init(struct sna *sna,
 				   struct sna_composite_channel *channel)
 {
@@ -2898,8 +2898,8 @@ gen7_composite_alpha_gradient_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatPad;
-	channel->is_affine = TRUE;
-	channel->is_solid  = FALSE;
+	channel->is_affine = true;
+	channel->is_solid  = false;
 	channel->transform = NULL;
 	channel->width  = 256;
 	channel->height = 1;
@@ -3148,7 +3148,7 @@ gen7_render_composite_spans_done(struct sna *sna,
 	sna_render_composite_redirect_done(sna, &op->base);
 }
 
-static Bool
+static bool
 gen7_render_composite_spans(struct sna *sna,
 			    uint8_t op,
 			    PicturePtr src,
@@ -3163,10 +3163,10 @@ gen7_render_composite_spans(struct sna *sna,
 	     width, height, flags, sna->kgem.ring));
 
 	if (op >= ARRAY_SIZE(gen7_blend_op))
-		return FALSE;
+		return false;
 
 	if (gen7_composite_fallback(sna, src, NULL, dst))
-		return FALSE;
+		return false;
 
 	if (need_tiling(sna, width, height)) {
 		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
@@ -3175,7 +3175,7 @@ gen7_render_composite_spans(struct sna *sna,
 		if (!is_gpu(dst->pDrawable)) {
 			DBG(("%s: fallback, tiled operation not on GPU\n",
 			     __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 
 		return sna_tiling_composite_spans(op, src, dst,
@@ -3185,13 +3185,13 @@ gen7_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (!gen7_composite_set_target(sna, &tmp->base, dst))
-		return FALSE;
+		return false;
 	sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
 
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
-			return FALSE;
+			return false;
 	}
 
 	switch (gen7_composite_picture(sna, src, &tmp->base.src,
@@ -3212,8 +3212,8 @@ gen7_render_composite_spans(struct sna *sna,
 	tmp->base.mask.bo = NULL;
 
 	tmp->base.is_affine = tmp->base.src.is_affine;
-	tmp->base.has_component_alpha = FALSE;
-	tmp->base.need_magic_ca_pass = FALSE;
+	tmp->base.has_component_alpha = false;
+	tmp->base.need_magic_ca_pass = false;
 
 	gen7_composite_alpha_gradient_init(sna, &tmp->base.mask);
 
@@ -3236,7 +3236,7 @@ gen7_render_composite_spans(struct sna *sna,
 
 	tmp->base.u.gen7.wm_kernel =
 		gen7_choose_composite_kernel(tmp->base.op,
-					     TRUE, FALSE,
+					     true, false,
 					     tmp->base.is_affine);
 	tmp->base.u.gen7.nr_surfaces = 3;
 	tmp->base.u.gen7.nr_inputs = 2;
@@ -3260,7 +3260,7 @@ gen7_render_composite_spans(struct sna *sna,
 
 	gen7_emit_composite_state(sna, &tmp->base);
 	gen7_align_vertex(sna, &tmp->base);
-	return TRUE;
+	return true;
 
 cleanup_src:
 	if (tmp->base.src.bo)
@@ -3268,7 +3268,7 @@ cleanup_src:
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -3287,12 +3287,12 @@ gen7_emit_copy_state(struct sna *sna,
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, op->src.width, op->src.height,
 			     op->src.card_format,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
@@ -3357,7 +3357,7 @@ overlaps(struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		extents.y1 + src_dy < extents.y2 + dst_dy);
 }
 
-static Bool
+static bool
 gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -3367,7 +3367,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy_boxes(sna, alu,
 				  src_bo, src_dx, src_dy,
@@ -3390,7 +3390,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       dst_bo, dst_dx, dst_dy,
 			       dst->drawable.bitsPerPixel,
 			       box, n))
-		return TRUE;
+		return true;
 
 	if ((too_large(dst->drawable.width, dst->drawable.height) ||
 	     too_large(src->drawable.width, src->drawable.height)) &&
@@ -3415,7 +3415,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 				       dst_bo, dst_dx, dst_dy,
 				       dst->drawable.bitsPerPixel,
 				       box, n))
-			return TRUE;
+			return true;
 	}
 
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -3514,7 +3514,7 @@ fallback_blt:
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
@@ -3577,7 +3577,7 @@ fallback_blt:
 	gen7_vertex_flush(sna);
 	sna_render_composite_redirect_done(sna, &tmp);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 
 fallback_tiled_src:
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
@@ -3620,7 +3620,7 @@ gen7_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 		gen7_vertex_flush(sna);
 }
 
-static Bool
+static bool
 gen7_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -3628,7 +3628,7 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 {
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		return FALSE;
+		return false;
 
 	return sna_blt_copy(sna, alu,
 			    src_bo, dst_bo,
@@ -3647,14 +3647,14 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy(sna, alu, src_bo, dst_bo,
 				    dst->drawable.bitsPerPixel,
@@ -3712,7 +3712,7 @@ fallback:
 
 	op->blt  = gen7_render_copy_blt;
 	op->done = gen7_render_copy_done;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3735,12 +3735,12 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 		gen7_bind_bo(sna,
 			     op->dst.bo, op->dst.width, op->dst.height,
 			     gen7_get_dest_format(op->dst.format),
-			     TRUE);
+			     true);
 	binding_table[1] =
 		gen7_bind_bo(sna,
 			     op->src.bo, 1, 1,
 			     GEN7_SURFACEFORMAT_B8G8R8A8_UNORM,
-			     FALSE);
+			     false);
 
 	if (sna->kgem.surface == offset &&
 	    *(uint64_t *)(sna->kgem.batch + sna->render_state.gen7.surface_table) == *(uint64_t*)binding_table) {
@@ -3758,7 +3758,7 @@ static inline bool prefer_blt_fill(struct sna *sna,
 	return prefer_blt_ring(sna) || untiled_tlb_miss(bo);
 }
 
-static Bool
+static bool
 gen7_render_fill_boxes(struct sna *sna,
 		       CARD8 op,
 		       PictFormat format,
@@ -3776,7 +3776,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen7_blend_op)) {
 		DBG(("%s: fallback due to unhandled blend op: %d\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (op <= PictOpSrc &&
@@ -3800,10 +3800,10 @@ gen7_render_fill_boxes(struct sna *sna,
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
-			return TRUE;
+			return true;
 
 		if (!gen7_check_dst_format(format))
-			return FALSE;
+			return false;
 
 		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
@@ -3811,7 +3811,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	}
 
 #if NO_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	if (op == PictOpClear) {
@@ -3823,7 +3823,7 @@ gen7_render_fill_boxes(struct sna *sna,
 				     color->blue,
 				     color->alpha,
 				     PICT_a8r8g8b8))
-		return FALSE;
+		return false;
 
 	DBG(("%s(%08x x %d [(%d, %d), (%d, %d) ...])\n",
 	     __FUNCTION__, pixel, n,
@@ -3846,11 +3846,11 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
-	tmp.has_component_alpha = FALSE;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.has_component_alpha = false;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -3893,7 +3893,7 @@ gen7_render_fill_boxes(struct sna *sna,
 
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -3982,7 +3982,7 @@ gen7_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
-static Bool
+static bool
 gen7_render_fill(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 uint32_t color,
@@ -4002,7 +4002,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 			 dst_bo, dst->drawable.bitsPerPixel,
 			 color,
 			 op))
-		return TRUE;
+		return true;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height))
@@ -4034,9 +4034,9 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->base.mask.filter = SAMPLER_FILTER_NEAREST;
 	op->base.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	op->base.is_affine = TRUE;
-	op->base.has_component_alpha = FALSE;
-	op->base.need_magic_ca_pass = FALSE;
+	op->base.is_affine = true;
+	op->base.has_component_alpha = false;
+	op->base.need_magic_ca_pass = false;
 	op->base.floats_per_vertex = 3;
 	op->base.floats_per_rect = 9;
 
@@ -4057,10 +4057,10 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->box   = gen7_render_fill_op_box;
 	op->boxes = gen7_render_fill_op_boxes;
 	op->done  = gen7_render_fill_op_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 			     uint32_t color,
 			     int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -4078,7 +4078,7 @@ gen7_render_fill_one_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		     uint32_t color,
 		     int16_t x1, int16_t y1,
@@ -4096,7 +4096,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (prefer_blt_fill(sna, bo) &&
 	    gen7_render_fill_one_try_blt(sna, dst, bo, color,
 					 x1, y1, x2, y2, alu))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
@@ -4127,11 +4127,11 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -4164,10 +4164,10 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 gen7_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	BoxRec box;
@@ -4182,7 +4182,7 @@ gen7_render_clear_try_blt(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 				  0, &box, 1);
 }
 
-static Bool
+static bool
 gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	struct sna_composite_op tmp;
@@ -4199,7 +4199,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	/* Prefer to use the BLT if already engaged */
 	if (sna->kgem.ring == KGEM_BLT &&
 	    gen7_render_clear_try_blt(sna, dst, bo))
-		return TRUE;
+		return true;
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height))
@@ -4222,11 +4222,11 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
 	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
-	tmp.is_affine = TRUE;
+	tmp.is_affine = true;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 	tmp.has_component_alpha = 0;
-	tmp.need_magic_ca_pass = FALSE;
+	tmp.need_magic_ca_pass = false;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -4258,7 +4258,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen7_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
-	return TRUE;
+	return true;
 }
 
 static void gen7_render_flush(struct sna *sna)
@@ -4317,7 +4317,7 @@ gen7_render_expire(struct kgem *kgem)
 
 static void gen7_render_reset(struct sna *sna)
 {
-	sna->render_state.gen7.needs_invariant = TRUE;
+	sna->render_state.gen7.needs_invariant = true;
 	sna->render_state.gen7.vb_id = 0;
 	sna->render_state.gen7.ve_id = -1;
 	sna->render_state.gen7.last_primitive = -1;
@@ -4336,7 +4336,7 @@ static void gen7_render_fini(struct sna *sna)
 	kgem_bo_destroy(&sna->kgem, sna->render_state.gen7.general_bo);
 }
 
-static Bool gen7_render_setup(struct sna *sna)
+static bool gen7_render_setup(struct sna *sna)
 {
 	struct gen7_render_state *state = &sna->render_state.gen7;
 	struct sna_static_stream general;
@@ -4385,10 +4385,10 @@ static Bool gen7_render_setup(struct sna *sna)
 	return state->general_bo != NULL;
 }
 
-Bool gen7_render_init(struct sna *sna)
+bool gen7_render_init(struct sna *sna)
 {
 	if (!gen7_render_setup(sna))
-		return FALSE;
+		return false;
 
 	sna->kgem.context_switch = gen7_render_context_switch;
 	sna->kgem.retire = gen7_render_retire;
@@ -4414,5 +4414,5 @@ Bool gen7_render_init(struct sna *sna)
 
 	sna->render.max_3d_size = GEN7_MAX_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
-	return TRUE;
+	return true;
 }
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bea7aea..e59811f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -359,7 +359,7 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 	bo->domain = DOMAIN_NONE;
 }
 
-Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
+bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		   const void *data, int length)
 {
 	assert(bo->refcnt);
@@ -369,11 +369,11 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 	assert(length <= bytes(bo));
 	if (gem_write(kgem->fd, bo->handle, 0, length, data))
-		return FALSE;
+		return false;
 
 	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
 	kgem_bo_retire(kgem, bo);
-	return TRUE;
+	return true;
 }
 
 static uint32_t gem_create(int fd, int num_pages)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 273240f..ba110b6 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -406,7 +406,7 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
-Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
+bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		   const void *data, int length);
 
 int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
@@ -513,7 +513,7 @@ static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 {
 	if (bo == NULL)
-		return FALSE;
+		return false;
 
 	return bo->dirty;
 }
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6920343..d7fa71b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -261,7 +261,7 @@ struct sna {
 	OptionInfoPtr Options;
 
 	/* Driver phase/state information */
-	Bool suspended;
+	bool suspended;
 
 #if HAVE_UDEV
 	struct udev_monitor *uevent_monitor;
@@ -281,7 +281,7 @@ struct sna {
 #endif
 };
 
-Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
+bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 void sna_mode_adjust_frame(struct sna *sna, int x, int y);
 extern void sna_mode_update(struct sna *sna);
 extern void sna_mode_disable_unused(struct sna *sna);
@@ -343,7 +343,7 @@ extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
 
 #if HAVE_DRI2_H
-Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
+bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_destroy_window(WindowPtr win);
@@ -478,7 +478,7 @@ static inline struct kgem_bo *sna_pixmap_pin(PixmapPtr pixmap)
 }
 
 
-static inline Bool
+static inline bool
 _sna_transform_point(const PictTransform *transform,
 		     int64_t x, int64_t y, int64_t result[3])
 {
@@ -515,10 +515,10 @@ sna_get_transformed_coordinates_3d(int x, int y,
 				   const PictTransform *transform,
 				   float *x_out, float *y_out, float *z_out);
 
-Bool sna_transform_is_affine(const PictTransform *t);
-Bool sna_transform_is_integer_translation(const PictTransform *t,
+bool sna_transform_is_affine(const PictTransform *t);
+bool sna_transform_is_integer_translation(const PictTransform *t,
 					  int16_t *tx, int16_t *ty);
-Bool sna_transform_is_translation(const PictTransform *t,
+bool sna_transform_is_translation(const PictTransform *t,
 				  pixman_fixed_t *tx, pixman_fixed_t *ty);
 
 static inline bool
@@ -564,10 +564,10 @@ void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
-Bool sna_accel_create(struct sna *sna);
+bool sna_accel_create(struct sna *sna);
 void sna_copy_fbcon(struct sna *sna);
 
-Bool sna_composite_create(struct sna *sna);
+bool sna_composite_create(struct sna *sna);
 void sna_composite_close(struct sna *sna);
 
 void sna_composite(CARD8 op,
@@ -612,7 +612,7 @@ void sna_composite_trifan(CARD8 op,
 			  INT16 xSrc, INT16 ySrc,
 			  int npoints, xPointFixed *points);
 
-Bool sna_gradients_create(struct sna *sna);
+bool sna_gradients_create(struct sna *sna);
 void sna_gradients_close(struct sna *sna);
 
 bool sna_glyphs_create(struct sna *sna);
@@ -651,14 +651,14 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
 				 const void *src, int stride,
 				 uint32_t and, uint32_t or);
 
-Bool
+bool
 sna_compute_composite_extents(BoxPtr extents,
 			      PicturePtr src, PicturePtr mask, PicturePtr dst,
 			      INT16 src_x,  INT16 src_y,
 			      INT16 mask_x, INT16 mask_y,
 			      INT16 dst_x,  INT16 dst_y,
 			      CARD16 width, CARD16 height);
-Bool
+bool
 sna_compute_composite_region(RegionPtr region,
 			     PicturePtr src, PicturePtr mask, PicturePtr dst,
 			     INT16 src_x,  INT16 src_y,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f4921b2..406cbfa 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -353,7 +353,7 @@ sna_fill_init_blt(struct sna_fill_op *fill,
 	return sna->render.fill(sna, alu, pixmap, bo, pixel, fill);
 }
 
-static Bool
+static bool
 sna_copy_init_blt(struct sna_copy_op *copy,
 		  struct sna *sna,
 		  PixmapPtr src, struct kgem_bo *src_bo,
@@ -1077,11 +1077,11 @@ static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
 {
 #if 0
 	if (pixmap->devPrivate.ptr == NULL)
-		return TRUE;
+		return true;
 #endif
 
 	if (priv->cpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
 }
@@ -1284,7 +1284,7 @@ skip_inplace_map:
 
 		n = sna_damage_get_boxes(priv->gpu_damage, &box);
 		if (n) {
-			Bool ok = FALSE;
+			bool ok = false;
 
 			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
@@ -1332,7 +1332,7 @@ done:
 	return true;
 }
 
-static Bool
+static bool
 region_subsumes_drawable(RegionPtr region, DrawablePtr drawable)
 {
 	const BoxRec *extents;
@@ -1738,13 +1738,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		BoxPtr box;
 		int n = sna_damage_get_boxes(priv->gpu_damage, &box);
 		if (n) {
-			Bool ok;
+			bool ok;
 
 			DBG(("%s: forced migration\n", __FUNCTION__));
 
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
-			ok = FALSE;
+			ok = false;
 			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1851,7 +1851,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				n = sna_damage_get_boxes(priv->gpu_damage,
 							 &box);
 				if (n) {
-					Bool ok = FALSE;
+					bool ok = false;
 
 					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
@@ -1873,7 +1873,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 								      &r->extents)) {
 				BoxPtr box = REGION_RECTS(r);
 				int n = REGION_NUM_RECTS(r);
-				Bool ok = FALSE;
+				bool ok = false;
 
 				DBG(("%s: region wholly inside damage\n",
 				     __FUNCTION__));
@@ -1898,7 +1898,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (sna_damage_intersect(priv->gpu_damage, r, &need)) {
 					BoxPtr box = REGION_RECTS(&need);
 					int n = REGION_NUM_RECTS(&need);
-					Bool ok = FALSE;
+					bool ok = false;
 
 					DBG(("%s: region intersects damage\n",
 					     __FUNCTION__));
@@ -2160,7 +2160,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 		n = sna_damage_get_boxes(priv->cpu_damage, (BoxPtr *)&box);
 		if (n) {
-			Bool ok = FALSE;
+			bool ok = false;
 
 			if (use_cpu_bo_for_read(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
@@ -2199,7 +2199,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		priv->undamaged = true;
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
-		Bool ok = FALSE;
+		bool ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2225,10 +2225,10 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		priv->undamaged = true;
 	} else if (sna_damage_intersect(priv->cpu_damage, &r, &i)) {
 		int n = REGION_NUM_RECTS(&i);
-		Bool ok;
+		bool ok;
 
 		box = REGION_RECTS(&i);
-		ok = FALSE;
+		ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2664,12 +2664,12 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	n = sna_damage_get_boxes(priv->cpu_damage, &box);
 	if (n) {
-		Bool ok;
+		bool ok;
 
 		assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
-		ok = FALSE;
+		ok = false;
 		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
@@ -2937,7 +2937,7 @@ static inline void box32_add_rect(Box32Rec *box, const xRectangle *r)
 		box->y2 = v;
 }
 
-static Bool
+static bool
 sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			 int x, int y, int w, int h, char *bits, int stride)
 {
@@ -2956,11 +2956,11 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	if (gc->alu != GXcopy)
-		return FALSE;
+		return false;
 
 	if (priv->gpu_bo == NULL &&
 	    !sna_pixmap_create_mappable_gpu(pixmap))
-		return FALSE;
+		return false;
 
 	assert(priv->gpu_bo);
 	assert(priv->gpu_bo->proxy == NULL);
@@ -3023,7 +3023,7 @@ static bool upload_inplace(struct sna *sna,
 	return false;
 }
 
-static Bool
+static bool
 sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		    int x, int y, int w, int  h, char *bits, int stride)
 {
@@ -3286,7 +3286,7 @@ static inline uint8_t blt_depth(int depth)
 	}
 }
 
-static Bool
+static bool
 sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		     int x, int y, int w, int  h, char *bits)
 {
@@ -3409,7 +3409,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	return true;
 }
 
-static Bool
+static bool
 sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		     int x, int y, int w, int  h, int left,char *bits)
 {
@@ -3825,7 +3825,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	int n = RegionNumRects(region);
 	int stride, bpp;
 	char *bits;
-	Bool replaces;
+	bool replaces;
 
 	if (n == 0)
 		return;
@@ -4482,7 +4482,7 @@ out:
 			   0, NULL);
 }
 
-inline static Bool
+inline static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
@@ -4875,7 +4875,7 @@ sna_fill_spans__dash_clip_boxes(DrawablePtr drawable,
 		sna_fill_spans__fill_clip_boxes(drawable, gc, n, pt, width, sorted);
 }
 
-static Bool
+static bool
 sna_fill_spans_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo, struct sna_damage **damage,
 		   GCPtr gc, uint32_t pixel,
@@ -4971,7 +4971,7 @@ no_damage_clipped:
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		assert(dx + clip.extents.x1 >= 0);
 		assert(dy + clip.extents.y1 >= 0);
@@ -5072,7 +5072,7 @@ damage_clipped:
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		assert(dx + clip.extents.x1 >= 0);
 		assert(dy + clip.extents.y1 >= 0);
@@ -5176,10 +5176,10 @@ damage_clipped:
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
 			     struct sna_damage **damage,
@@ -5952,7 +5952,7 @@ empty:
 				 dst_x, dst_y, bit);
 }
 
-static Bool
+static bool
 sna_poly_point_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo,
 		   struct sna_damage **damage,
@@ -5970,7 +5970,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 	     __FUNCTION__, gc->alu, gc->fgPixel, clipped));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6045,7 +6045,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static unsigned
@@ -6183,7 +6183,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, pixel=%lx, n=%d, clipped=%d, damage=%p\n",
 	     __FUNCTION__, gc->alu, gc->fgPixel, _n, clipped, damage));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6191,7 +6191,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 	if (clipped) {
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 	}
 
 	jump = _jump[(damage != NULL) | !!(dx|dy) << 1];
@@ -6536,7 +6536,7 @@ damage_offset:
 	goto *ret;
 }
 
-static Bool
+static bool
 sna_poly_line_blt(DrawablePtr drawable,
 		  struct kgem_bo *bo,
 		  struct sna_damage **damage,
@@ -6554,7 +6554,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, fg=%08x\n", __FUNCTION__, gc->alu, (unsigned)pixel));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -6616,7 +6616,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 
 		last.x = pt->x + drawable->x;
 		last.y = pt->y + drawable->y;
@@ -6750,7 +6750,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static unsigned
@@ -7217,7 +7217,7 @@ static inline void box_from_seg(BoxPtr b, xSegment *seg, GCPtr gc)
 	     b->x1, b->y1, b->x2, b->y2));
 }
 
-static Bool
+static bool
 sna_poly_segment_blt(DrawablePtr drawable,
 		     struct kgem_bo *bo,
 		     struct sna_damage **damage,
@@ -7235,7 +7235,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 	     __FUNCTION__, n, gc->alu, gc->fgPixel, clipped));
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -7357,7 +7357,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -7389,7 +7389,7 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, pixel=%lx, n=%d, clipped=%d, damage=%p\n",
 	     __FUNCTION__, gc->alu, gc->fgPixel, _n, clipped, damage));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -7397,7 +7397,7 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 	if (clipped) {
 		region_maybe_clip(&clip, gc->pCompositeClip);
 		if (!RegionNotEmpty(&clip))
-			return TRUE;
+			return true;
 	}
 	DBG(("%s: [clipped] extents=(%d, %d), (%d, %d), delta=(%d, %d)\n",
 	     __FUNCTION__,
@@ -8060,7 +8060,7 @@ sna_poly_rectangle_extents(DrawablePtr drawable, GCPtr gc,
 	return 1 | clipped << 1;
 }
 
-static Bool
+static bool
 sna_poly_rectangle_blt(DrawablePtr drawable,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
@@ -8082,7 +8082,7 @@ sna_poly_rectangle_blt(DrawablePtr drawable,
 	DBG(("%s: n=%d, alu=%d, width=%d, fg=%08lx, damge=%p, clipped?=%d\n",
 	     __FUNCTION__, n, gc->alu, gc->lineWidth, gc->fgPixel, damage, clipped));
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
-		return FALSE;
+		return false;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
@@ -8534,7 +8534,7 @@ done:
 	}
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static void
@@ -8804,7 +8804,7 @@ out:
 	RegionUninit(&data.region);
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_blt(DrawablePtr drawable,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
@@ -8873,7 +8873,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 
 	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -8995,7 +8995,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 done:
 	fill.done(sna, &fill);
 	assert_pixmap_damage(pixmap);
-	return TRUE;
+	return true;
 }
 
 static uint32_t
@@ -9192,7 +9192,7 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap)
 	return kgem_bo_reference(priv->gpu_bo);
 }
 
-static Bool
+static bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
 			     struct sna_damage **damage,
@@ -9232,13 +9232,13 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	if (tile_bo == NULL) {
 		DBG(("%s: unable to move tile go GPU, fallback\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_copy_init_blt(&copy, sna, tile, tile_bo, pixmap, bo, alu)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
 		kgem_bo_destroy(&sna->kgem, tile_bo);
-		return FALSE;
+		return false;
 	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -9434,7 +9434,7 @@ done:
 	copy.done(sna, &copy);
 	assert_pixmap_damage(pixmap);
 	kgem_bo_destroy(&sna->kgem, tile_bo);
-	return TRUE;
+	return true;
 }
 
 static bool
@@ -12132,7 +12132,7 @@ static int sna_create_gc(GCPtr gc)
 
 	gc->funcs = (GCFuncs *)&sna_gc_funcs;
 	gc->ops = (GCOps *)&sna_gc_ops;
-	return TRUE;
+	return true;
 }
 
 static void
@@ -12257,7 +12257,7 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 		ret &= sna_validate_pixmap(&win->drawable, win->background.pixmap);
 	}
 
-	if (mask & CWBorderPixmap && win->borderIsPixel == FALSE) {
+	if (mask & CWBorderPixmap && win->borderIsPixel == false) {
 		DBG(("%s: flushing border pixmap\n", __FUNCTION__));
 		ret &= sna_validate_pixmap(&win->drawable, win->border.pixmap);
 	}
@@ -12881,21 +12881,21 @@ bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 		   "SNA initialized with %s backend\n",
 		   backend);
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_accel_create(struct sna *sna)
+bool sna_accel_create(struct sna *sna)
 {
 	if (!sna_glyphs_create(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_gradients_create(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_composite_create(sna))
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 void sna_accel_watch_flush(struct sna *sna, int enable)
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 1d2678a..80fad6d 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -184,7 +184,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 		sna->blt_state.fill_alu = alu;
 	}
 
-	return TRUE;
+	return true;
 }
 
 noinline static void sna_blt_fill_begin(struct sna *sna,
@@ -242,7 +242,7 @@ inline static void sna_blt_fill_one(struct sna *sna,
 	b[2] = b[1] + (height << 16 | width);
 }
 
-static Bool sna_blt_copy_init(struct sna *sna,
+static bool sna_blt_copy_init(struct sna *sna,
 			      struct sna_blt_state *blt,
 			      struct kgem_bo *src,
 			      struct kgem_bo *dst,
@@ -288,15 +288,15 @@ static Bool sna_blt_copy_init(struct sna *sna,
 	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
 		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
-			return FALSE;
+			return false;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
 	sna->blt_state.fill_bo = 0;
-	return TRUE;
+	return true;
 }
 
-static Bool sna_blt_alpha_fixup_init(struct sna *sna,
+static bool sna_blt_alpha_fixup_init(struct sna *sna,
 				     struct sna_blt_state *blt,
 				     struct kgem_bo *src,
 				     struct kgem_bo *dst,
@@ -340,12 +340,12 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
 		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
-			return FALSE;
+			return false;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
 	sna->blt_state.fill_bo = 0;
-	return TRUE;
+	return true;
 }
 
 static void sna_blt_alpha_fixup_one(struct sna *sna,
@@ -468,7 +468,7 @@ static void sna_blt_copy_one(struct sna *sna,
 	kgem->nbatch += 8;
 }
 
-Bool
+bool
 sna_get_rgba_from_pixel(uint32_t pixel,
 			uint16_t *red,
 			uint16_t *green,
@@ -504,7 +504,7 @@ sna_get_rgba_from_pixel(uint32_t pixel,
 		gshift = rshift + rbits;
 		bshift = gshift + gbits;
 	} else {
-		return FALSE;
+		return false;
 	}
 
 	if (rbits) {
@@ -543,10 +543,10 @@ sna_get_rgba_from_pixel(uint32_t pixel,
 	} else
 		*alpha = 0xffff;
 
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 _sna_get_pixel_from_rgba(uint32_t * pixel,
 			uint16_t red,
 			uint16_t green,
@@ -566,11 +566,11 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_A) {
 		*pixel = alpha >> (16 - abits);
-		return TRUE;
+		return true;
 	}
 
 	if (!PICT_FORMAT_COLOR(format))
-		return FALSE;
+		return false;
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_ARGB) {
 		bshift = 0;
@@ -588,7 +588,7 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 		gshift = rshift + rbits;
 		bshift = gshift + gbits;
 	} else
-		return FALSE;
+		return false;
 
 	*pixel = 0;
 	*pixel |= (blue  >> (16 - bbits)) << bshift;
@@ -596,7 +596,7 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 	*pixel |= (red   >> (16 - rbits)) << rshift;
 	*pixel |= (alpha >> (16 - abits)) << ashift;
 
-	return TRUE;
+	return true;
 }
 
 uint32_t
@@ -638,43 +638,43 @@ get_solid_color(PicturePtr picture, uint32_t format)
 		return color_convert(get_pixel(picture), picture->format, format);
 }
 
-static Bool
+static bool
 is_solid(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
 		if (picture->pSourcePict->type == SourcePictTypeSolidFill)
-			return TRUE;
+			return true;
 	}
 
 	if (picture->pDrawable) {
 		if (picture->pDrawable->width  == 1 &&
 		    picture->pDrawable->height == 1 &&
 		    picture->repeat)
-			return TRUE;
+			return true;
 	}
 
-	return FALSE;
+	return false;
 }
 
-Bool
+bool
 sna_picture_is_solid(PicturePtr picture, uint32_t *color)
 {
 	if (!is_solid(picture))
-		return FALSE;
+		return false;
 
 	if (color)
 		*color = get_solid_color(picture, PICT_a8r8g8b8);
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 pixel_is_opaque(uint32_t pixel, uint32_t format)
 {
 	unsigned int abits;
 
 	abits = PICT_FORMAT_A(format);
 	if (!abits)
-		return TRUE;
+		return true;
 
 	if (PICT_FORMAT_TYPE(format) == PICT_TYPE_A ||
 	    PICT_FORMAT_TYPE(format) == PICT_TYPE_BGRA) {
@@ -684,10 +684,10 @@ pixel_is_opaque(uint32_t pixel, uint32_t format)
 		unsigned int ashift = PICT_FORMAT_BPP(format) - abits;
 		return (pixel >> ashift) == (unsigned)((1 << abits) - 1);
 	} else
-		return FALSE;
+		return false;
 }
 
-static Bool
+static bool
 pixel_is_white(uint32_t pixel, uint32_t format)
 {
 	switch (PICT_FORMAT_TYPE(format)) {
@@ -697,11 +697,11 @@ pixel_is_white(uint32_t pixel, uint32_t format)
 	case PICT_TYPE_BGRA:
 		return pixel == ((1U << PICT_FORMAT_BPP(format)) - 1);
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
-static Bool
+static bool
 is_opaque_solid(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
@@ -711,7 +711,7 @@ is_opaque_solid(PicturePtr picture)
 		return pixel_is_opaque(get_pixel(picture), picture->format);
 }
 
-static Bool
+static bool
 is_white(PicturePtr picture)
 {
 	if (picture->pSourcePict) {
@@ -727,7 +727,7 @@ sna_composite_mask_is_opaque(PicturePtr mask)
 	if (mask->componentAlpha && PICT_FORMAT_RGB(mask->format))
 		return is_solid(mask) && is_white(mask);
 	else if (!PICT_FORMAT_A(mask->format))
-		return TRUE;
+		return true;
 	else
 		return is_solid(mask) && is_opaque_solid(mask);
 }
@@ -900,7 +900,20 @@ static void blt_composite_nop_boxes(struct sna *sna,
 {
 }
 
-static Bool
+static bool
+begin_blt(struct sna *sna,
+	  struct sna_composite_op *op)
+{
+	if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo)) {
+		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo_fenced(&sna->kgem, op->dst.bo));
+		_kgem_set_mode(&sna->kgem, KGEM_BLT);
+	}
+
+	return true;
+}
+
+static bool
 prepare_blt_nop(struct sna *sna,
 		struct sna_composite_op *op)
 {
@@ -910,10 +923,10 @@ prepare_blt_nop(struct sna *sna,
 	op->box   = blt_composite_nop_box;
 	op->boxes = blt_composite_nop_boxes;
 	op->done  = nop_done;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 prepare_blt_clear(struct sna *sna,
 		  struct sna_composite_op *op)
 {
@@ -929,10 +942,13 @@ prepare_blt_clear(struct sna *sna,
 	}
 	op->done = nop_done;
 
-	return sna_blt_fill_init(sna, &op->u.blt,
+	if (!sna_blt_fill_init(sna, &op->u.blt,
 				 op->dst.bo,
 				 op->dst.pixmap->drawable.bitsPerPixel,
-				 GXclear, 0);
+				 GXclear, 0))
+		return false;
+
+	return begin_blt(sna, op);
 }
 
 static bool
@@ -952,10 +968,13 @@ prepare_blt_fill(struct sna *sna,
 	}
 	op->done = nop_done;
 
-	return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
-				 op->dst.pixmap->drawable.bitsPerPixel,
-				 GXcopy,
-				 get_solid_color(source, op->dst.format));
+	if (!sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
+			       op->dst.pixmap->drawable.bitsPerPixel,
+			       GXcopy,
+			       get_solid_color(source, op->dst.format)))
+		return false;
+
+	return begin_blt(sna, op);
 }
 
 fastcall static void
@@ -1116,7 +1135,7 @@ blt_composite_copy_boxes_with_alpha(struct sna *sna,
 	} while(--nbox);
 }
 
-static Bool
+static bool
 prepare_blt_copy(struct sna *sna,
 		 struct sna_composite_op *op,
 		 uint32_t alpha_fixup)
@@ -1126,7 +1145,7 @@ prepare_blt_copy(struct sna *sna,
 
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
 		DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
@@ -1134,7 +1153,7 @@ prepare_blt_copy(struct sna *sna,
 		if (!kgem_check_many_bo_fenced(&sna->kgem,
 					       op->dst.bo, priv->gpu_bo, NULL)) {
 			DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
-			return FALSE;
+			return false;
 		}
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
@@ -1151,22 +1170,26 @@ prepare_blt_copy(struct sna *sna,
 		op->box   = blt_composite_copy_box_with_alpha;
 		op->boxes = blt_composite_copy_boxes_with_alpha;
 
-		return sna_blt_alpha_fixup_init(sna, &op->u.blt,
-						priv->gpu_bo,
-						op->dst.bo,
-						src->drawable.bitsPerPixel,
-						alpha_fixup);
+		if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
+					      priv->gpu_bo,
+					      op->dst.bo,
+					      src->drawable.bitsPerPixel,
+					      alpha_fixup))
+			return false;
 	} else {
 		op->blt   = blt_composite_copy;
 		op->box   = blt_composite_copy_box;
 		op->boxes = blt_composite_copy_boxes;
 
-		return sna_blt_copy_init(sna, &op->u.blt,
-					 priv->gpu_bo,
-					 op->dst.bo,
-					 src->drawable.bitsPerPixel,
-					 GXcopy);
+		if (!sna_blt_copy_init(sna, &op->u.blt,
+				       priv->gpu_bo,
+				       op->dst.bo,
+				       src->drawable.bitsPerPixel,
+				       GXcopy))
+			return false;
 	}
+
+	return begin_blt(sna, op);
 }
 
 fastcall static void
@@ -1405,7 +1428,7 @@ blt_put_composite_boxes_with_alpha(struct sna *sna,
 	}
 }
 
-static Bool
+static bool
 prepare_blt_put(struct sna *sna,
 		struct sna_composite_op *op,
 		uint32_t alpha_fixup)
@@ -1428,29 +1451,33 @@ prepare_blt_put(struct sna *sna,
 			op->box   = blt_composite_copy_box_with_alpha;
 			op->boxes = blt_composite_copy_boxes_with_alpha;
 
-			return sna_blt_alpha_fixup_init(sna, &op->u.blt,
-							src_bo, op->dst.bo,
-							op->dst.pixmap->drawable.bitsPerPixel,
-							alpha_fixup);
+			if (!sna_blt_alpha_fixup_init(sna, &op->u.blt,
+						      src_bo, op->dst.bo,
+						      op->dst.pixmap->drawable.bitsPerPixel,
+						      alpha_fixup))
+				return false;
 		} else {
 			op->blt   = blt_composite_copy;
 			op->box   = blt_composite_copy_box;
 			op->boxes = blt_composite_copy_boxes;
 
-			return sna_blt_copy_init(sna, &op->u.blt,
-						 src_bo, op->dst.bo,
-						 op->dst.pixmap->drawable.bitsPerPixel,
-						 GXcopy);
+			if (!sna_blt_copy_init(sna, &op->u.blt,
+					       src_bo, op->dst.bo,
+					       op->dst.pixmap->drawable.bitsPerPixel,
+					       GXcopy))
+				return false;
 		}
+
+		return begin_blt(sna, op);
 	} else {
 		if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
-			return FALSE;
+			return false;
 
 		assert(src->devKind);
 		assert(src->devPrivate.ptr);
 
 		if (alpha_fixup)
-			return FALSE; /* XXX */
+			return false; /* XXX */
 
 		if (alpha_fixup) {
 			op->u.blt.pixel = alpha_fixup;
@@ -1464,24 +1491,24 @@ prepare_blt_put(struct sna *sna,
 		}
 	}
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	BoxRec area;
 
 	if (!priv)
-		return FALSE;
+		return false;
 	if (!priv->gpu_bo)
-		return FALSE;
+		return false;
 
 	if (priv->cpu_damage == NULL)
-		return TRUE;
+		return true;
 	if (priv->cpu_damage->mode == DAMAGE_ALL)
-		return FALSE;
+		return false;
 
 	area.x1 = x;
 	area.y1 = y;
@@ -1489,24 +1516,24 @@ has_gpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 	area.y2 = y + h;
 	if (priv->gpu_damage &&
 	    sna_damage_contains_box__no_reduce(priv->gpu_damage, &area))
-		return TRUE;
+		return true;
 
 	return sna_damage_contains_box(priv->cpu_damage,
 				       &area) == PIXMAN_REGION_OUT;
 }
 
-static Bool
+static bool
 has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	BoxRec area;
 
 	if (!priv)
-		return TRUE;
+		return true;
 	if (priv->gpu_damage == NULL)
-		return TRUE;
+		return true;
 	if (priv->gpu_damage->mode == DAMAGE_ALL)
-		return FALSE;
+		return false;
 
 	area.x1 = x;
 	area.y1 = y;
@@ -1514,7 +1541,7 @@ has_cpu_area(PixmapPtr pixmap, int x, int y, int w, int h)
 	area.y2 = y + h;
 	if (priv->cpu_damage &&
 	    sna_damage_contains_box__no_reduce(priv->cpu_damage, &area))
-		return TRUE;
+		return true;
 
 	return sna_damage_contains_box(priv->gpu_damage,
 				       &area) == PIXMAN_REGION_OUT;
@@ -1552,7 +1579,7 @@ reduce_damage(struct sna_composite_op *op,
 				      PICT_FORMAT_G(format),		\
 				      PICT_FORMAT_B(format))
 
-Bool
+bool
 sna_blt_composite(struct sna *sna,
 		  uint32_t op,
 		  PicturePtr src,
@@ -1568,10 +1595,10 @@ sna_blt_composite(struct sna *sna,
 	int16_t tx, ty;
 	uint32_t alpha_fixup;
 	bool was_clear;
-	Bool ret;
+	bool ret;
 
 #if DEBUG_NO_BLT || NO_BLT_COMPOSITE
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s (%d, %d), (%d, %d), %dx%d\n",
@@ -1585,7 +1612,7 @@ sna_blt_composite(struct sna *sna,
 	default:
 		DBG(("%s: unhandled bpp: %d\n", __FUNCTION__,
 		     dst->pDrawable->bitsPerPixel));
-		return FALSE;
+		return false;
 	}
 
 	was_clear = sna_drawable_is_clear(dst->pDrawable);
@@ -1593,12 +1620,12 @@ sna_blt_composite(struct sna *sna,
 	priv = sna_pixmap_move_to_gpu(tmp->dst.pixmap, MOVE_WRITE | MOVE_READ);
 	if (priv == NULL) {
 		DBG(("%s: dst not attached\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
 		DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
 		     __FUNCTION__, priv->gpu_bo->tiling, priv->gpu_bo->pitch));
-		return FALSE;
+		return false;
 	}
 
 	tmp->dst.format = dst->format;
@@ -1613,12 +1640,6 @@ sna_blt_composite(struct sna *sna,
 	if (width && height)
 		reduce_damage(tmp, dst_x, dst_y, width, height);
 
-	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo)) {
-		_kgem_submit(&sna->kgem);
-		assert(kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo));
-		_kgem_set_mode(&sna->kgem, KGEM_BLT);
-	}
-
 	if (op == PictOpClear) {
 clear:
 		if (was_clear)
@@ -1640,7 +1661,7 @@ clear:
 		if (op != PictOpSrc) {
 			DBG(("%s: unsuported op [%d] for blitting\n",
 			     __FUNCTION__, op));
-			return FALSE;
+			return false;
 		}
 
 		return prepare_blt_fill(sna, tmp, src);
@@ -1649,13 +1670,13 @@ clear:
 	if (!src->pDrawable) {
 		DBG(("%s: unsuported procedural source\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (src->filter == PictFilterConvolution) {
 		DBG(("%s: convolutions filters not handled\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (op == PictOpOver && PICT_FORMAT_A(src_format) == 0)
@@ -1664,25 +1685,25 @@ clear:
 	if (op != PictOpSrc) {
 		DBG(("%s: unsuported op [%d] for blitting\n",
 		     __FUNCTION__, op));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_transform_is_integer_translation(src->transform, &tx, &ty)) {
 		DBG(("%s: source transform is not an integer translation\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 	x += tx;
 	y += ty;
 
-	if ((x > src->pDrawable->width ||
-	     y > src->pDrawable->height ||
-	     x + width < 0 ||
-	     y + height < 0) &&
+	if ((x >= src->pDrawable->width ||
+	     y >= src->pDrawable->height ||
+	     x + width <= 0 ||
+	     y + height <= 0) &&
 	    (!src->repeat || src->repeatType == RepeatNone)) {
 		DBG(("%s: source is outside of valid area, converting to clear\n",
 		     __FUNCTION__));
-		return prepare_blt_clear(sna, tmp);
+		goto clear;
 	}
 
 	alpha_fixup = 0;
@@ -1694,7 +1715,7 @@ clear:
 				       dst->format)))) {
 		DBG(("%s: incompatible src/dst formats src=%08x, dst=%08x\n",
 		     __FUNCTION__, (unsigned)src_format, dst->format));
-		return FALSE;
+		return false;
 	}
 
 	/* XXX tiling? fixup extend none? */
@@ -1704,7 +1725,7 @@ clear:
 		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid drawable %dx%d\n",
 		     __FUNCTION__,
 		     x, y, x+width, y+width, src->pDrawable->width, src->pDrawable->height));
-		return FALSE;
+		return false;
 	}
 
 	src_pixmap = get_drawable_pixmap(src->pDrawable);
@@ -1717,7 +1738,7 @@ clear:
 		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid pixmap %dx%d\n",
 		     __FUNCTION__,
 		     x, y, x+width, y+width, src_pixmap->drawable.width, src_pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	tmp->u.blt.src_pixmap = src_pixmap;
@@ -1773,7 +1794,7 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
 		  struct sna_fill_op *fill)
 {
 #if DEBUG_NO_BLT || NO_BLT_FILL
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s(alu=%d, pixel=%x, bpp=%d)\n", __FUNCTION__, alu, pixel, bpp));
@@ -1781,18 +1802,18 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
 	if (!kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: rejected due to incompatible Y-tiling\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (!sna_blt_fill_init(sna, &fill->base.u.blt,
 			       bo, bpp, alu, pixel))
-		return FALSE;
+		return false;
 
 	fill->blt   = sna_blt_fill_op_blt;
 	fill->box   = sna_blt_fill_op_box;
 	fill->boxes = sna_blt_fill_op_boxes;
 	fill->done  = sna_blt_fill_op_done;
-	return TRUE;
+	return true;
 }
 
 static void sna_blt_copy_op_blt(struct sna *sna,
@@ -1825,26 +1846,26 @@ bool sna_blt_copy(struct sna *sna, uint8_t alu,
 		  struct sna_copy_op *op)
 {
 #if DEBUG_NO_BLT || NO_BLT_COPY
-	return FALSE;
+	return false;
 #endif
 
 	if (!kgem_bo_can_blt(&sna->kgem, src))
-		return FALSE;
+		return false;
 
 	if (!kgem_bo_can_blt(&sna->kgem, dst))
-		return FALSE;
+		return false;
 
 	if (!sna_blt_copy_init(sna, &op->base.u.blt,
 			       src, dst,
 			       bpp, alu))
-		return FALSE;
+		return false;
 
 	op->blt  = sna_blt_copy_op_blt;
 	if (sna->kgem.gen >= 60)
 		op->done = gen6_blt_copy_op_done;
 	else
 		op->done = sna_blt_copy_op_done;
-	return TRUE;
+	return true;
 }
 
 static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
@@ -1945,7 +1966,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	return true;
 }
 
-Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *bo, int bpp,
 			uint32_t pixel,
 			const BoxRec *box, int nbox)
@@ -1954,7 +1975,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	uint32_t br13, cmd;
 
 #if DEBUG_NO_BLT || NO_BLT_FILL_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s (%d, %08x, %d) x %d\n",
@@ -1962,7 +1983,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 
 	if (!kgem_bo_can_blt(kgem, bo)) {
 		DBG(("%s: fallback -- dst uses Y-tiling\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (alu == GXclear)
@@ -1975,7 +1996,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	if (nbox == 1 && sna_blt_fill_box(sna, alu, bo, bpp, pixel, box))
-		return TRUE;
+		return true;
 
 	br13 = bo->pitch;
 	cmd = XY_SCANLINE_BLT;
@@ -2090,10 +2111,10 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 		}
 	} while (nbox);
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			int bpp, const BoxRec *box, int nbox)
@@ -2102,7 +2123,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	unsigned src_pitch, br13, cmd;
 
 #if DEBUG_NO_BLT || NO_BLT_COPY_BOXES
-	return FALSE;
+	return false;
 #endif
 
 	DBG(("%s src=(%d, %d) -> (%d, %d) x %d, tiling=(%d, %d), pitch=(%d, %d)\n",
@@ -2115,7 +2136,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		     __FUNCTION__,
 		     kgem_bo_can_blt(kgem, src_bo),
 		     kgem_bo_can_blt(kgem, dst_bo)));
-		return FALSE;
+		return false;
 	}
 
 	cmd = XY_SRC_COPY_BLT_CMD;
@@ -2227,7 +2248,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		kgem->nbatch += 3;
 	}
 
-	return TRUE;
+	return true;
 }
 
 static void box_extents(const BoxRec *box, int n, BoxRec *extents)
@@ -2247,20 +2268,20 @@ static void box_extents(const BoxRec *box, int n, BoxRec *extents)
 	}
 }
 
-Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				 PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 				 PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 				 const BoxRec *box, int nbox)
 {
 	struct kgem_bo *free_bo = NULL;
-	Bool ret;
+	bool ret;
 
 	DBG(("%s: alu=%d, n=%d\n", __FUNCTION__, alu, nbox));
 
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable)) {
 		DBG(("%s: mismatching depths %d -> %d\n",
 		     __FUNCTION__, src->drawable.depth, dst->drawable.depth));
-		return FALSE;
+		return false;
 	}
 
 	if (src_bo == dst_bo) {
@@ -2289,7 +2310,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				if (free_bo == NULL) {
 					DBG(("%s: fallback -- temp allocation failed\n",
 					     __FUNCTION__));
-					return FALSE;
+					return false;
 				}
 
 				if (!sna_blt_copy_boxes(sna, GXcopy,
@@ -2300,7 +2321,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 					DBG(("%s: fallback -- temp copy failed\n",
 					     __FUNCTION__));
 					kgem_bo_destroy(&sna->kgem, free_bo);
-					return FALSE;
+					return false;
 				}
 
 				src_dx = -extents.x1;
@@ -2318,7 +2339,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 			if (src_bo == NULL) {
 				DBG(("%s: fallback -- src y-tiling conversion failed\n",
 				     __FUNCTION__));
-				return FALSE;
+				return false;
 			}
 		}
 
@@ -2330,7 +2351,7 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 			if (dst_bo == NULL) {
 				DBG(("%s: fallback -- dst y-tiling conversion failed\n",
 				     __FUNCTION__));
-				return FALSE;
+				return false;
 			}
 		}
 	}
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 2fcc0d9..60179c4 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -41,7 +41,7 @@
 
 #define BOUND(v)	(INT16) ((v) < MINSHORT ? MINSHORT : (v) > MAXSHORT ? MAXSHORT : (v))
 
-Bool sna_composite_create(struct sna *sna)
+bool sna_composite_create(struct sna *sna)
 {
 	xRenderColor color ={ 0 };
 	int error;
@@ -101,7 +101,7 @@ clip_to_dst(pixman_region16_t *region,
 			return FALSE;
 		}
 
-		return TRUE;
+		return true;
 	} else if (region_is_empty(clip)) {
 		return FALSE;
 	} else {
@@ -116,13 +116,13 @@ clip_to_dst(pixman_region16_t *region,
 	}
 }
 
-static inline Bool
+static inline bool
 clip_to_src(RegionPtr region, PicturePtr p, int dx, int	 dy)
 {
-	Bool result;
+	bool result;
 
 	if (p->clientClipType == CT_NONE)
-		return TRUE;
+		return true;
 
 	pixman_region_translate(p->clientClip,
 				p->clipOrigin.x + dx,
@@ -137,7 +137,7 @@ clip_to_src(RegionPtr region, PicturePtr p, int dx, int	 dy)
 	return result && !region_is_empty(region);
 }
 
-Bool
+bool
 sna_compute_composite_region(RegionPtr region,
 			     PicturePtr src, PicturePtr mask, PicturePtr dst,
 			     INT16 src_x,  INT16 src_y,
@@ -302,7 +302,7 @@ trim_source_extents(BoxPtr extents, const PicturePtr p, int dx, int dy)
 	     extents->x2, extents->y2));
 }
 
-Bool
+bool
 sna_compute_composite_extents(BoxPtr extents,
 			      PicturePtr src, PicturePtr mask, PicturePtr dst,
 			      INT16 src_x,  INT16 src_y,
@@ -604,7 +604,7 @@ static int16_t bound(int16_t a, uint16_t b)
 	return v;
 }
 
-static Bool
+static bool
 _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 				       unsigned int num_rects,
 				       xRectangle *rects,
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 4bd4b9b..7d78372 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1310,23 +1310,23 @@ bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage,
 						(BoxPtr)box) == PIXMAN_REGION_IN;
 }
 
-static Bool __sna_damage_intersect(struct sna_damage *damage,
+static bool __sna_damage_intersect(struct sna_damage *damage,
 				   RegionPtr region, RegionPtr result)
 {
 	assert(damage && damage->mode != DAMAGE_ALL);
 	if (region->extents.x2 <= damage->extents.x1 ||
 	    region->extents.x1 >= damage->extents.x2)
-		return FALSE;
+		return false;
 
 	if (region->extents.y2 <= damage->extents.y1 ||
 	    region->extents.y1 >= damage->extents.y2)
-		return FALSE;
+		return false;
 
 	if (damage->dirty)
 		__sna_damage_reduce(damage);
 
 	if (!pixman_region_not_empty(&damage->region))
-		return FALSE;
+		return false;
 
 	RegionNull(result);
 	RegionIntersect(result, &damage->region, region);
@@ -1335,12 +1335,12 @@ static Bool __sna_damage_intersect(struct sna_damage *damage,
 }
 
 #if HAS_DEBUG_FULL
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			   RegionPtr region, RegionPtr result)
 {
 	char damage_buf[1000];
 	char region_buf[120];
-	Bool ret;
+	bool ret;
 
 	ErrorF("%s(%s, %s)...\n", __FUNCTION__,
 	       _debug_describe_damage(damage_buf, sizeof(damage_buf), damage),
@@ -1356,7 +1356,7 @@ Bool _sna_damage_intersect(struct sna_damage *damage,
 	return ret;
 }
 #else
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			  RegionPtr region, RegionPtr result)
 {
 	return __sna_damage_intersect(damage, region, result);
@@ -1563,16 +1563,16 @@ static bool st_check_equal(struct sna_damage_selftest *test,
 	if (d_num != r_num) {
 		ErrorF("%s: damage and ref contain different number of rectangles\n",
 		       __FUNCTION__);
-		return FALSE;
+		return false;
 	}
 
 	if (memcmp(d_boxes, r_boxes, d_num*sizeof(BoxRec))) {
 		ErrorF("%s: damage and ref contain different rectangles\n",
 		       __FUNCTION__);
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
 void sna_damage_selftest(void)
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 21db3e3..5e800b7 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -178,10 +178,10 @@ static inline void sna_damage_subtract_boxes(struct sna_damage **damage,
 	assert(*damage == NULL || (*damage)->mode != DAMAGE_ALL);
 }
 
-Bool _sna_damage_intersect(struct sna_damage *damage,
+bool _sna_damage_intersect(struct sna_damage *damage,
 			  RegionPtr region, RegionPtr result);
 
-static inline Bool sna_damage_intersect(struct sna_damage *damage,
+static inline bool sna_damage_intersect(struct sna_damage *damage,
 					RegionPtr region, RegionPtr result)
 {
 	assert(damage);
@@ -197,13 +197,13 @@ sna_damage_overlaps_box(const struct sna_damage *damage,
 {
 	if (box->x2 <= damage->extents.x1 ||
 	    box->x1 >= damage->extents.x2)
-		return FALSE;
+		return false;
 
 	if (box->y2 <= damage->extents.y1 ||
 	    box->y1 >= damage->extents.y2)
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 int _sna_damage_contains_box(struct sna_damage *damage,
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9140caf..0928f6a 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -82,7 +82,7 @@ struct sna_output {
 	int num_props;
 	struct sna_property *props;
 
-	Bool has_panel_limits;
+	bool has_panel_limits;
 	int panel_hdisplay;
 	int panel_vdisplay;
 
@@ -506,7 +506,7 @@ sna_crtc_force_outputs_on(xf86CrtcPtr crtc)
 	}
 }
 
-static Bool
+static bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
@@ -515,7 +515,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	struct drm_mode_crtc arg;
 	uint32_t output_ids[16];
 	int output_count = 0;
-	int i, ret = FALSE;
+	int i;
 
 	DBG(("%s\n", __FUNCTION__));
 	kgem_bo_submit(&sna->kgem, sna_crtc->bo);
@@ -560,12 +560,11 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     sna_crtc->shadow ? " [shadow]" : "",
 	     output_count));
 
-	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
-	if (ret)
-		return FALSE;
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg))
+		return false;
 
 	sna_crtc_force_outputs_on(crtc);
-	return TRUE;
+	return true;
 }
 
 static bool sna_mode_enable_shadow(struct sna *sna)
@@ -1357,7 +1356,7 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	     __FUNCTION__, num, sna_crtc->id, sna_crtc->pipe));
 }
 
-static Bool
+static bool
 is_panel(int type)
 {
 	return (type == DRM_MODE_CONNECTOR_LVDS ||
@@ -1561,7 +1560,7 @@ sna_output_get_modes(xf86OutputPtr output)
 	 * the fullscreen experience.
 	 * If it is incorrect, please fix me.
 	 */
-	sna_output->has_panel_limits = FALSE;
+	sna_output->has_panel_limits = false;
 	if (is_panel(koutput->connector_type)) {
 		for (i = 0; i < koutput->count_modes; i++) {
 			drmModeModeInfo *mode_ptr;
@@ -1684,22 +1683,22 @@ sna_output_dpms_status(xf86OutputPtr output)
 	return sna_output->dpms_mode;
 }
 
-static Bool
+static bool
 sna_property_ignore(drmModePropertyPtr prop)
 {
 	if (!prop)
-		return TRUE;
+		return true;
 
 	/* ignore blob prop */
 	if (prop->flags & DRM_MODE_PROP_BLOB)
-		return TRUE;
+		return true;
 
 	/* ignore standard property */
 	if (!strcmp(prop->name, "EDID") ||
 	    !strcmp(prop->name, "DPMS"))
-		return TRUE;
+		return true;
 
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2015,7 +2014,7 @@ sna_zaphod_match(const char *s, const char *output)
 		s++;
 	} while (i < sizeof(t));
 
-	return FALSE;
+	return false;
 }
 
 static void
@@ -2347,7 +2346,7 @@ static void set_size_range(struct sna *sna)
 	xf86CrtcSetSizeRange(sna->scrn, 320, 200, INT16_MAX, INT16_MAX);
 }
 
-Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
+bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 {
 	struct sna_mode *mode = &sna->mode;
 	int i;
@@ -2361,7 +2360,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 	if (!mode->kmode) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "failed to get resources: %s\n", strerror(errno));
-		return FALSE;
+		return false;
 	}
 
 	set_size_range(sna);
@@ -2374,7 +2373,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 
 	xf86InitialConfiguration(scrn, TRUE);
 
-	return TRUE;
+	return true;
 }
 
 void
@@ -2576,7 +2575,7 @@ sna_wait_for_scanline(struct sna *sna,
 		      xf86CrtcPtr crtc,
 		      const BoxRec *clip)
 {
-	Bool full_height;
+	bool full_height;
 	int y1, y2, pipe;
 
 	assert(crtc);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 405a7cd..6afeb51 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -864,7 +864,7 @@ sna_dri_frame_event_info_free(struct sna *sna,
 	free(info);
 }
 
-static Bool
+static bool
 sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 {
 	struct kgem_bo *bo = get_private(info->back)->bo;
@@ -873,7 +873,7 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
-		return FALSE;
+		return false;
 
 	info->old_front.name = info->front->name;
 	info->old_front.bo = get_private(info->front)->bo;
@@ -882,10 +882,10 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	info->front->name = info->back->name;
 	get_private(info->front)->bo = bo;
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 can_flip(struct sna * sna,
 	 DrawablePtr draw,
 	 DRI2BufferPtr front,
@@ -895,22 +895,22 @@ can_flip(struct sna * sna,
 	PixmapPtr pixmap;
 
 	if (draw->type == DRAWABLE_PIXMAP)
-		return FALSE;
+		return false;
 
 	if (!sna->scrn->vtSema) {
 		DBG(("%s: no, not attached to VT\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (sna->flags & SNA_NO_FLIP) {
 		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
 		     __FUNCTION__, front->format, back->format));
-		return FALSE;
+		return false;
 	}
 
 	if (front->attachment != DRI2BufferFrontLeft) {
@@ -918,19 +918,19 @@ can_flip(struct sna * sna,
 		     __FUNCTION__,
 		     front->attachment,
 		     DRI2BufferFrontLeft));
-		return FALSE;
+		return false;
 	}
 
 	if (sna->mode.shadow_active) {
 		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	pixmap = get_drawable_pixmap(draw);
 	if (pixmap != sna->front) {
 		DBG(("%s: no, window is not on the front buffer\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	DBG(("%s: window size: %dx%d, clip=(%d, %d), (%d, %d)\n",
@@ -949,7 +949,7 @@ can_flip(struct sna * sna,
 		     draw->pScreen->root->winSize.extents.y1,
 		     draw->pScreen->root->winSize.extents.x2,
 		     draw->pScreen->root->winSize.extents.y2));
-		return FALSE;
+		return false;
 	}
 
 	if (draw->x != 0 || draw->y != 0 ||
@@ -964,7 +964,7 @@ can_flip(struct sna * sna,
 		     draw->width, draw->height,
 		     pixmap->drawable.width,
 		     pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
 	/* prevent an implicit tiling mode change */
@@ -973,13 +973,13 @@ can_flip(struct sna * sna,
 		     __FUNCTION__,
 		     get_private(front)->bo->tiling,
 		     get_private(back)->bo->tiling));
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 can_exchange(struct sna * sna,
 	     DrawablePtr draw,
 	     DRI2BufferPtr front,
@@ -989,19 +989,19 @@ can_exchange(struct sna * sna,
 	PixmapPtr pixmap;
 
 	if (draw->type == DRAWABLE_PIXMAP)
-		return TRUE;
+		return true;
 
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
 		     __FUNCTION__, front->format, back->format));
-		return FALSE;
+		return false;
 	}
 
 	pixmap = get_window_pixmap(win);
 	if (pixmap == sna->front) {
 		DBG(("%s: no, window is attached to the front buffer\n",
 		     __FUNCTION__));
-		return FALSE;
+		return false;
 	}
 
 	if (pixmap->drawable.width != win->drawable.width ||
@@ -1012,10 +1012,10 @@ can_exchange(struct sna * sna,
 		     win->drawable.height,
 		     pixmap->drawable.width,
 		     pixmap->drawable.height));
-		return FALSE;
+		return false;
 	}
 
-	return TRUE;
+	return true;
 }
 
 inline static uint32_t pipe_select(int pipe)
@@ -1234,7 +1234,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
-		return FALSE;
+		return false;
 
 	set_bo(sna->front, bo);
 
@@ -1251,7 +1251,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	sna->dri.flip_pending = info;
 
-	return TRUE;
+	return true;
 }
 
 static void sna_dri_flip_event(struct sna *sna,
@@ -1404,7 +1404,7 @@ sna_dri_page_flip_handler(struct sna *sna,
 	sna_dri_flip_event(sna, info);
 }
 
-static int
+static bool
 sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		      DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
 		      CARD64 remainder, DRI2SwapEventPtr func, void *data)
@@ -1425,7 +1425,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1)
-		return FALSE;
+		return false;
 
 	/* Truncate to match kernel interfaces; means occasional overflow
 	 * misses, but that's generally not a big deal */
@@ -1441,7 +1441,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			if (info->draw == draw) {
 				DBG(("%s: chaining flip\n", __FUNCTION__));
 				info->next_front.name = 1;
-				return TRUE;
+				return true;
 			} else {
 				/* We need to first wait (one vblank) for the
 				 * async flips to complete before this client
@@ -1455,7 +1455,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (info == NULL)
-			return FALSE;
+			return false;
 
 		info->type = type;
 
@@ -1474,7 +1474,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (!sna_dri_page_flip(sna, info)) {
 			DBG(("%s: failed to queue page flip\n", __FUNCTION__));
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		if (type != DRI2_FLIP) {
@@ -1497,7 +1497,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	} else {
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (info == NULL)
-			return FALSE;
+			return false;
 
 		info->draw = draw;
 		info->client = client;
@@ -1517,7 +1517,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence = 0;
 		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		current_msc = vbl.reply.sequence;
@@ -1573,13 +1573,13 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(sna, draw, info);
-			return FALSE;
+			return false;
 		}
 
 		info->frame = *target_msc;
 	}
 
-	return TRUE;
+	return true;
 }
 
 static void
@@ -2121,7 +2121,7 @@ out_complete:
 }
 #endif
 
-Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
+bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 {
 	DRI2InfoRec info;
 	int major = 1, minor = 0;
@@ -2134,7 +2134,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable DRI2 whilst the GPU is wedged\n");
-		return FALSE;
+		return false;
 	}
 
 	if (xf86LoaderCheckSymbol("DRI2Version"))
@@ -2143,7 +2143,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	if (minor < 1) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "DRI2 requires DRI2 module version 1.1.0 or later\n");
-		return FALSE;
+		return false;
 	}
 
 	sna->deviceName = drmGetDeviceNameFromFd(sna->kgem.fd);
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index d9f6293..a364c11 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -85,12 +85,12 @@ sna_gradient_sample_width(PictGradient *gradient)
 	return min(width, 1024);
 }
 
-static Bool
+static bool
 _gradient_color_stops_equal(PictGradient *pattern,
 			    struct sna_gradient_cache *cache)
 {
     if (cache->nstops != pattern->nstops)
-	    return FALSE;
+	    return false;
 
     return memcmp(cache->stops,
 		  pattern->stops,
@@ -323,7 +323,7 @@ done:
 	return kgem_bo_reference(cache->bo[i]);
 }
 
-static Bool sna_alpha_cache_init(struct sna *sna)
+static bool sna_alpha_cache_init(struct sna *sna)
 {
 	struct sna_alpha_cache *cache = &sna->render.alpha_cache;
 	uint32_t color[256];
@@ -333,7 +333,7 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(color), 0);
 	if (!cache->cache_bo)
-		return FALSE;
+		return false;
 
 	for (i = 0; i < 256; i++) {
 		color[i] = i << 24;
@@ -342,14 +342,14 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 						 sizeof(uint32_t)*i,
 						 sizeof(uint32_t));
 		if (cache->bo[i] == NULL)
-			return FALSE;
+			return false;
 
 		cache->bo[i]->pitch = 4;
 	}
 	return kgem_bo_write(&sna->kgem, cache->cache_bo, color, sizeof(color));
 }
 
-static Bool sna_solid_cache_init(struct sna *sna)
+static bool sna_solid_cache_init(struct sna *sna)
 {
 	struct sna_solid_cache *cache = &sna->render.solid_cache;
 
@@ -358,7 +358,7 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	cache->cache_bo =
 		kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
 	if (!cache->cache_bo)
-		return FALSE;
+		return false;
 
 	/*
 	 * Initialise [0] with white since it is very common and filling the
@@ -368,27 +368,27 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 0, sizeof(uint32_t));
 	if (cache->bo[0] == NULL)
-		return FALSE;
+		return false;
 
 	cache->bo[0]->pitch = 4;
 	cache->dirty = 1;
 	cache->size = 1;
 	cache->last = 0;
 
-	return TRUE;
+	return true;
 }
 
-Bool sna_gradients_create(struct sna *sna)
+bool sna_gradients_create(struct sna *sna)
 {
 	DBG(("%s\n", __FUNCTION__));
 
 	if (!sna_alpha_cache_init(sna))
-		return FALSE;
+		return false;
 
 	if (!sna_solid_cache_init(sna))
-		return FALSE;
+		return false;
 
-	return TRUE;
+	return true;
 }
 
 void sna_gradients_close(struct sna *sna)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 2baee4c..f1df84a 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -41,7 +41,7 @@
 
 /* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
 
-static Bool
+static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 24922b3..546148d 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -70,7 +70,7 @@ sna_render_format_for_depth(int depth)
 	}
 }
 
-static Bool
+static bool
 no_render_composite(struct sna *sna,
 		    uint8_t op,
 		    PicturePtr src,
@@ -98,7 +98,7 @@ no_render_composite(struct sna *sna,
 	(void)mask_y;
 }
 
-static Bool
+static bool
 no_render_copy_boxes(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		     PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -116,7 +116,7 @@ no_render_copy_boxes(struct sna *sna, uint8_t alu,
 				  box, n);
 }
 
-static Bool
+static bool
 no_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr src, struct kgem_bo *src_bo,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
@@ -133,7 +133,7 @@ no_render_copy(struct sna *sna, uint8_t alu,
 	return FALSE;
 }
 
-static Bool
+static bool
 no_render_fill_boxes(struct sna *sna,
 		     CARD8 op,
 		     PictFormat format,
@@ -176,7 +176,7 @@ no_render_fill_boxes(struct sna *sna,
 				  pixel, box, n);
 }
 
-static Bool
+static bool
 no_render_fill(struct sna *sna, uint8_t alu,
 	       PixmapPtr dst, struct kgem_bo *dst_bo,
 	       uint32_t color,
@@ -189,7 +189,7 @@ no_render_fill(struct sna *sna, uint8_t alu,
 			    tmp);
 }
 
-static Bool
+static bool
 no_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		   uint32_t color,
 		   int16_t x1, int16_t y1, int16_t x2, int16_t y2,
@@ -209,7 +209,7 @@ no_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 				  color, &box, 1);
 }
 
-static Bool
+static bool
 no_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 {
 	DBG(("%s: pixmap=%ld %dx%d\n", __FUNCTION__,
@@ -1717,7 +1717,7 @@ sna_render_picture_convert(struct sna *sna,
 	return 1;
 }
 
-Bool
+bool
 sna_render_composite_redirect(struct sna *sna,
 			      struct sna_composite_op *op,
 			      int x, int y, int width, int height)
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index fae5872..b003e7b 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -192,7 +192,7 @@ struct sna_render {
 	int max_3d_size;
 	int max_3d_pitch;
 
-	Bool (*composite)(struct sna *sna, uint8_t op,
+	bool (*composite)(struct sna *sna, uint8_t op,
 			  PicturePtr dst, PicturePtr src, PicturePtr mask,
 			  int16_t src_x, int16_t src_y,
 			  int16_t msk_x, int16_t msk_y,
@@ -200,7 +200,7 @@ struct sna_render {
 			  int16_t w, int16_t h,
 			  struct sna_composite_op *tmp);
 
-	Bool (*composite_spans)(struct sna *sna, uint8_t op,
+	bool (*composite_spans)(struct sna *sna, uint8_t op,
 				PicturePtr dst, PicturePtr src,
 				int16_t src_x, int16_t src_y,
 				int16_t dst_x, int16_t dst_y,
@@ -210,7 +210,7 @@ struct sna_render {
 #define COMPOSITE_SPANS_RECTILINEAR 0x1
 #define COMPOSITE_SPANS_INPLACE_HINT 0x2
 
-	Bool (*video)(struct sna *sna,
+	bool (*video)(struct sna *sna,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
 		      RegionPtr dstRegion,
@@ -218,29 +218,29 @@ struct sna_render {
 		      short drw_w, short drw_h,
 		      PixmapPtr pixmap);
 
-	Bool (*fill_boxes)(struct sna *sna,
+	bool (*fill_boxes)(struct sna *sna,
 			   CARD8 op,
 			   PictFormat format,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
-	Bool (*fill)(struct sna *sna, uint8_t alu,
+	bool (*fill)(struct sna *sna, uint8_t alu,
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
 		     uint32_t color,
 		     struct sna_fill_op *tmp);
-	Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
+	bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo,
 			 uint32_t color,
 			 int16_t x1, int16_t y1, int16_t x2, int16_t y2,
 			 uint8_t alu);
-	Bool (*clear)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo);
+	bool (*clear)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo);
 
-	Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
+	bool (*copy_boxes)(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			   const BoxRec *box, int n, unsigned flags);
 #define COPY_LAST 0x1
 
-	Bool (*copy)(struct sna *sna, uint8_t alu,
+	bool (*copy)(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo,
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
 		     struct sna_copy_op *op);
@@ -295,7 +295,7 @@ struct sna_render {
 
 struct gen2_render_state {
 	uint32_t target;
-	Bool need_invariant;
+	bool need_invariant;
 	uint32_t logic_op_enabled;
 	uint32_t ls1, ls2, vft;
 	uint32_t diffuse;
@@ -305,7 +305,7 @@ struct gen2_render_state {
 
 struct gen3_render_state {
 	uint32_t current_dst;
-	Bool need_invariant;
+	bool need_invariant;
 	uint32_t tex_count;
 	uint32_t last_drawrect_limit;
 	uint32_t last_target;
@@ -344,8 +344,8 @@ struct gen4_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool needs_urb;
+	bool needs_invariant;
+	bool needs_urb;
 };
 
 struct gen5_render_state {
@@ -366,7 +366,7 @@ struct gen5_render_state {
 	uint16_t surface_table;
 	uint16_t last_pipelined_pointers;
 
-	Bool needs_invariant;
+	bool needs_invariant;
 };
 
 enum {
@@ -413,8 +413,8 @@ struct gen6_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool first_state_packet;
+	bool needs_invariant;
+	bool first_state_packet;
 };
 
 enum {
@@ -462,8 +462,8 @@ struct gen7_render_state {
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
 
-	Bool needs_invariant;
-	Bool emit_flush;
+	bool needs_invariant;
+	bool emit_flush;
 };
 
 struct sna_static_stream {
@@ -494,24 +494,24 @@ sna_render_get_gradient(struct sna *sna,
 
 uint32_t sna_rgba_for_color(uint32_t color, int depth);
 uint32_t sna_rgba_to_color(uint32_t rgba, uint32_t format);
-Bool sna_get_rgba_from_pixel(uint32_t pixel,
+bool sna_get_rgba_from_pixel(uint32_t pixel,
 			     uint16_t *red,
 			     uint16_t *green,
 			     uint16_t *blue,
 			     uint16_t *alpha,
 			     uint32_t format);
-Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
+bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
 
 void no_render_init(struct sna *sna);
 
-Bool gen2_render_init(struct sna *sna);
-Bool gen3_render_init(struct sna *sna);
-Bool gen4_render_init(struct sna *sna);
-Bool gen5_render_init(struct sna *sna);
-Bool gen6_render_init(struct sna *sna);
-Bool gen7_render_init(struct sna *sna);
+bool gen2_render_init(struct sna *sna);
+bool gen3_render_init(struct sna *sna);
+bool gen4_render_init(struct sna *sna);
+bool gen5_render_init(struct sna *sna);
+bool gen6_render_init(struct sna *sna);
+bool gen7_render_init(struct sna *sna);
 
-Bool sna_tiling_composite(uint32_t op,
+bool sna_tiling_composite(uint32_t op,
 			  PicturePtr src,
 			  PicturePtr mask,
 			  PicturePtr dst,
@@ -520,7 +520,7 @@ Bool sna_tiling_composite(uint32_t op,
 			  int16_t dst_x, int16_t dst_y,
 			  int16_t width, int16_t height,
 			  struct sna_composite_op *tmp);
-Bool sna_tiling_composite_spans(uint32_t op,
+bool sna_tiling_composite_spans(uint32_t op,
 				PicturePtr src,
 				PicturePtr dst,
 				int16_t src_x,  int16_t src_y,
@@ -528,24 +528,24 @@ Bool sna_tiling_composite_spans(uint32_t op,
 				int16_t width,  int16_t height,
 				unsigned flags,
 				struct sna_composite_spans_op *tmp);
-Bool sna_tiling_fill_boxes(struct sna *sna,
+bool sna_tiling_fill_boxes(struct sna *sna,
 			   CARD8 op,
 			   PictFormat format,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
 
-Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			   const BoxRec *box, int n);
 
-Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			       int bpp, const BoxRec *box, int nbox);
 
-Bool sna_blt_composite(struct sna *sna,
+bool sna_blt_composite(struct sna *sna,
 		       uint32_t op,
 		       PicturePtr src,
 		       PicturePtr dst,
@@ -566,30 +566,30 @@ bool sna_blt_copy(struct sna *sna, uint8_t alu,
 		  int bpp,
 		  struct sna_copy_op *copy);
 
-Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *bo,
 			int bpp,
 			uint32_t pixel,
 			const BoxRec *box, int n);
 
-Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			int bpp,
 			const BoxRec *box, int n);
-Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
+bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				 PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 				 PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 				 const BoxRec *box, int nbox);
 
-Bool _sna_get_pixel_from_rgba(uint32_t *pixel,
+bool _sna_get_pixel_from_rgba(uint32_t *pixel,
 			     uint16_t red,
 			     uint16_t green,
 			     uint16_t blue,
 			     uint16_t alpha,
 			     uint32_t format);
 
-static inline Bool
+static inline bool
 sna_get_pixel_from_rgba(uint32_t * pixel,
 			uint16_t red,
 			uint16_t green,
@@ -671,7 +671,7 @@ inline static void sna_render_composite_redirect_init(struct sna_composite_op *o
 	t->damage = NULL;
 }
 
-Bool
+bool
 sna_render_composite_redirect(struct sna *sna,
 			      struct sna_composite_op *op,
 			      int x, int y, int width, int height);
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 2210127..32eb54e 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -67,7 +67,7 @@ static inline void batch_emit_float(struct sna *sna, float f)
 	batch_emit(sna, u.dw);
 }
 
-static inline Bool
+static inline bool
 is_gpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -81,7 +81,7 @@ is_gpu(DrawablePtr drawable)
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
 }
 
-static inline Bool
+static inline bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -98,7 +98,7 @@ is_cpu(DrawablePtr drawable)
 	return true;
 }
 
-static inline Bool
+static inline bool
 is_dirty(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
@@ -119,36 +119,36 @@ too_small(struct sna_pixmap *priv)
 	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
 }
 
-static inline Bool
+static inline bool
 unattached(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
 	return priv == NULL || (priv->gpu_damage == NULL && priv->cpu_damage);
 }
 
-static inline Bool
+static inline bool
 picture_is_gpu(PicturePtr picture)
 {
 	if (!picture || !picture->pDrawable)
-		return FALSE;
+		return false;
 	return is_gpu(picture->pDrawable);
 }
 
-static inline Bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
+static inline bool sna_blt_compare_depth(DrawablePtr src, DrawablePtr dst)
 {
 	if (src->depth == dst->depth)
-		return TRUE;
+		return true;
 
 	/* Also allow for the alpha to be discarded on a copy */
 	if (src->bitsPerPixel != dst->bitsPerPixel)
-		return FALSE;
+		return false;
 
 	if (dst->depth == 24 && src->depth == 32)
-		return TRUE;
+		return true;
 
 	/* Note that a depth-16 pixmap is r5g6b5, not x1r5g5b5. */
 
-	return FALSE;
+	return false;
 }
 
 static inline struct kgem_bo *
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index fdc297a..e048361 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -265,7 +265,7 @@ done:
 	free(tile);
 }
 
-Bool
+bool
 sna_tiling_composite(uint32_t op,
 		     PicturePtr src,
 		     PicturePtr mask,
@@ -285,11 +285,11 @@ sna_tiling_composite(uint32_t op,
 
 	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
 	if (priv == NULL || priv->gpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	tile = malloc(sizeof(*tile));
 	if (!tile)
-		return FALSE;
+		return false;
 
 	tile->op = op;
 
@@ -315,7 +315,7 @@ sna_tiling_composite(uint32_t op,
 	tmp->done  = sna_tiling_composite_done;
 
 	tmp->priv = tile;
-	return TRUE;
+	return true;
 }
 
 fastcall static void
@@ -522,7 +522,7 @@ done:
 	free(tile);
 }
 
-Bool
+bool
 sna_tiling_composite_spans(uint32_t op,
 			   PicturePtr src,
 			   PicturePtr dst,
@@ -541,11 +541,11 @@ sna_tiling_composite_spans(uint32_t op,
 
 	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
 	if (priv == NULL || priv->gpu_bo == NULL)
-		return FALSE;
+		return false;
 
 	tile = malloc(sizeof(*tile));
 	if (!tile)
-		return FALSE;
+		return false;
 
 	tile->op = op;
 	tile->flags = flags;
@@ -571,10 +571,10 @@ sna_tiling_composite_spans(uint32_t op,
 	tmp->done  = sna_tiling_composite_spans_done;
 
 	tmp->base.priv = tile;
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 sna_tiling_fill_boxes(struct sna *sna,
 		      CARD8 op,
 		      PictFormat format,
@@ -585,7 +585,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
 	int step;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	pixman_region_init_rects(&region, box, n);
 
@@ -674,7 +674,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 	goto done;
 err:
 	kgem_bo_destroy(&sna->kgem, bo);
@@ -684,7 +684,7 @@ done:
 	return ret;
 }
 
-Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 			       int bpp, const BoxRec *box, int nbox)
@@ -692,7 +692,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
 	int step;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
 	    !kgem_bo_can_blt(&sna->kgem, dst_bo)) {
@@ -701,7 +701,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		     __FUNCTION__,
 		     kgem_bo_can_blt(&sna->kgem, src_bo),
 		     kgem_bo_can_blt(&sna->kgem, dst_bo)));
-		return FALSE;
+		return false;
 	}
 
 	pixman_region_init_rects(&region, box, nbox);
@@ -773,7 +773,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 	goto done;
 err:
 	kgem_bo_destroy(&sna->kgem, bo);
@@ -783,7 +783,7 @@ done:
 	return ret;
 }
 
-static Bool
+static bool
 box_intersect(BoxPtr a, const BoxRec *b)
 {
 	if (a->x1 < b->x1)
@@ -798,7 +798,7 @@ box_intersect(BoxPtr a, const BoxRec *b)
 	return a->x1 < a->x2 && a->y1 < a->y2;
 }
 
-Bool
+bool
 sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 		      PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		      PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
@@ -807,7 +807,7 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 	BoxRec extents, tile, stack[64], *clipped, *c;
 	PixmapRec p;
 	int i, step, tiling;
-	Bool ret = FALSE;
+	bool ret = false;
 
 	extents = box[0];
 	for (i = 1; i < n; i++) {
@@ -905,7 +905,7 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 		}
 	}
 
-	ret = TRUE;
+	ret = true;
 tiled_error:
 	if (clipped != stack)
 		free(clipped);
diff --git a/src/sna/sna_transform.c b/src/sna/sna_transform.c
index 54852b1..55cc1ad 100644
--- a/src/sna/sna_transform.c
+++ b/src/sna/sna_transform.c
@@ -38,22 +38,22 @@
  *
  * transform may be null.
  */
-Bool sna_transform_is_affine(const PictTransform *t)
+bool sna_transform_is_affine(const PictTransform *t)
 {
 	if (t == NULL)
-		return TRUE;
+		return true;
 
 	return t->matrix[2][0] == 0 && t->matrix[2][1] == 0;
 }
 
-Bool
+bool
 sna_transform_is_translation(const PictTransform *t,
 			     pixman_fixed_t *tx,
 			     pixman_fixed_t *ty)
 {
 	if (t == NULL) {
 		*tx = *ty = 0;
-		return TRUE;
+		return true;
 	}
 
 	if (t->matrix[0][0] != IntToxFixed(1) ||
@@ -63,19 +63,19 @@ sna_transform_is_translation(const PictTransform *t,
 	    t->matrix[2][0] != 0 ||
 	    t->matrix[2][1] != 0 ||
 	    t->matrix[2][2] != IntToxFixed(1))
-		return FALSE;
+		return false;
 
 	*tx = t->matrix[0][2];
 	*ty = t->matrix[1][2];
-	return TRUE;
+	return true;
 }
 
-Bool
+bool
 sna_transform_is_integer_translation(const PictTransform *t, int16_t *tx, int16_t *ty)
 {
 	if (t == NULL) {
 		*tx = *ty = 0;
-		return TRUE;
+		return true;
 	}
 
 	if (t->matrix[0][0] != IntToxFixed(1) ||
@@ -85,15 +85,15 @@ sna_transform_is_integer_translation(const PictTransform *t, int16_t *tx, int16_
 	    t->matrix[2][0] != 0 ||
 	    t->matrix[2][1] != 0 ||
 	    t->matrix[2][2] != IntToxFixed(1))
-		return FALSE;
+		return false;
 
 	if (pixman_fixed_fraction(t->matrix[0][2]) ||
 	    pixman_fixed_fraction(t->matrix[1][2]))
-		return FALSE;
+		return false;
 
 	*tx = pixman_fixed_to_int(t->matrix[0][2]);
 	*ty = pixman_fixed_to_int(t->matrix[1][2]);
-	return TRUE;
+	return true;
 }
 
 /**
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2341cb3..1553f58 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2221,9 +2221,9 @@ static int operator_is_bounded(uint8_t op)
 	case PictOpOver:
 	case PictOpOutReverse:
 	case PictOpAdd:
-		return TRUE;
+		return true;
 	default:
-		return FALSE;
+		return false;
 	}
 }
 
@@ -2366,7 +2366,7 @@ trapezoids_inplace_fallback(CARD8 op,
 
 	image = NULL;
 	if (sna_drawable_move_to_cpu(dst->pDrawable, MOVE_READ | MOVE_WRITE))
-		image = image_from_pict(dst, FALSE, &dx, &dy);
+		image = image_from_pict(dst, false, &dx, &dy);
 	if (image) {
 		dx += dst->pDrawable->x;
 		dy += dst->pDrawable->y;
@@ -2529,7 +2529,7 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 }
 
-static Bool
+static bool
 composite_aligned_boxes(struct sna *sna,
 			CARD8 op,
 			PicturePtr src,
@@ -2542,7 +2542,7 @@ composite_aligned_boxes(struct sna *sna,
 	BoxRec stack_boxes[64], *boxes;
 	pixman_region16_t region, clip;
 	struct sna_composite_op tmp;
-	Bool ret = true;
+	bool ret = true;
 	int dx, dy, n, num_boxes;
 
 	if (NO_ALIGNED_BOXES)
@@ -3306,7 +3306,7 @@ pixman:
 			continue;
 		}
 
-		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
+		pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
 		pi.source = pixman_image_create_bits(PIXMAN_a8r8g8b8, 1, 1, NULL, 0);
 		pixman_image_set_repeat(pi.source, PIXMAN_REPEAT_NORMAL);
 		pi.bits = pixman_image_get_data(pi.source);
@@ -3467,8 +3467,8 @@ composite_unaligned_boxes_inplace(CARD8 op,
 			}
 		}
 
-		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
-		pi.source = image_from_pict(src, FALSE, &pi.sx, &pi.sy);
+		pi.image = image_from_pict(dst, false, &pi.dx, &pi.dy);
+		pi.source = image_from_pict(src, false, &pi.sx, &pi.sy);
 		pi.sx += src_x;
 		pi.sy += src_y;
 		pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
@@ -4743,10 +4743,10 @@ unbounded_pass:
 
 		op = 0;
 	} else {
-		inplace.composite.dst = image_from_pict(dst, FALSE,
+		inplace.composite.dst = image_from_pict(dst, false,
 							&inplace.composite.dx,
 							&inplace.composite.dy);
-		inplace.composite.src = image_from_pict(src, FALSE,
+		inplace.composite.src = image_from_pict(src, false,
 							&inplace.composite.sx,
 							&inplace.composite.sy);
 		inplace.composite.sx +=
@@ -5879,7 +5879,7 @@ sna_add_traps(PicturePtr picture, INT16 x, INT16 y, int n, xTrap *t)
 		pixman_image_t *image;
 		int dx, dy;
 
-		if (!(image = image_from_pict(picture, FALSE, &dx, &dy)))
+		if (!(image = image_from_pict(picture, false, &dx, &dy)))
 			return;
 
 		pixman_add_traps(image, x + dx, y + dy, n, (pixman_trap_t *)t);
@@ -5910,9 +5910,9 @@ xTriangleValid(const xTriangle *t)
 
 	/* if the length of any edge is zero, the area must be zero */
 	if (v1.x == 0 && v1.y == 0)
-		return FALSE;
+		return false;
 	if (v2.x == 0 && v2.y == 0)
-		return FALSE;
+		return false;
 
 	/* if the cross-product is zero, so is the size */
 	return v2.y * v1.x != v1.y * v2.x;
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 71d1bbc..b76a3c4 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -66,11 +66,11 @@
 #define _SNA_XVMC_SERVER_
 #include "sna_video_hwmc.h"
 #else
-static inline Bool sna_video_xvmc_setup(struct sna *sna,
+static inline bool sna_video_xvmc_setup(struct sna *sna,
 					ScreenPtr ptr,
 					XF86VideoAdaptorPtr target)
 {
-	return FALSE;
+	return false;
 }
 #endif
 
@@ -123,7 +123,7 @@ void sna_video_buffer_fini(struct sna *sna,
 	video->buf = bo;
 }
 
-Bool
+bool
 sna_video_clip_helper(ScrnInfoPtr scrn,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
@@ -135,7 +135,7 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 		      short drw_w, short drw_h,
 		      RegionPtr reg)
 {
-	Bool ret;
+	bool ret;
 	RegionRec crtc_region_local;
 	RegionPtr crtc_region = reg;
 	INT32 x1, x2, y1, y2;
@@ -432,7 +432,7 @@ sna_copy_packed_data(struct sna_video *video,
 	}
 }
 
-Bool
+bool
 sna_video_copy_data(struct sna *sna,
 		    struct sna_video *video,
 		    struct sna_video_frame *frame,
@@ -465,7 +465,7 @@ sna_video_copy_data(struct sna *sna,
 								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 								       (void **)&dst);
 					if (frame->bo == NULL)
-						return FALSE;
+						return false;
 
 					memcpy(dst, buf,
 					       pitch[1]*frame->height +
@@ -477,7 +477,7 @@ sna_video_copy_data(struct sna *sna,
 					frame->VBufOffset = frame->UBufOffset;
 					frame->UBufOffset = tmp;
 				}
-				return TRUE;
+				return true;
 			}
 		} else {
 			if (frame->width*2 == frame->pitch[0]) {
@@ -490,13 +490,13 @@ sna_video_copy_data(struct sna *sna,
 								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 								       (void **)&dst);
 					if (frame->bo == NULL)
-						return FALSE;
+						return false;
 
 					memcpy(dst,
 					       buf + (frame->top * frame->width*2) + (frame->left << 1),
 					       frame->nlines*frame->width*2);
 				}
-				return TRUE;
+				return true;
 			}
 		}
 	}
@@ -505,13 +505,13 @@ sna_video_copy_data(struct sna *sna,
 	if (frame->bo) {
 		dst = kgem_bo_map__gtt(&sna->kgem, frame->bo);
 		if (dst == NULL)
-			return FALSE;
+			return false;
 	} else {
 		frame->bo = kgem_create_buffer(&sna->kgem, frame->size,
 					       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
 					       (void **)&dst);
 		if (frame->bo == NULL)
-			return FALSE;
+			return false;
 	}
 
 	if (is_planar_fourcc(frame->id))
@@ -519,7 +519,7 @@ sna_video_copy_data(struct sna *sna,
 	else
 		sna_copy_packed_data(video, frame, buf, dst);
 
-	return TRUE;
+	return true;
 }
 
 void sna_video_init(struct sna *sna, ScreenPtr screen)
@@ -528,7 +528,7 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 	XF86VideoAdaptorPtr textured, overlay;
 	int num_adaptors;
 	int prefer_overlay =
-	    xf86ReturnOptValBool(sna->Options, OPTION_PREFER_OVERLAY, FALSE);
+	    xf86ReturnOptValBool(sna->Options, OPTION_PREFER_OVERLAY, false);
 
 	if (!xf86LoaderCheckSymbol("xf86XVListGenericAdaptors"))
 		return;
diff --git a/src/sna/sna_video.h b/src/sna/sna_video.h
index 7bfc971..3ce72c0 100644
--- a/src/sna/sna_video.h
+++ b/src/sna/sna_video.h
@@ -57,7 +57,7 @@ struct sna_video {
 	struct kgem_bo *old_buf[2];
 	struct kgem_bo *buf;
 
-	Bool textured;
+	bool textured;
 	Rotation rotation;
 	int plane;
 
@@ -100,7 +100,7 @@ static inline int is_planar_fourcc(int id)
 	}
 }
 
-Bool
+bool
 sna_video_clip_helper(ScrnInfoPtr scrn,
 		      struct sna_video *adaptor_priv,
 		      struct sna_video_frame *frame,
@@ -123,7 +123,7 @@ sna_video_buffer(struct sna *sna,
 		 struct sna_video *video,
 		 struct sna_video_frame *frame);
 
-Bool
+bool
 sna_video_copy_data(struct sna *sna,
 		    struct sna_video *video,
 		    struct sna_video_frame *frame,
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 99f9ca5..068f234 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -100,7 +100,7 @@ static const XF86ImageRec Images[NUM_IMAGES] = {
 };
 
 /* kernel modesetting overlay functions */
-static Bool sna_has_overlay(struct sna *sna)
+static bool sna_has_overlay(struct sna *sna)
 {
 	struct drm_i915_getparam gp;
 	int has_overlay = 0;
@@ -113,7 +113,7 @@ static Bool sna_has_overlay(struct sna *sna)
 	return ret == 0 && has_overlay;
 }
 
-static Bool sna_video_overlay_update_attrs(struct sna *sna,
+static bool sna_video_overlay_update_attrs(struct sna *sna,
 					   struct sna_video *video)
 {
 	struct drm_intel_overlay_attrs attrs;
@@ -348,7 +348,7 @@ update_dst_box_to_crtc_coords(struct sna *sna, xf86CrtcPtr crtc, BoxPtr dstBox)
 	return;
 }
 
-static Bool
+static bool
 sna_video_overlay_show(struct sna *sna,
 		       struct sna_video *video,
 		       struct sna_video_frame *frame,
@@ -684,7 +684,7 @@ XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna,
 	adaptor->PutImage = sna_video_overlay_put_image;
 	adaptor->QueryImageAttributes = sna_video_overlay_query_video_attributes;
 
-	video->textured = FALSE;
+	video->textured = false;
 	video->color_key = sna_video_overlay_color_key(sna);
 	video->brightness = -19;	/* (255/219) * -16 */
 	video->contrast = 75;	/* 255/219 * 64 */
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index d0a4808..87c5845 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -43,7 +43,7 @@
 #define IMAGE_MAX_WIDTH		2048
 #define IMAGE_MAX_HEIGHT	2048
 
-#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
+#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, true)
 
 static Atom xvColorKey;
 
@@ -85,7 +85,7 @@ static int sna_video_sprite_set_attr(ScrnInfoPtr scrn,
 	struct sna_video *video = data;
 
 	if (attribute == xvColorKey) {
-		video->color_key_changed = TRUE;
+		video->color_key_changed = true;
 		video->color_key = value;
 		DBG(("COLORKEY = %d\n", value));
 	} else
@@ -167,7 +167,7 @@ update_dst_box_to_crtc_coords(struct sna *sna, xf86CrtcPtr crtc, BoxPtr dstBox)
 	}
 }
 
-static Bool
+static bool
 sna_video_sprite_show(struct sna *sna,
 		      struct sna_video *video,
 		      struct sna_video_frame *frame,
@@ -199,7 +199,7 @@ sna_video_sprite_show(struct sna *sna,
 			xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
 				   "failed to update color key\n");
 
-		video->color_key_changed = FALSE;
+		video->color_key_changed = false;
 	}
 #endif
 
@@ -406,9 +406,9 @@ XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna,
 	adaptor->PutImage = sna_video_sprite_put_image;
 	adaptor->QueryImageAttributes = sna_video_sprite_query_attrs;
 
-	video->textured = FALSE;
+	video->textured = false;
 	video->color_key = sna_video_sprite_color_key(sna);
-	video->color_key_changed = TRUE;
+	video->color_key_changed = true;
 	video->brightness = -19;	/* (255/219) * -16 */
 	video->contrast = 75;	/* 255/219 * 64 */
 	video->saturation = 146;	/* 128/112 * 128 */
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 805aee7..110bb00 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -39,7 +39,7 @@
 #include "sna_video_hwmc.h"
 #endif
 
-#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
+#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, true)
 
 static Atom xvBrightness, xvContrast, xvSyncToVblank;
 
@@ -197,7 +197,7 @@ sna_video_textured_best_size(ScrnInfoPtr scrn,
  * id is a fourcc code for the format of the video.
  * buf is the pointer to the source data in system memory.
  * width and height are the w/h of the source data.
- * If "sync" is TRUE, then we must be finished with *buf at the point of return
+ * If "sync" is true, then we must be finished with *buf at the point of return
  * (which we always are).
  * clip is the clipping region in screen space.
  * data is a pointer to our port private.
@@ -221,8 +221,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	BoxRec dstBox;
 	xf86CrtcPtr crtc;
-	Bool flush = false;
-	Bool ret;
+	bool flush = false;
+	bool ret;
 
 	DBG(("%s: src=(%d, %d),(%d, %d), dst=(%d, %d),(%d, %d), id=%d, sizep=%dx%d, sync?=%d\n",
 	     __FUNCTION__,
@@ -385,7 +385,7 @@ XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
 	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable XVideo whilst the GPU is wedged\n");
-		return FALSE;
+		return NULL;
 	}
 
 	adaptor = calloc(1, sizeof(XF86VideoAdaptorRec));
@@ -448,7 +448,7 @@ XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
 	for (i = 0; i < nports; i++) {
 		struct sna_video *v = &video[i];
 
-		v->textured = TRUE;
+		v->textured = true;
 		v->rotation = RR_Rotate_0;
 		v->SyncToVblank = 1;
 
commit a05c3547bba52288bae872ea672ffe2f4dab2ffa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 15:06:15 2012 +0100

    sna/gen4: Simplify comparing the pipeline-pointers against the previous
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ed85554..44504c5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1335,7 +1335,8 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 			     const struct sna_composite_op *op,
 			     int blend, int kernel)
 {
-	uint16_t offset = sna->kgem.nbatch, last;
+	uint32_t key;
+	uint16_t sp, bp;
 
 	DBG(("%s: has_mask=%d, src=(%d, %d), mask=(%d, %d),kernel=%d, blend=%d, ca=%d, format=%x\n",
 	     __FUNCTION__, op->mask.bo != NULL,
@@ -1343,28 +1344,28 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 	     op->mask.filter, op->mask.repeat,
 	     kernel, blend, op->has_component_alpha, (int)op->dst.format));
 
+	sp = SAMPLER_OFFSET(op->src.filter, op->src.repeat,
+			      op->mask.filter, op->mask.repeat,
+			      kernel);
+	bp = gen4_get_blend(blend, op->has_component_alpha, op->dst.format);
+
+	key = op->mask.bo != NULL;
+	key |= sp << 1;
+	key |= bp << 16;
+
+	if (key == sna->render_state.gen4.last_pipelined_pointers)
+		return;
+
 	OUT_BATCH(GEN4_3DSTATE_PIPELINED_POINTERS | 5);
 	OUT_BATCH(sna->render_state.gen4.vs);
 	OUT_BATCH(GEN4_GS_DISABLE); /* passthrough */
 	OUT_BATCH(GEN4_CLIP_DISABLE); /* passthrough */
 	OUT_BATCH(sna->render_state.gen4.sf[op->mask.bo != NULL]);
-	OUT_BATCH(sna->render_state.gen4.wm +
-		  SAMPLER_OFFSET(op->src.filter, op->src.repeat,
-				 op->mask.filter, op->mask.repeat,
-				 kernel));
-	OUT_BATCH(sna->render_state.gen4.cc +
-		  gen4_get_blend(blend, op->has_component_alpha, op->dst.format));
-
-	last = sna->render_state.gen4.last_pipelined_pointers;
-	if (last &&
-	    sna->kgem.batch[offset + 4] == sna->kgem.batch[last + 4] &&
-	    sna->kgem.batch[offset + 5] == sna->kgem.batch[last + 5] &&
-	    sna->kgem.batch[offset + 6] == sna->kgem.batch[last + 6]) {
-		sna->kgem.nbatch = offset;
-	} else {
-		sna->render_state.gen4.last_pipelined_pointers = offset;
-		gen4_emit_urb(sna);
-	}
+	OUT_BATCH(sna->render_state.gen4.wm + sp);
+	OUT_BATCH(sna->render_state.gen4.cc + bp);
+
+	sna->render_state.gen4.last_pipelined_pointers = key;
+	gen4_emit_urb(sna);
 }
 
 static void
@@ -3240,7 +3241,7 @@ static void gen4_render_reset(struct sna *sna)
 	sna->render_state.gen4.vb_id = 0;
 	sna->render_state.gen4.ve_id = -1;
 	sna->render_state.gen4.last_primitive = -1;
-	sna->render_state.gen4.last_pipelined_pointers = 0;
+	sna->render_state.gen4.last_pipelined_pointers = -1;
 
 	sna->render_state.gen4.drawrect_offset = -1;
 	sna->render_state.gen4.drawrect_limit = -1;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 0eb7e90..fae5872 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -338,11 +338,11 @@ struct gen4_render_state {
 	uint32_t drawrect_offset;
 	uint32_t drawrect_limit;
 	uint32_t vb_id;
+	uint32_t last_pipelined_pointers;
 	uint16_t vertex_offset;
 	uint16_t last_primitive;
 	int16_t floats_per_vertex;
 	uint16_t surface_table;
-	uint16_t last_pipelined_pointers;
 
 	Bool needs_invariant;
 	Bool needs_urb;
commit ea9ec18505645dfec85ab96bbbbbc1793830c737
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 11:39:01 2012 +0100

    sna: Split CloseScreen into two phases
    
    In order to get the ordering correct we need to free the xf86_cursors
    before calling the miPointerCloseScreen. This requires us to insert a
    hook at the top of the CloseScreen chain. However we still require the
    final CloseScreen hook in order to do the fundamental clean up, hence
    split the CloseScreen callback into two phases.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 01e256a..6920343 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -235,6 +235,7 @@ struct sna {
 
 	ScreenBlockHandlerProcPtr BlockHandler;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
+	CloseScreenProcPtr CloseScreen;
 
 	PicturePtr clear;
 	struct {
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 0eefa35..3871ab6 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -704,6 +704,8 @@ sna_uevent_fini(ScrnInfoPtr scrn)
 		sna->uevent_monitor = NULL;
 	}
 }
+#else
+static void sna_uevent_fini(ScrnInfoPtr scrn) { }
 #endif /* HAVE_UDEV */
 
 static void sna_leave_vt(VT_FUNC_ARGS_DECL)
@@ -735,49 +737,57 @@ static Bool sna_mode_has_pending_events(struct sna *sna)
 	return poll(&pfd, 1, 0) == 1;
 }
 
-static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
+static Bool sna_early_close_screen(CLOSE_SCREEN_ARGS_DECL)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
-	DepthPtr depths;
-	int d;
 
 	DBG(("%s\n", __FUNCTION__));
 
-#if HAVE_UDEV
 	sna_uevent_fini(scrn);
-#endif
 
 	/* drain the event queues */
 	if (sna_mode_has_pending_events(sna))
 		sna_mode_wakeup(sna);
 
-	if (scrn->vtSema == TRUE)
+	if (scrn->vtSema == TRUE) {
 		sna_leave_vt(VT_FUNC_ARGS(0));
+		scrn->vtSema = FALSE;
+	}
 
-	sna_accel_close(sna);
+	if (sna->dri_open) {
+		sna_dri_close(sna, screen);
+		sna->dri_open = false;
+	}
 
 	xf86_cursors_fini(screen);
 
-	depths = screen->allowedDepths;
-	for (d = 0; d < screen->numDepths; d++)
-		free(depths[d].vids);
-	free(depths);
+	return TRUE;
+}
 
-	free(screen->visuals);
+static Bool sna_late_close_screen(CLOSE_SCREEN_ARGS_DECL)
+{
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
+	struct sna *sna = to_sna(scrn);
+	DepthPtr depths;
+	int d;
 
-	if (sna->dri_open) {
-		sna_dri_close(sna, screen);
-		sna->dri_open = false;
-	}
+	DBG(("%s\n", __FUNCTION__));
 
 	if (sna->front) {
 		screen->DestroyPixmap(sna->front);
 		sna->front = NULL;
 	}
-	xf86GARTCloseScreen(scrn->scrnIndex);
 
-	scrn->vtSema = FALSE;
+	sna_accel_close(sna);
+
+	depths = screen->allowedDepths;
+	for (d = 0; d < screen->numDepths; d++)
+		free(depths[d].vids);
+	free(depths);
+
+	free(screen->visuals);
+
 	return TRUE;
 }
 
@@ -878,7 +888,7 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	}
 
 	assert(screen->CloseScreen == NULL);
-	screen->CloseScreen = sna_close_screen;
+	screen->CloseScreen = sna_late_close_screen;
 	if (!sna_accel_init(screen, sna)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "Hardware acceleration initialization failed\n");
@@ -893,8 +903,7 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	if (!miDCInitialize(screen, xf86GetPointerScreenFuncs()))
 		return FALSE;
 
-	xf86DrvMsg(scrn->scrnIndex, X_INFO, "Initializing HW Cursor\n");
-	if (!xf86_cursors_init(screen, SNA_CURSOR_X, SNA_CURSOR_Y,
+	if (xf86_cursors_init(screen, SNA_CURSOR_X, SNA_CURSOR_Y,
 			       HARDWARE_CURSOR_TRUECOLOR_AT_8BPP |
 			       HARDWARE_CURSOR_BIT_ORDER_MSBFIRST |
 			       HARDWARE_CURSOR_INVERT_MASK |
@@ -902,10 +911,8 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 			       HARDWARE_CURSOR_AND_SOURCE_WITH_MASK |
 			       HARDWARE_CURSOR_SOURCE_MASK_INTERLEAVE_64 |
 			       HARDWARE_CURSOR_UPDATE_UNHIDDEN |
-			       HARDWARE_CURSOR_ARGB)) {
-		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "Hardware cursor initialization failed\n");
-	}
+			       HARDWARE_CURSOR_ARGB))
+		xf86DrvMsg(scrn->scrnIndex, X_INFO, "HW Cursor enabled\n");
 
 	/* Must force it before EnterVT, so we are in control of VT and
 	 * later memory should be bound when allocating, e.g rotate_mem */
@@ -920,6 +927,9 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	screen->SaveScreen = xf86SaveScreen;
 	screen->CreateScreenResources = sna_create_screen_resources;
 
+	sna->CloseScreen = screen->CloseScreen;
+	screen->CloseScreen = sna_early_close_screen;
+
 	if (!xf86CrtcScreenInit(screen))
 		return FALSE;
 
commit 0f03f7b4359fcbcde651bc1554ddff4fe10bc53b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 13 10:35:27 2012 +0100

    sna: Only try to enable DRI2 if the module is available at runtime
    
    Blatantly cribbed from Michel Danzer's patch for ati...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index abb010c..01e256a 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -252,7 +252,8 @@ struct sna {
 	} render_state;
 	uint32_t have_render;
 
-	Bool directRenderingOpen;
+	bool dri_available;
+	bool dri_open;
 	char *deviceName;
 
 	/* Broken-out options. */
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 1433971..0eefa35 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -549,8 +549,7 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	/* Set display resolution */
 	xf86SetDpi(scrn, 0, 0);
 
-	/* Load the dri2 module if requested. */
-	xf86LoadSubModule(scrn, "dri2");
+	sna->dri_available = !!xf86LoadSubModule(scrn, "dri2");
 
 	return TRUE;
 }
@@ -767,9 +766,9 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 
 	free(screen->visuals);
 
-	if (sna->directRenderingOpen) {
+	if (sna->dri_open) {
 		sna_dri_close(sna, screen);
-		sna->directRenderingOpen = FALSE;
+		sna->dri_open = false;
 	}
 
 	if (sna->front) {
@@ -941,8 +940,9 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	xf86DPMSInit(screen, xf86DPMSSet, 0);
 
 	sna_video_init(sna, screen);
-	sna->directRenderingOpen = sna_dri_open(sna, screen);
-	if (sna->directRenderingOpen)
+	if (sna->dri_available)
+		sna->dri_open = sna_dri_open(sna, screen);
+	if (sna->dri_open)
 		xf86DrvMsg(scrn->scrnIndex, X_INFO,
 			   "direct rendering: DRI2 Enabled\n");
 
commit b5d6a57f12025aef9850c7d9baa6905f776be971
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 22:16:27 2012 +0100

    Enable compilation of SNA by default
    
    But only if we meet the required versions of Xorg and leave UXA as the
    default AccelMethod for the time being.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 128a30c..d323da7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -120,13 +120,17 @@ required_pixman_version=0.24
 
 AC_ARG_ENABLE(sna,
 	      AS_HELP_STRING([--enable-sna],
-			     [Enable SandyBridge's New Acceleration (SNA) [default=no]]),
+			     [Enable SandyBridge's New Acceleration (SNA) [default=auto]]),
 	      [SNA="$enableval"],
-	      [SNA=no])
+	      [SNA=auto])
+
+AC_CHECK_HEADERS([sys/sysinfo.h], , SNA=no)
+if test "x$SNA" = "xauto" && pkg-config --exists "xorg-server >= 1.10"; then
+	SNA=yes
+fi
 if test "x$SNA" != "xno"; then
 	required_xorg_xserver_version=1.10
 	AC_DEFINE(USE_SNA, 1, [Enable SNA support])
-	AC_CHECK_HEADERS([sys/sysinfo.h])
 fi
 AC_MSG_CHECKING([whether to include SNA support])
 AM_CONDITIONAL(SNA, test x$SNA != xno)
commit 6c2975ab2943478b3a246b5fb231f9f3df2d8475
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 22:14:39 2012 +0100

    Fix the reporting of whether SNA is configured
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 55c1251..128a30c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -115,30 +115,31 @@ AC_ARG_ENABLE(kms-only, AS_HELP_STRING([--enable-kms-only],
               [KMS_ONLY="$enableval"],
               [KMS_ONLY=no])
 
-AC_MSG_CHECKING([whether to include SNA support])
+required_xorg_xserver_version=1.6
+required_pixman_version=0.24
+
 AC_ARG_ENABLE(sna,
 	      AS_HELP_STRING([--enable-sna],
 			     [Enable SandyBridge's New Acceleration (SNA) [default=no]]),
 	      [SNA="$enableval"],
 	      [SNA=no])
-AM_CONDITIONAL(SNA, test x$SNA != xno)
-required_xorg_xserver_version=1.6
-required_pixman_version=0.24
 if test "x$SNA" != "xno"; then
 	required_xorg_xserver_version=1.10
 	AC_DEFINE(USE_SNA, 1, [Enable SNA support])
 	AC_CHECK_HEADERS([sys/sysinfo.h])
 fi
+AC_MSG_CHECKING([whether to include SNA support])
+AM_CONDITIONAL(SNA, test x$SNA != xno)
 AC_MSG_RESULT([$SNA])
 
-AC_MSG_CHECKING([whether to include UXA support])
 AC_ARG_ENABLE(uxa,
 	      AS_HELP_STRING([--enable-uxa],
 			     [Enable Unified Acceleration Architecture (UXA) [default=yes]]),
 	      [UXA="$enableval"],
 	      [UXA=yes])
-AC_MSG_RESULT([$UXA])
+AC_MSG_CHECKING([whether to include UXA support])
 AM_CONDITIONAL(UXA, test x$UXA != xno)
+AC_MSG_RESULT([$UXA])
 if test "x$UXA" != "xno"; then
 	AC_DEFINE(USE_UXA, 1, [Enable UXA support])
 	PKG_CHECK_MODULES(DRMINTEL, [libdrm_intel >= 2.4.29])
commit fd15ce65ab8ce5cf571e37daa7db1ee245616cd4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 20:32:14 2012 +0100

    sna: Fix build without DRI2
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 323f283..55c1251 100644
--- a/configure.ac
+++ b/configure.ac
@@ -168,7 +168,7 @@ AC_ARG_ENABLE(xaa,
 	      [XAA=auto])
 if test "x$XAA" != xno; then
         save_CFLAGS=$CFLAGS
-        CFLAGS=$XSERVER_CFLAGS
+        CFLAGS=$XORG_CFLAGS
 	AC_CHECK_HEADERS([xaa.h], XAA=yes, XAA=no)
         CFLAGS=$save_CFLAGS
 fi
@@ -183,8 +183,8 @@ AC_ARG_ENABLE(dga,
 	      [DGA=auto])
 if test "x$DGA" != xno; then
         save_CFLAGS=$CFLAGS
-        CFLAGS=$XSERVER_CFLAGS
-	AC_CHECK_HEADERS([dgaproc.h], DGA=yes, DGA=no)
+        CFLAGS=$XORG_CFLAGS
+	AC_CHECK_HEADERS([dgaproc.h], DGA=yes, DGA=no, [#include <dixstruct.h>])
         CFLAGS=$save_CFLAGS
 fi
 AC_MSG_CHECKING([whether to include DGA support])
@@ -324,10 +324,16 @@ else
                 AC_MSG_ERROR([DRI requested but prerequisites not found])
         fi
 fi
-AM_CONDITIONAL(DRI2, test x$DRI2 != xno)
+
 if test "x$DRI2" != "xno"; then
-        AC_DEFINE(USE_DRI2,1,[Enable DRI2 driver support])
+	save_CFLAGS=$CFLAGS
+	CFLAGS="$XORG_CFLAGS $DRM_CFLAGS $DRI_CFLAGS $DRI2_CFLAGS"
+	AC_CHECK_HEADERS([dri2.h], DRI2=yes, DRI2=no, [#include <dixstruct.h>])
+	CFLAGS=$save_CFLAGS
 fi
+AC_MSG_CHECKING([whether to include DRI2 support])
+AM_CONDITIONAL(DRI2, test "x$DRI2" = xyes)
+AC_MSG_RESULT([$DRI2])
 
 if test "$XVMC" = yes; then
 	PKG_CHECK_MODULES(XVMCLIB,
diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 604a5db..8463a80 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -27,7 +27,6 @@ AM_CFLAGS = \
 	@XORG_CFLAGS@ \
 	@UDEV_CFLAGS@ \
 	@DRM_CFLAGS@ \
-	@DRI_CFLAGS@ \
 	$(NULL)
 
 if DEBUG
@@ -83,12 +82,9 @@ libsna_la_SOURCES = \
 	$(NULL)
 
 if DRI2
-libsna_la_SOURCES += \
-	sna_dri.c \
-	$(NULL)
-libsna_la_LIBADD += \
-	$(DRI_LIBS) \
-	$(NULL)
+AM_CFLAGS += @DRI_CFLAGS@
+libsna_la_SOURCES += sna_dri.c
+libsna_la_LIBADD += $(DRI_LIBS)
 endif
 
 if XVMC
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d4f6dec..abb010c 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -58,11 +58,13 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xf86drmMode.h>
 
 #include "../compat-api.h"
-#define _XF86DRI_SERVER_
 #include <drm.h>
-#include <dri2.h>
 #include <i915_drm.h>
 
+#ifdef HAVE_DRI2_H
+#include <dri2.h>
+#endif
+
 #if HAVE_UDEV
 #include <libudev.h>
 #endif
@@ -338,11 +340,19 @@ extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
 
+#if HAVE_DRI2_H
 Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_destroy_window(WindowPtr win);
 void sna_dri_close(struct sna *sna, ScreenPtr pScreen);
+#else
+static inline bool sna_dri_open(struct sna *sna, ScreenPtr pScreen) { return false; }
+static inline void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event) { }
+static inline void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event) { }
+static inline void sna_dri_destroy_window(WindowPtr win) { }
+static inline void sna_dri_close(struct sna *sna, ScreenPtr pScreen) { }
+#endif
 
 extern bool sna_crtc_on(xf86CrtcPtr crtc);
 int sna_crtc_to_pipe(xf86CrtcPtr crtc);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index eee9527..1433971 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -941,12 +941,10 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	xf86DPMSInit(screen, xf86DPMSSet, 0);
 
 	sna_video_init(sna, screen);
-#if USE_DRI2
 	sna->directRenderingOpen = sna_dri_open(sna, screen);
 	if (sna->directRenderingOpen)
 		xf86DrvMsg(scrn->scrnIndex, X_INFO,
 			   "direct rendering: DRI2 Enabled\n");
-#endif
 
 	if (serverGeneration == 1)
 		xf86ShowUnusedOptions(scrn->scrnIndex, scrn->options);
commit 22be9988b933f33fc5247a9abc3b00a7f2e4a202
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 19:49:34 2012 +0100

    sna: Check for failure to initialize the sprite pointers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 0bb5c40..eee9527 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -891,7 +891,8 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	miInitializeBackingStore(screen);
 	xf86SetBackingStore(screen);
 	xf86SetSilkenMouse(screen);
-	miDCInitialize(screen, xf86GetPointerScreenFuncs());
+	if (!miDCInitialize(screen, xf86GetPointerScreenFuncs()))
+		return FALSE;
 
 	xf86DrvMsg(scrn->scrnIndex, X_INFO, "Initializing HW Cursor\n");
 	if (!xf86_cursors_init(screen, SNA_CURSOR_X, SNA_CURSOR_Y,
commit 32e7f4ee64867779b2def6fcd882708d7b0e2cf5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 19:39:10 2012 +0100

    sna/glyphs: Fix array allocation for list_extents
    
    Originally I intended to skip assigning the box on the last list.
    However, loop simplicity failed and now we run the risk of writing
    beyond the end of stack_extents, and overwriting the list_extents
    pointer.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index cd29b07..f8959e1 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1049,8 +1049,8 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 	BoxRec stack_extents[64], *list_extents = stack_extents;
 	int i, j;
 
-	if (nlist > ARRAY_SIZE(stack_extents) + 1) {
-		list_extents = malloc(sizeof(BoxRec) * (nlist-1));
+	if (nlist > ARRAY_SIZE(stack_extents)) {
+		list_extents = malloc(sizeof(BoxRec) * nlist);
 		if (list_extents == NULL)
 			return NULL;
 	}
commit 0477b5fb6f040f3bad86bb314a24df1bcd660aed
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 19:37:02 2012 +0100

    sna/glyphs: Apply mask reduction along fallback paths as well
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index aba5fef..cd29b07 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -108,6 +108,19 @@ static inline struct sna_glyph *sna_glyph(GlyphPtr glyph)
 
 #define NeedsComponent(f) (PICT_FORMAT_A(f) != 0 && PICT_FORMAT_RGB(f) != 0)
 
+static bool op_is_bounded(uint8_t op)
+{
+	switch (op) {
+	case PictOpOver:
+	case PictOpOutReverse:
+	case PictOpAdd:
+	case PictOpXor:
+		return true;
+	default:
+		return false;
+	}
+}
+
 void sna_glyphs_close(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
@@ -1208,6 +1221,11 @@ glyphs_fallback(CARD8 op,
 	src_x += dx - list->xOff;
 	src_y += dy - list->yOff;
 
+	if (mask_format &&
+	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
+	    mask_format == glyphs_format(nlist, list, glyphs))
+		mask_format = NULL;
+
 	if (mask_format) {
 		DBG(("%s: create mask (%d, %d)x(%d,%d) + (%d,%d) + (%d,%d), depth=%d, format=%lx [%lx], ca? %d\n",
 		     __FUNCTION__,
@@ -1258,8 +1276,7 @@ glyphs_fallback(CARD8 op,
 				if (picture == NULL)
 					goto next_glyph;
 
-				glyph_image = image_from_pict(picture,
-							      FALSE,
+				glyph_image = image_from_pict(picture, FALSE,
 							      &gx, &gy);
 				if (!glyph_image)
 					goto next_glyph;
@@ -1360,19 +1377,6 @@ cleanup_region:
 	RegionUninit(&region);
 }
 
-static bool op_is_bounded(uint8_t op)
-{
-	switch (op) {
-	case PictOpOver:
-	case PictOpOutReverse:
-	case PictOpAdd:
-	case PictOpXor:
-		return true;
-	default:
-		return false;
-	}
-}
-
 void
 sna_glyphs(CARD8 op,
 	   PicturePtr src,
commit 16aaa51b5d326f44974489f5b29716c7ff5ab48e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 15:26:54 2012 +0100

    sna: Rearrange the tests for dropping the glyph mask
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 88e1c9e..aba5fef 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1417,9 +1417,7 @@ sna_glyphs(CARD8 op,
 		goto fallback;
 	}
 
-	if (!mask ||
-	    (((nlist == 1 && list->len == 1) || op == PictOpAdd) &&
-	     dst->format == (mask->depth << 24 | mask->format))) {
+	if (mask == NULL) {
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
 				  src_x, src_y,
@@ -1428,9 +1426,8 @@ sna_glyphs(CARD8 op,
 	}
 
 	/* Try to discard the mask for non-overlapping glyphs */
-	if (mask &&
-	    op_is_bounded(op) &&
-	    dst->pCompositeClip->data == NULL &&
+	if (mask && dst->pCompositeClip->data == NULL &&
+	    (op_is_bounded(op) || (nlist == 1 && list->len == 1)) &&
 	    mask == glyphs_format(nlist, list, glyphs)) {
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
commit 600746f923b046187a66b60b5e5d2b187475730e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 12 14:16:31 2012 +0100

    i810: Remove vestigial reference to xaa.h
    
    i810_hwmc.c can pull it in via i810.h like everybody else. As for
    xaalocal.h, I have no idea what that is... Both appear to be cut'n'paste
    includes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_hwmc.c b/src/legacy/i810/i810_hwmc.c
index c7100e0..4d3bd1a 100644
--- a/src/legacy/i810/i810_hwmc.c
+++ b/src/legacy/i810/i810_hwmc.c
@@ -53,8 +53,6 @@ THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xf86xvmc.h"
 #include <X11/extensions/Xv.h>
 #include <X11/extensions/XvMC.h>
-#include "xaa.h"
-#include "xaalocal.h"
 #include "dixstruct.h"
 #include "fourcc.h"
 
commit 1d9ab2e7101167075112a472ee82530dc0365183
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Thu Jul 12 18:47:50 2012 +0800

    uxa/dri: Fix a buffer leak on pageflipping when enabling glamor.
    
    We need to move the current front_buffer to the back buffer so that
    we don't need to create a new back buffer next time. This behaviour
    should be the same with or without glamor. The previous code
    incorrectly discarded the previous front_buffer and caused a
    big buffer leak problem.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index ed5078e..0405937 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1023,9 +1023,10 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 	priv = info->front->driverPrivate;
 
 	/* Exchange the current front-buffer with the fresh bo */
+
+	intel->back_buffer = intel->front_buffer;
+	drm_intel_bo_reference(intel->back_buffer);
 	if (!(intel->uxa_flags & UXA_USE_GLAMOR)) {
-		intel->back_buffer = intel->front_buffer;
-		drm_intel_bo_reference(intel->back_buffer);
 		intel_set_pixmap_bo(priv->pixmap, new_back);
 		drm_intel_bo_unreference(new_back);
 	}
commit fa89e0614563cbe0cc45e6d7e7584cb333244c27
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Thu Jul 12 18:47:51 2012 +0800

    uxa/glyphs: Bypass uxa glyphs operations if using glamor.
    
    glamor_glyphs will never fall back. We don't need to keep a
    uxa glyphs cache picture here. Thus simply bypass the
    corresponding operations.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 6bdf101..527942a 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -112,6 +112,9 @@ static void uxa_unrealize_glyph_caches(ScreenPtr pScreen)
 	uxa_screen_t *uxa_screen = uxa_get_screen(pScreen);
 	int i;
 
+	if (uxa_screen->info->flags & UXA_USE_GLAMOR)
+		return;
+
 	if (!uxa_screen->glyph_cache_initialized)
 		return;
 
@@ -211,6 +214,11 @@ bail:
 
 Bool uxa_glyphs_init(ScreenPtr pScreen)
 {
+
+	uxa_screen_t *uxa_screen = uxa_get_screen(pScreen);
+
+	if (uxa_screen->info->flags & UXA_USE_GLAMOR)
+		return TRUE;
 #if HAS_DIXREGISTERPRIVATEKEY
 	if (!dixRegisterPrivateKey(&uxa_glyph_key, PRIVATE_GLYPH, 0))
 		return FALSE;
@@ -307,8 +315,10 @@ uxa_glyph_unrealize(ScreenPtr screen,
 	struct uxa_glyph *priv;
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 
-	if (uxa_screen->info->flags & UXA_USE_GLAMOR)
+	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
 		glamor_glyph_unrealize(screen, glyph);
+		return;
+	}
 
 	/* Use Lookup in case we have not attached to this glyph. */
 	priv = dixLookupPrivate(&glyph->devPrivates, &uxa_glyph_key);
commit d25000e1f31e78aff2ab43adb12aec0aac36f56f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 11 22:37:25 2012 +0100

    i810: Tidy configure detection for XAA/DGA
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index dde23f1..323f283 100644
--- a/configure.ac
+++ b/configure.ac
@@ -161,43 +161,35 @@ fi
 
 PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto pixman-1 >= $required_pixman_version $REQUIRED_MODULES])
 
-AC_MSG_CHECKING([whether to include XAA support])
 AC_ARG_ENABLE(xaa,
 	      AS_HELP_STRING([--enable-xaa],
 			     [Enable legacy X Acceleration Architecture (XAA) [default=auto]]),
 	      [XAA="$enableval"],
 	      [XAA=auto])
-AC_MSG_RESULT([$XAA])
-AM_CONDITIONAL(XAA, test "x$XAA" != xno)
 if test "x$XAA" != xno; then
         save_CFLAGS=$CFLAGS
         CFLAGS=$XSERVER_CFLAGS
 	AC_CHECK_HEADERS([xaa.h], XAA=yes, XAA=no)
         CFLAGS=$save_CFLAGS
 fi
-if test "x$XAA" = xyes; then
-	AC_DEFINE(USE_XAA, 1, [Enable XAA support])
-fi
+AC_MSG_CHECKING([whether to include XAA support])
 AM_CONDITIONAL(XAA, test "x$XAA" = xyes)
+AC_MSG_RESULT([$XAA])
 
-AC_MSG_CHECKING([whether to include DGA support])
 AC_ARG_ENABLE(dga,
 	      AS_HELP_STRING([--enable-dga],
 			     [Enable legacy Direct Graphics Access (DGA) [default=auto]]),
 	      [DGA="$enableval"],
 	      [DGA=auto])
-AC_MSG_RESULT([$DGA])
-AM_CONDITIONAL(DGA, test "x$DGA" != xno)
 if test "x$DGA" != xno; then
         save_CFLAGS=$CFLAGS
         CFLAGS=$XSERVER_CFLAGS
 	AC_CHECK_HEADERS([dgaproc.h], DGA=yes, DGA=no)
         CFLAGS=$save_CFLAGS
 fi
-if test "x$DGA" = xyes; then
-	AC_DEFINE(USE_DGA, 1, [Enable DGA support])
-fi
+AC_MSG_CHECKING([whether to include DGA support])
 AM_CONDITIONAL(DGA, test "x$DGA" = xyes)
+AC_MSG_RESULT([$DGA])
 
 AC_ARG_WITH(default-accel,
 	    AS_HELP_STRING([--with-default-accel],
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index 512d07e..a07fb69 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -42,7 +42,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "compiler.h"
 #include "xf86Pci.h"
 #include "i810_reg.h"
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
 #include "xaa.h"
 #endif
 #include "xf86Cursor.h"
@@ -204,7 +204,7 @@ typedef struct _I810Rec {
    I810RegRec SavedReg;
    I810RegRec ModeReg;
 
-#ifdef XAA
+#ifdef HAVE_XAA_H
    XAAInfoRecPtr AccelInfoRec;
 #endif
    xf86CursorInfoPtr CursorInfoRec;
diff --git a/src/legacy/i810/i810_dga.c b/src/legacy/i810/i810_dga.c
index e258360..81e1767 100644
--- a/src/legacy/i810/i810_dga.c
+++ b/src/legacy/i810/i810_dga.c
@@ -40,7 +40,7 @@ static Bool I810_SetMode(ScrnInfoPtr, DGAModePtr);
 static int I810_GetViewport(ScrnInfoPtr);
 static void I810_SetViewport(ScrnInfoPtr, int, int, int);
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
 static void I810_Sync(ScrnInfoPtr);
 static void I810_FillRect(ScrnInfoPtr, int, int, int, int, unsigned long);
 static void I810_BlitRect(ScrnInfoPtr, int, int, int, int, int, int);
@@ -58,7 +58,7 @@ DGAFunctionRec I810DGAFuncs = {
    I810_SetMode,
    I810_SetViewport,
    I810_GetViewport,
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    I810_Sync,
    I810_FillRect,
    I810_BlitRect,
@@ -193,7 +193,7 @@ I810_SetViewport(ScrnInfoPtr pScrn, int x, int y, int flags)
    pI810->DGAViewportStatus = 0;
 }
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
 static void
 I810_FillRect(ScrnInfoPtr pScrn,
 	      int x, int y, int w, int h, unsigned long color)
diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 0f891bb..f2643f5 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -1011,7 +1011,7 @@ I810DRISwapContext(ScreenPtr pScreen, DRISyncType syncType,
 static void
 I810DRISetNeedSync(ScrnInfoPtr pScrn)
 {
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    I810Ptr pI810 = I810PTR(pScrn);
    if (pI810->AccelInfoRec)
 	pI810->AccelInfoRec->NeedToSync = TRUE;
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index f4f7f3e..6fc17bd 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -1095,7 +1095,7 @@ DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
    hwp->writeCrtc(hwp, IO_CTNL, temp);
 }
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
 static void
 I810SetRingRegs(ScrnInfoPtr scrn)
 {
@@ -1693,7 +1693,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
    }
 #endif
 
-#ifdef HAVE_DGA
+#ifdef HAVE_DGAPROC_H
    I810DGAInit(screen);
 #endif
 
@@ -1703,7 +1703,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
       return FALSE;
    }
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_NOACCEL, FALSE)) {
       if (pI810->LpRing->mem.Size != 0) {
 	 I810SetRingRegs(scrn);
@@ -1943,7 +1943,7 @@ I810LeaveVT(VT_FUNC_ARGS_DECL)
    }
 #endif
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    if (pI810->AccelInfoRec != NULL) {
       I810RefreshRing(scrn);
       I810Sync(scrn);
@@ -1968,12 +1968,12 @@ I810CloseScreen(CLOSE_SCREEN_ARGS_DECL)
    ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
    vgaHWPtr hwp = VGAHWPTR(scrn);
    I810Ptr pI810 = I810PTR(scrn);
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
 #endif
 
    if (scrn->vtSema == TRUE) {
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
       if (pI810->AccelInfoRec != NULL) {
 	 I810RefreshRing(scrn);
 	 I810Sync(scrn);
@@ -2004,7 +2004,7 @@ I810CloseScreen(CLOSE_SCREEN_ARGS_DECL)
       pI810->ScanlineColorExpandBuffers = NULL;
    }
 
-#ifdef HAVE_XAA
+#ifdef HAVE_XAA_H
    if (infoPtr) {
       if (infoPtr->ScanlineColorExpandBuffers)
 	 free(infoPtr->ScanlineColorExpandBuffers);
commit 44a1528c15eec9b3f93651e779013137864d4d2b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 11 15:05:32 2012 +0100

    sna: Remove unused cached partials list
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4fd4d1f..bea7aea 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -712,7 +712,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	list_init(&kgem->batch_partials);
 	list_init(&kgem->active_partials);
-	list_init(&kgem->cached_partials);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -1387,16 +1386,11 @@ static void kgem_partial_buffer_release(struct kgem *kgem,
 
 static bool kgem_retire__partials(struct kgem *kgem)
 {
-	struct list *list[] = {
-		&kgem->active_partials,
-		&kgem->cached_partials,
-		NULL
-	}, **head = list;
 	bool retired = false;
 
-	do while (!list_is_empty(*head)) {
+	while (!list_is_empty(&kgem->active_partials)) {
 		struct kgem_partial_bo *bo =
-			list_last_entry(*head,
+			list_last_entry(&kgem->active_partials,
 					struct kgem_partial_bo,
 					base.list);
 
@@ -1409,7 +1403,7 @@ static bool kgem_retire__partials(struct kgem *kgem)
 		kgem_partial_buffer_release(kgem, bo);
 		kgem_bo_unref(kgem, &bo->base);
 		retired = true;
-	} while (*++head);
+	}
 
 	return retired;
 }
@@ -1765,12 +1759,8 @@ static void kgem_finish_partials(struct kgem *kgem)
 decouple:
 		DBG(("%s: releasing handle=%d\n",
 		     __FUNCTION__, bo->base.handle));
-		if (!list_is_empty(&bo->base.vma)) {
-			list_move(&bo->base.list, &kgem->cached_partials);
-		} else {
-			list_del(&bo->base.list);
-			kgem_bo_unref(kgem, &bo->base);
-		}
+		list_del(&bo->base.list);
+		kgem_bo_unref(kgem, &bo->base);
 	}
 }
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 44d4327..273240f 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list batch_partials, active_partials, cached_partials;
+	struct list batch_partials, active_partials;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
commit 94d489ae43a2c4d4d9ddc9ce30ff1a9142b77d4a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 11 14:25:20 2012 +0100

    sna: Minor tweak to upload in place if the CPU bo is busy
    
    Since we have to pay the price of the stall anyway...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0e9f47e..f4921b2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1466,12 +1466,12 @@ static inline bool region_inplace(struct sna *sna,
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
 	     region->extents.y2 - region->extents.y1,
-	     ((region->extents.x2 - region->extents.x1) *
-	      (region->extents.y2 - region->extents.y1) *
+	     ((int)(region->extents.x2 - region->extents.x1) *
+	      (int)(region->extents.y2 - region->extents.y1) *
 	      pixmap->drawable.bitsPerPixel >> 12)
 	     >= sna->kgem.half_cpu_cache_pages));
-	return ((region->extents.x2 - region->extents.x1) *
-		(region->extents.y2 - region->extents.y1) *
+	return ((int)(region->extents.x2 - region->extents.x1) *
+		(int)(region->extents.y2 - region->extents.y1) *
 		pixmap->drawable.bitsPerPixel >> 12)
 		>= sna->kgem.half_cpu_cache_pages;
 }
@@ -2986,24 +2986,41 @@ static bool upload_inplace(struct sna *sna,
 			   struct sna_pixmap *priv,
 			   RegionRec *region)
 {
-	if (!region_inplace(sna, pixmap, region, priv, true))
+	if (!region_inplace(sna, pixmap, region, priv, true)) {
+		DBG(("%s? no, region not suitable\n", __FUNCTION__));
 		return false;
+	}
 
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
-		if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo))
+		if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo)) {
+			DBG(("%s? no, GPU bo not mappable\n", __FUNCTION__));
 			return false;
+		}
 
-		if (!kgem_bo_is_busy(priv->gpu_bo))
+		if (!kgem_bo_is_busy(priv->gpu_bo)) {
+			DBG(("%s? yes, GPU bo is idle\n", __FUNCTION__));
 			return true;
+		}
 
 		if (!priv->pinned &&
-		    region_subsumes_drawable(region, &pixmap->drawable))
+		    region_subsumes_drawable(region, &pixmap->drawable)) {
+			DBG(("%s? yes, will replace busy GPU\n", __FUNCTION__));
 			return true;
+		}
+
 	}
 
-	return priv->gpu_bo == NULL && priv->cpu_bo == NULL;
+	if (priv->cpu_bo) {
+		if (kgem_bo_is_busy(priv->cpu_bo)) {
+			DBG(("%s? yes, CPU bo is busy\n", __FUNCTION__));
+			return true;
+		}
+	}
+
+	DBG(("%s? no\n", __FUNCTION__));
+	return false;
 }
 
 static Bool
commit b5db90aa52f10897ad2d7795df94c0e3d2878aea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 11 12:52:24 2012 +0100

    sna: Discard the mask for "non-overlapping" glyphs
    
    If we can achieve the same rasterisation results without the mask,
    rendering the glyphs-to-dst is so much faster that it outweighs the cost
    of checking for overlapping glyphs.
    
    The penalty is then for code that correctly declared that it required
    a mask, who now have an extra ~10% overhead in the processing of their
    glyphs.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 84ee13c..88e1c9e 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1087,9 +1087,15 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 				extents.y2 = y2;
 				first = FALSE;
 			} else {
-				/* Potential overlap */
-				if (x1 < extents.x2 && x2 > extents.x1 &&
-				    y1 < extents.y2 && y2 > extents.y1) {
+				/* Potential overlap?
+				 * We cheat and ignore the boundary pixels, as
+				 * the likelihood of an actual overlap of
+				 * inkedk pixels being noticeable in the
+				 * boundary is small, yet glyphs frequently
+				 * overlap on the boundaries.
+				 */
+				if (x1 < extents.x2-1 && x2 > extents.x1+1 &&
+				    y1 < extents.y2-1 && y2 > extents.y1+1) {
 					format = NULL;
 					goto out;
 				}
@@ -1112,10 +1118,10 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 		 * of the previous boxes and walk those.
 		 */
 		for (j = 0; j < i; j++) {
-			if (extents.x2 < list_extents[j].x1 &&
-			    extents.x1 > list_extents[j].x2 &&
-			    extents.y2 < list_extents[j].y1 &&
-			    extents.y1 > list_extents[j].y2) {
+			if (extents.x1 < list_extents[j].x2-1 &&
+			    extents.x2 > list_extents[j].x1+1 &&
+			    extents.y1 < list_extents[j].y2-1 &&
+			    extents.y2 > list_extents[j].y1+1) {
 				format = NULL;
 				goto out;
 			}
@@ -1354,6 +1360,19 @@ cleanup_region:
 	RegionUninit(&region);
 }
 
+static bool op_is_bounded(uint8_t op)
+{
+	switch (op) {
+	case PictOpOver:
+	case PictOpOutReverse:
+	case PictOpAdd:
+	case PictOpXor:
+		return true;
+	default:
+		return false;
+	}
+}
+
 void
 sna_glyphs(CARD8 op,
 	   PicturePtr src,
@@ -1365,7 +1384,6 @@ sna_glyphs(CARD8 op,
 	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv;
-	PictFormatPtr _mask;
 
 	DBG(("%s(op=%d, nlist=%d, src=(%d, %d))\n",
 	     __FUNCTION__, op, nlist, src_x, src_y));
@@ -1399,12 +1417,21 @@ sna_glyphs(CARD8 op,
 		goto fallback;
 	}
 
-	_mask = mask;
-	/* XXX discard the mask for non-overlapping glyphs? */
-
-	if (!_mask ||
+	if (!mask ||
 	    (((nlist == 1 && list->len == 1) || op == PictOpAdd) &&
-	     dst->format == (_mask->depth << 24 | _mask->format))) {
+	     dst->format == (mask->depth << 24 | mask->format))) {
+		if (glyphs_to_dst(sna, op,
+				  src, dst,
+				  src_x, src_y,
+				  nlist, list, glyphs))
+			return;
+	}
+
+	/* Try to discard the mask for non-overlapping glyphs */
+	if (mask &&
+	    op_is_bounded(op) &&
+	    dst->pCompositeClip->data == NULL &&
+	    mask == glyphs_format(nlist, list, glyphs)) {
 		if (glyphs_to_dst(sna, op,
 				  src, dst,
 				  src_x, src_y,
@@ -1412,11 +1439,12 @@ sna_glyphs(CARD8 op,
 			return;
 	}
 
-	if (!_mask)
-		_mask = glyphs_format(nlist, list, glyphs);
-	if (_mask) {
+	/* Otherwise see if we can substitute a mask */
+	if (!mask)
+		mask = glyphs_format(nlist, list, glyphs);
+	if (mask) {
 		if (glyphs_via_mask(sna, op,
-				    src, dst, _mask,
+				    src, dst, mask,
 				    src_x, src_y,
 				    nlist, list, glyphs))
 			return;
commit 665c9cbdcf2c493cac29d316eaffa2abe197a183
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 10 19:01:12 2012 +0100

    sna: Disable periodic scanout refresh if all outputs are disconnected
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 192d843..0e9f47e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12292,7 +12292,12 @@ sna_accel_flush_callback(CallbackListPtr *list,
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 {
-	struct sna_pixmap *priv = sna_pixmap(sna->front);
+	struct sna_pixmap *priv;
+
+	if (sna->vblank_interval == 0)
+		return NULL;
+
+	priv = sna_pixmap(sna->front);
 	return priv && priv->gpu_bo ? priv : NULL;
 }
 
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 1c808d1..9140caf 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -821,9 +821,10 @@ static void update_flush_interval(struct sna *sna)
 	}
 
 	if (max_vrefresh == 0)
-		max_vrefresh = 40;
+		sna->vblank_interval = 0;
+	else
+		sna->vblank_interval = 1000 / max_vrefresh; /* Hz -> ms */
 
-	sna->vblank_interval = 1000 / max_vrefresh; /* Hz -> ms */
 	DBG(("max_vrefresh=%d, vblank_interval=%d ms\n",
 	       max_vrefresh, sna->vblank_interval));
 }
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 484c982..0bb5c40 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -499,9 +499,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (xf86ReturnOptValBool(sna->Options, OPTION_TILING_FB, FALSE))
 		sna->tiling &= ~SNA_TILING_FB;
 
-	/* Default fail-safe value of 75 Hz */
-	sna->vblank_interval = 1000 * 1000 * 1000 / 75;
-
 	sna->flags = 0;
 	if (!xf86ReturnOptValBool(sna->Options, OPTION_THROTTLE, TRUE))
 		sna->flags |= SNA_NO_THROTTLE;
commit 85fdc3143b157fd7ba3453efc86da8238d9de316
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 10 12:43:46 2012 +0100

    sna: Avoid the expensive recomputation of the region when copying
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 8d01807..44d4327 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -433,7 +433,7 @@ static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
 		pitch /= 4;
 	if (pitch > MAXSHORT) {
 		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
-		     __FUNCTION__, pitch));
+		     __FUNCTION__, bo->handle, pitch));
 		return false;
 	}
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5a37a28..192d843 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3673,10 +3673,8 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 
 static void
 sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
-		    BoxPtr box, int n,
-		    int dx, int dy,
-		    Bool reverse, Bool upsidedown, Pixel bitplane,
-		    void *closure)
+		    RegionPtr region,int dx, int dy,
+		    Pixel bitplane, void *closure)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(src);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -3684,12 +3682,14 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	int alu = gc ? gc->alu : GXcopy;
 	int16_t tx, ty;
 
-	if (n == 0 || ((dx | dy) == 0 && alu == GXcopy))
+	assert(RegionNumRects(region));
+	if (((dx | dy) == 0 && alu == GXcopy))
 		return;
 
 	DBG(("%s (boxes=%dx[(%d, %d), (%d, %d)...], src=+(%d, %d), alu=%d, pix.size=%dx%d)\n",
-	     __FUNCTION__, n,
-	     box[0].x1, box[0].y1, box[0].x2, box[0].y2,
+	     __FUNCTION__, RegionNumRects(region),
+	     region->extents.x1, region->extents.y1,
+	     region->extents.x2, region->extents.y2,
 	     dx, dy, alu,
 	     pixmap->drawable.width, pixmap->drawable.height));
 
@@ -3713,27 +3713,32 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (!sna->render.copy_boxes(sna, alu,
 					    pixmap, priv->gpu_bo, dx, dy,
 					    pixmap, priv->gpu_bo, tx, ty,
-					    box, n, 0)) {
+					    RegionRects(region),
+					    RegionNumRects(region),
+					    0)) {
 			DBG(("%s: fallback - accelerated copy boxes failed\n",
 			     __FUNCTION__));
 			goto fallback;
 		}
 
-		if (!DAMAGE_IS_ALL(priv->gpu_damage))
-			sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty);
+		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
+			RegionTranslate(region, tx, ty);
+			sna_damage_add(&priv->gpu_damage, region);
+		}
 		assert_pixmap_damage(pixmap);
 	} else {
-		FbBits *dst_bits, *src_bits;
-		int stride, bpp;
-
 fallback:
 		DBG(("%s: fallback", __FUNCTION__));
 		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
 			return;
 
-		stride = pixmap->devKind;
-		bpp = pixmap->drawable.bitsPerPixel;
-		if (alu == GXcopy && bpp >= 8) {
+		if (alu == GXcopy && pixmap->drawable.bitsPerPixel >= 8) {
+			BoxPtr box = RegionRects(region);
+			int n = RegionNumRects(region);
+			FbBits *dst_bits, *src_bits;
+			int stride = pixmap->devKind;
+			int bpp = pixmap->drawable.bitsPerPixel;
+
 			dst_bits = (FbBits *)
 				((char *)pixmap->devPrivate.ptr +
 				 ty * stride + tx * bpp / 8);
@@ -3748,26 +3753,15 @@ fallback:
 				box++;
 			} while (--n);
 		} else {
-			DBG(("%s: alu==GXcopy? %d, reverse? %d, upsidedown? %d, bpp? %d\n",
-			     __FUNCTION__, alu == GXcopy, reverse, upsidedown, bpp));
-			dst_bits = pixmap->devPrivate.ptr;
-			stride /= sizeof(FbBits);
-			do {
-				fbBlt(dst_bits + (box->y1 + dy) * stride,
-				      stride,
-				      (box->x1 + dx) * bpp,
-
-				      dst_bits + (box->y1 + ty) * stride,
-				      stride,
-				      (box->x1 + tx) * bpp,
+			if (!sna_gc_move_to_cpu(gc, dst, region))
+				return;
 
-				      (box->x2 - box->x1) * bpp,
-				      (box->y2 - box->y1),
+			get_drawable_deltas(src, pixmap, &tx, &ty);
+			miCopyRegion(src, dst, gc,
+				     region, dx - tx, dy - ty,
+				     fbCopyNtoN, 0, NULL);
 
-				      alu, -1, bpp,
-				      reverse, upsidedown);
-				box++;
-			} while (--n);
+			sna_gc_move_to_gpu(gc);
 		}
 	}
 }
@@ -3797,10 +3791,8 @@ source_prefer_gpu(struct sna_pixmap *priv)
 
 static void
 sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
-	       BoxPtr box, int n,
-	       int dx, int dy,
-	       Bool reverse, Bool upsidedown, Pixel bitplane,
-	       void *closure)
+	       RegionPtr region, int dx, int dy,
+	       Pixel bitplane, void *closure)
 {
 	PixmapPtr src_pixmap = get_drawable_pixmap(src);
 	struct sna_pixmap *src_priv = sna_pixmap(src_pixmap);
@@ -3812,9 +3804,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	int alu = gc ? gc->alu : GXcopy;
 	int16_t src_dx, src_dy;
 	int16_t dst_dx, dst_dy;
+	BoxPtr box = RegionRects(region);
+	int n = RegionNumRects(region);
 	int stride, bpp;
 	char *bits;
-	RegionRec region;
 	Bool replaces;
 
 	if (n == 0)
@@ -3822,10 +3815,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	if (src_pixmap == dst_pixmap)
 		return sna_self_copy_boxes(src, dst, gc,
-					   box, n,
-					   dx, dy,
-					   reverse, upsidedown, bitplane,
-					   closure);
+					   region, dx, dy,
+					   bitplane, closure);
 
 	DBG(("%s (boxes=%dx[(%d, %d), (%d, %d)...], src=+(%d, %d), alu=%d, src.size=%dx%d, dst.size=%dx%d)\n",
 	     __FUNCTION__, n,
@@ -3837,8 +3828,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	assert_pixmap_damage(dst_pixmap);
 	assert_pixmap_damage(src_pixmap);
 
-	pixman_region_init_rects(&region, box, n);
-
 	bpp = dst_pixmap->drawable.bitsPerPixel;
 
 	get_drawable_deltas(dst, dst_pixmap, &dst_dx, &dst_dy);
@@ -3846,11 +3835,15 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	src_dx += dx;
 	src_dy += dy;
 
+	RegionTranslate(region, dst_dx, dst_dy);
+	src_dx -= dst_dx;
+	src_dy -= dst_dy;
+
 	replaces = n == 1 &&
-		box->x1 + dst_dx <= 0 &&
-		box->y1 + dst_dy <= 0 &&
-		box->x2 + dst_dx >= dst_pixmap->drawable.width &&
-		box->y2 + dst_dy >= dst_pixmap->drawable.height;
+		box->x1 <= 0 &&
+		box->y1 <= 0 &&
+		box->x2 >= dst_pixmap->drawable.width &&
+		box->y2 >= dst_pixmap->drawable.height;
 
 	DBG(("%s: dst=(priv=%p, gpu_bo=%p, cpu_bo=%p), src=(priv=%p, gpu_bo=%p, cpu_bo=%p), replaces=%d\n",
 	     __FUNCTION__,
@@ -3862,14 +3855,12 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     src_priv ? src_priv->cpu_bo : NULL,
 	     replaces));
 
-	RegionTranslate(&region, dst_dx, dst_dy);
-
 	if (dst_priv == NULL)
 		goto fallback;
 
 	if (dst_priv->cpu_damage && alu_overwrites(alu)) {
 		DBG(("%s: overwritting CPU damage\n", __FUNCTION__));
-		sna_damage_subtract(&dst_priv->cpu_damage, &region);
+		sna_damage_subtract(&dst_priv->cpu_damage, region);
 		if (dst_priv->cpu_damage == NULL) {
 			list_del(&dst_priv->list);
 			dst_priv->undamaged = false;
@@ -3879,15 +3870,15 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	bo = sna_drawable_use_bo(&dst_pixmap->drawable,
 				 source_prefer_gpu(src_priv) ?:
-				 region_inplace(sna, dst_pixmap, &region,
+				 region_inplace(sna, dst_pixmap, region,
 						dst_priv, alu_overwrites(alu)),
-				 &region.extents, &damage);
+				 &region->extents, &damage);
 	if (bo) {
 		if (src_priv && src_priv->clear) {
 			DBG(("%s: applying src clear[%08x] to dst\n",
 			     __FUNCTION__, src_priv->clear_color));
 			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(&region));
+						   RegionExtents(region));
 			if (n == 1) {
 				if (!sna->render.fill_one(sna,
 							  dst_pixmap, bo,
@@ -3915,19 +3906,19 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 
 			if (damage)
-				sna_damage_add(damage, &region);
+				sna_damage_add(damage, region);
 
-			goto out;
+			return;
 		}
 
 		if (src_priv &&
-		    move_to_gpu(src_pixmap, src_priv, &region.extents, alu) &&
+		    move_to_gpu(src_pixmap, src_priv, &region->extents, alu) &&
 		    sna_pixmap_move_to_gpu(src_pixmap, MOVE_READ)) {
 			DBG(("%s: move whole src_pixmap to GPU and copy\n",
 			     __FUNCTION__));
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -3936,21 +3927,21 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (damage) {
 				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(&region));
-				sna_damage_add(damage, &region);
+							   RegionExtents(region));
+				sna_damage_add(damage, region);
 			}
-			goto out;
+			return;
 		}
 
 		if (src_priv &&
-		    region_overlaps_damage(&region, src_priv->gpu_damage,
+		    region_overlaps_damage(region, src_priv->gpu_damage,
 					   src_dx, src_dy)) {
 			BoxRec area;
 
 			DBG(("%s: region overlaps GPU damage, upload and copy\n",
 			     __FUNCTION__));
 
-			area = region.extents;
+			area = region->extents;
 			area.x1 += src_dx;
 			area.x2 += src_dx;
 			area.y1 += src_dy;
@@ -3962,7 +3953,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -3971,10 +3962,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (damage) {
 				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(&region));
-				sna_damage_add(damage, &region);
+							   RegionExtents(region));
+				sna_damage_add(damage, region);
 			}
-			goto out;
+			return;
 		}
 
 		if (bo != dst_priv->gpu_bo)
@@ -3988,17 +3979,17 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			assert(bo != dst_priv->cpu_bo);
 
-			RegionTranslate(&region, src_dx-dst_dx, src_dy-dst_dy);
+			RegionTranslate(region, src_dx, src_dy);
 			ret = sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-							      &region,
+							      region,
 							      MOVE_READ | MOVE_ASYNC_HINT);
-			RegionTranslate(&region, dst_dx-src_dx, dst_dy-src_dy);
+			RegionTranslate(region, -src_dx, -src_dy);
 			if (!ret)
 				goto fallback;
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -4007,10 +3998,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (damage) {
 				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(&region));
-				sna_damage_add(damage, &region);
+							   RegionExtents(region));
+				sna_damage_add(damage, region);
 			}
-			goto out;
+			return;
 		}
 
 		if (alu != GXcopy) {
@@ -4023,15 +4014,15 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			     __FUNCTION__, alu));
 
 			tmp = sna_pixmap_create_upload(src->pScreen,
-						       region.extents.x2 - region.extents.x1,
-						       region.extents.y2 - region.extents.y1,
+						       region->extents.x2 - region->extents.x1,
+						       region->extents.y2 - region->extents.y1,
 						       src->depth,
 						       KGEM_BUFFER_WRITE_INPLACE);
 			if (tmp == NullPixmap)
-				goto out;
+				return;
 
-			dx = -region.extents.x1;
-			dy = -region.extents.y1;
+			dx = -region->extents.x1;
+			dy = -region->extents.y1;
 			for (i = 0; i < n; i++) {
 				assert(box[i].x1 + src_dx >= 0);
 				assert(box[i].y1 + src_dy >= 0);
@@ -4058,7 +4049,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
-						    dst_pixmap, bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -4069,10 +4060,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (damage) {
 				assert_pixmap_contains_box(dst_pixmap,
-							   RegionExtents(&region));
-				sna_damage_add(damage, &region);
+							   RegionExtents(region));
+				sna_damage_add(damage, region);
 			}
-			goto out;
+			return;
 		} else {
 			DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
 			     __FUNCTION__));
@@ -4085,7 +4076,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				}
 				if (src_pixmap->devPrivate.ptr == NULL) {
 					if (!src_priv->ptr) /* uninitialised!*/
-						goto out;
+						return;
 					assert(src_priv->stride);
 					src_pixmap->devPrivate.ptr = src_priv->ptr;
 					src_pixmap->devKind = src_priv->stride;
@@ -4104,7 +4095,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			} else {
 				assert(!DAMAGE_IS_ALL(dst_priv->cpu_damage));
 				if (!sna_write_boxes(sna, dst_pixmap,
-						     dst_priv->gpu_bo, dst_dx, dst_dy,
+						     dst_priv->gpu_bo, 0, 0,
 						     src_pixmap->devPrivate.ptr,
 						     src_pixmap->devKind,
 						     src_dx, src_dy,
@@ -4123,15 +4114,15 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					dst_priv->undamaged = false;
 				} else {
 					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(&region));
+								   RegionExtents(region));
 					sna_damage_add(&dst_priv->gpu_damage,
-						       &region);
+						       region);
 				}
 				assert_pixmap_damage(dst_pixmap);
 			}
 		}
 
-		goto out;
+		return;
 	}
 
 fallback:
@@ -4141,19 +4132,19 @@ fallback:
 
 		if (dst_priv) {
 			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(&region));
+						   RegionExtents(region));
 
 			if (!sna_drawable_move_region_to_cpu(&dst_pixmap->drawable,
-							     &region,
+							     region,
 							     MOVE_WRITE | MOVE_INPLACE_HINT))
-				goto out;
+				return;
 		}
 
 		do {
 			pixman_fill(dst_pixmap->devPrivate.ptr,
 				    dst_pixmap->devKind/sizeof(uint32_t),
 				    dst_pixmap->drawable.bitsPerPixel,
-				    box->x1 + dst_dx, box->y1 + dst_dy,
+				    box->x1, box->y1,
 				    box->x2 - box->x1,
 				    box->y2 - box->y1,
 				    src_priv->clear_color);
@@ -4168,68 +4159,65 @@ fallback:
 		if (src_priv) {
 			unsigned mode;
 
-			RegionTranslate(&region, src_dx-dst_dx, src_dy-dst_dy);
+			RegionTranslate(region, src_dx, src_dy);
 
 			assert_pixmap_contains_box(src_pixmap,
-						   RegionExtents(&region));
+						   RegionExtents(region));
 
 			mode = MOVE_READ;
 			if (src_priv->cpu_bo == NULL)
 				mode |= MOVE_INPLACE_HINT;
 
 			if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-							     &region, mode))
-				goto out;
+							     region, mode))
+				return;
 
-			RegionTranslate(&region, dst_dx-src_dx, dst_dy-src_dy);
+			RegionTranslate(region, -src_dx, -src_dy);
 		}
 
 		if (dst_priv) {
 			unsigned mode;
 
 			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(&region));
+						   RegionExtents(region));
 
 			if (alu_overwrites(alu))
 				mode = MOVE_WRITE | MOVE_INPLACE_HINT;
 			else
 				mode = MOVE_WRITE | MOVE_READ;
 			if (!sna_drawable_move_region_to_cpu(&dst_pixmap->drawable,
-							     &region, mode))
-				goto out;
+							     region, mode))
+				return;
 		}
 
 		dst_stride = dst_pixmap->devKind;
 		src_stride = src_pixmap->devKind;
 
 		if (alu == GXcopy && bpp >= 8) {
-			dst_bits = (FbBits *)
-				((char *)dst_pixmap->devPrivate.ptr +
-				 dst_dy * dst_stride + dst_dx * bpp / 8);
+			dst_bits = (FbBits *)dst_pixmap->devPrivate.ptr;
 			src_bits = (FbBits *)
 				((char *)src_pixmap->devPrivate.ptr +
 				 src_dy * src_stride + src_dx * bpp / 8);
 
 			do {
-				DBG(("%s: memcpy_blt(box=(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d), pitches=(%d, %d))\n",
+				DBG(("%s: memcpy_blt(box=(%d, %d), (%d, %d), src=(%d, %d), pitches=(%d, %d))\n",
 				     __FUNCTION__,
 				     box->x1, box->y1,
 				     box->x2 - box->x1,
 				     box->y2 - box->y1,
 				     src_dx, src_dy,
-				     dst_dx, dst_dy,
 				     src_stride, dst_stride));
 
+				assert(box->x1 >= 0);
+				assert(box->y1 >= 0);
+				assert(box->x2 <= dst_pixmap->drawable.width);
+				assert(box->y2 <= dst_pixmap->drawable.height);
+
 				assert(box->x1 + src_dx >= 0);
 				assert(box->y1 + src_dy >= 0);
 				assert(box->x2 + src_dx <= src_pixmap->drawable.width);
 				assert(box->y2 + src_dy <= src_pixmap->drawable.height);
 
-				assert(box->x1 + dst_dx >= 0);
-				assert(box->y1 + dst_dy >= 0);
-				assert(box->x2 + dst_dx <= dst_pixmap->drawable.width);
-				assert(box->y2 + dst_dy <= dst_pixmap->drawable.height);
-
 				memcpy_blt(src_bits, dst_bits, bpp,
 					   src_stride, dst_stride,
 					   box->x1, box->y1,
@@ -4239,42 +4227,144 @@ fallback:
 				box++;
 			} while (--n);
 		} else {
-			DBG(("%s: alu==GXcopy? %d, reverse? %d, upsidedown? %d, bpp? %d\n",
-			     __FUNCTION__, alu == GXcopy, reverse, upsidedown, bpp));
-			dst_bits = dst_pixmap->devPrivate.ptr;
-			src_bits = src_pixmap->devPrivate.ptr;
+			DBG(("%s: fallback -- miCopyRegion\n", __FUNCTION__));
 
-			dst_stride /= sizeof(FbBits);
-			src_stride /= sizeof(FbBits);
-			do {
-				DBG(("%s: fbBlt (%d, %d), (%d, %d)\n",
-				     __FUNCTION__,
-				     box->x1, box->y1,
-				     box->x2, box->y2));
-				assert(box->x1 + src_dx >= 0);
-				assert(box->y1 + src_dy >= 0);
-				assert(box->x1 + dst_dx >= 0);
-				assert(box->y1 + dst_dy >= 0);
-				fbBlt(src_bits + (box->y1 + src_dy) * src_stride,
-				      src_stride,
-				      (box->x1 + src_dx) * bpp,
+			RegionTranslate(region, -dst_dx, -dst_dy);
 
-				      dst_bits + (box->y1 + dst_dy) * dst_stride,
-				      dst_stride,
-				      (box->x1 + dst_dx) * bpp,
+			if (!sna_gc_move_to_cpu(gc, dst, region))
+				return;
 
-				      (box->x2 - box->x1) * bpp,
-				      (box->y2 - box->y1),
+			miCopyRegion(src, dst, gc,
+				     region, dx, dy,
+				     fbCopyNtoN, 0, NULL);
 
-				      alu, -1, bpp,
-				      reverse, upsidedown);
-				box++;
-			} while (--n);
+			sna_gc_move_to_gpu(gc);
+		}
+	}
+}
+
+typedef void (*sna_copy_func)(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+			      RegionPtr region, int dx, int dy,
+			      Pixel bitPlane, void *closure);
+
+static RegionPtr
+sna_do_copy(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+	    int sx, int sy,
+	    int width, int height,
+	    int dx, int dy,
+	    sna_copy_func copy, Pixel bitPlane, void *closure)
+{
+	RegionPtr clip = NULL, free_clip = NULL;
+	RegionRec region;
+	bool expose = false;
+
+	/* Short cut for unmapped windows */
+	if (dst->type == DRAWABLE_WINDOW && !((WindowPtr)dst)->realized)
+		return NULL;
+
+	if (src->pScreen->SourceValidate)
+		src->pScreen->SourceValidate(src, sx, sy,
+					     width, height,
+					     gc->subWindowMode);
+
+	sx += src->x;
+	sy += src->y;
+
+	dx += dst->x;
+	dy += dst->y;
+
+	region.extents.x1 = dx;
+	region.extents.y1 = dy;
+	region.extents.x2 = dx + width;
+	region.extents.y2 = dy + height;
+	region.data = NULL;
+
+	{
+		BoxPtr box = &gc->pCompositeClip->extents;
+		if (region.extents.x1 < box->x1)
+			region.extents.x1 = box->x1;
+		if (region.extents.x2 > box->x2)
+			region.extents.x2 = box->x2;
+		if (region.extents.y1 < box->y1)
+			region.extents.y1 = box->y1;
+		if (region.extents.y2 > box->y2)
+			region.extents.y2 = box->y2;
+	}
+	if (box_empty(&region.extents))
+		return NULL;
+
+	region.extents.x1 += sx - dx;
+	region.extents.x2 += sx - dx;
+	region.extents.y1 += sy - dy;
+	region.extents.y2 += sy - dy;
+
+	/* Compute source clip region */
+	if (src->type == DRAWABLE_PIXMAP) {
+		if (src == dst && gc->clientClipType == CT_NONE)
+			clip = gc->pCompositeClip;
+	} else {
+		if (gc->subWindowMode == IncludeInferiors) {
+			/*
+			 * XFree86 DDX empties the border clip when the
+			 * VT is inactive, make sure the region isn't empty
+			 */
+			if (!((WindowPtr) src)->parent &&
+			    RegionNotEmpty(&((WindowPtr) src)->borderClip)) {
+				/*
+				 * special case bitblt from root window in
+				 * IncludeInferiors mode; just like from a pixmap
+				 */
+			} else if (src == dst && gc->clientClipType == CT_NONE) {
+				clip = gc->pCompositeClip;
+			} else {
+				free_clip = clip =
+					NotClippedByChildren((WindowPtr) src);
+			}
+		} else
+			clip = &((WindowPtr)src)->clipList;
+	}
+	if (clip == NULL) {
+		expose = true;
+		if (region.extents.x1 < src->x) {
+			region.extents.x1 = src->x;
+			expose = false;
 		}
+		if (region.extents.y1 < src->y) {
+			region.extents.y1 = src->y;
+			expose = false;
+		}
+		if (region.extents.x2 > src->x + (int) src->width) {
+			region.extents.x2 = src->x + (int) src->width;
+			expose = false;
+		}
+		if (region.extents.y2 > src->y + (int) src->height) {
+			region.extents.y2 = src->y + (int) src->height;
+			expose = false;
+		}
+		if (box_empty(&region.extents))
+			return NULL;
+	} else {
+		RegionIntersect(&region, &region, clip);
+		if (free_clip)
+			RegionDestroy(free_clip);
 	}
+	RegionTranslate(&region, dx-sx, dy-sy);
+	if (gc->pCompositeClip->data)
+		RegionIntersect(&region, &region, gc->pCompositeClip);
 
-out:
+	if (RegionNotEmpty(&region))
+		copy(src, dst, gc, &region, sx-dx, sy-dy, bitPlane, closure);
 	RegionUninit(&region);
+
+	/* Pixmap sources generate a NoExposed (we return NULL to do this) */
+	clip = NULL;
+	if (!expose && gc->fExpose)
+		clip = miHandleExposures(src, dst, gc,
+					 sx - src->x, sy - src->y,
+					 width, height,
+					 dx - dst->x, dy - dst->y,
+					 (unsigned long) bitPlane);
+	return clip;
 }
 
 static RegionPtr
@@ -4367,12 +4457,12 @@ out:
 		return ret;
 	}
 
-	return miDoCopy(src, dst, gc,
-			src_x, src_y,
-			width, height,
-			dst_x, dst_y,
-			src == dst ? sna_self_copy_boxes : sna_copy_boxes,
-			0, NULL);
+	return sna_do_copy(src, dst, gc,
+			   src_x, src_y,
+			   width, height,
+			   dst_x, dst_y,
+			   src == dst ? sna_self_copy_boxes : sna_copy_boxes,
+			   0, NULL);
 }
 
 inline static Bool
@@ -5350,10 +5440,8 @@ struct sna_copy_plane {
 
 static void
 sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
-		    BoxPtr box, int n,
-		    int sx, int sy,
-		    Bool reverse, Bool upsidedown, Pixel bitplane,
-		    void *closure)
+		    RegionRec *region, int sx, int sy,
+		    Pixel bitplane, void *closure)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -5361,17 +5449,20 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 	PixmapPtr bitmap = (PixmapPtr)_bitmap;
 	uint32_t br00, br13;
 	int16_t dx, dy;
+	BoxPtr box;
+	int n;
 
-	DBG(("%s: plane=%x x%d\n", __FUNCTION__, (unsigned)bitplane, n));
+	DBG(("%s: plane=%x (%d,%d),(%d,%d)x%d\n",
+	     __FUNCTION__, (unsigned)bitplane, RegionNumRects(region),
+	     region->extents.x1, region->extents.y1,
+	     region->extents.x2, region->extents.y2));
 
-	if (n == 0)
-		return;
+	box = RegionRects(region);
+	n = RegionNumRects(region);
+	assert(n);
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 	assert_pixmap_contains_boxes(pixmap, box, n, dx, dy);
-	if (arg->damage)
-		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
-	assert_pixmap_damage(pixmap);
 
 	br00 = 3 << 20;
 	br13 = arg->bo->pitch;
@@ -5501,15 +5592,18 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		box++;
 	} while (--n);
 
+	if (arg->damage) {
+		RegionTranslate(region, dx, dy);
+		sna_damage_add(arg->damage, region);
+	}
+	assert_pixmap_damage(pixmap);
 	sna->blt_state.fill_bo = 0;
 }
 
 static void
 sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
-		   BoxPtr box, int n,
-		   int sx, int sy,
-		   Bool reverse, Bool upsidedown, Pixel bitplane,
-		   void *closure)
+		   RegionPtr region, int sx, int sy,
+		   Pixel bitplane, void *closure)
 {
 	PixmapPtr dst_pixmap = get_drawable_pixmap(drawable);
 	PixmapPtr src_pixmap = get_drawable_pixmap(source);
@@ -5518,6 +5612,8 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	int16_t dx, dy;
 	int bit = ffs(bitplane) - 1;
 	uint32_t br00, br13;
+	BoxPtr box = RegionRects(region);
+	int n = RegionNumRects(region);
 
 	DBG(("%s: plane=%x [%d] x%d\n", __FUNCTION__,
 	     (unsigned)bitplane, bit, n));
@@ -5531,9 +5627,6 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 
 	get_drawable_deltas(drawable, dst_pixmap, &dx, &dy);
 	assert_pixmap_contains_boxes(dst_pixmap, box, n, dx, dy);
-	if (arg->damage)
-		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
-	assert_pixmap_damage(dst_pixmap);
 
 	br00 = XY_MONO_SRC_COPY;
 	if (drawable->bitsPerPixel == 32)
@@ -5707,6 +5800,11 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 		box++;
 	} while (--n);
 
+	if (arg->damage) {
+		RegionTranslate(region, dx, dy);
+		sna_damage_add(arg->damage, region);
+	}
+	assert_pixmap_damage(dst_pixmap);
 	sna->blt_state.fill_bo = 0;
 }
 
@@ -5802,12 +5900,12 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 		}
 		RegionUninit(&region);
-		return miDoCopy(src, dst, gc,
-				src_x, src_y,
-				w, h,
-				dst_x, dst_y,
-				src->depth == 1 ? sna_copy_bitmap_blt :sna_copy_plane_blt,
-				(Pixel)bit, &arg);
+		return sna_do_copy(src, dst, gc,
+				   src_x, src_y,
+				   w, h,
+				   dst_x, dst_y,
+				   src->depth == 1 ? sna_copy_bitmap_blt : sna_copy_plane_blt,
+				   (Pixel)bit, &arg);
 	}
 
 fallback:
@@ -12105,8 +12203,11 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 
 	RegionNull(&dst);
 	RegionIntersect(&dst, &win->borderClip, src);
+	if (!RegionNotEmpty(&dst))
+		return;
+
 #ifdef COMPOSITE
-	if (pixmap->screen_x || pixmap->screen_y)
+	if (pixmap->screen_x | pixmap->screen_y)
 		RegionTranslate(&dst, -pixmap->screen_x, -pixmap->screen_y);
 #endif
 
@@ -12118,8 +12219,8 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 		miCopyRegion(&pixmap->drawable, &pixmap->drawable,
 			     0, &dst, dx, dy, fbCopyNtoN, 0, NULL);
 	} else {
-		miCopyRegion(&pixmap->drawable, &pixmap->drawable,
-			     NULL, &dst, dx, dy, sna_self_copy_boxes, 0, NULL);
+		sna_self_copy_boxes(&pixmap->drawable, &pixmap->drawable, NULL,
+				    &dst, dx, dy, 0, NULL);
 	}
 
 	RegionUninit(&dst);
@@ -12389,7 +12490,7 @@ static void sna_accel_flush(struct sna *sna)
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
 	bool busy;
 
-	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
+	DBG(("%s (time=%ld), cpu damage? %d, exec? %d nbatch=%d, busy? %d\n",
 	     __FUNCTION__, (long)TIME,
 	     priv && priv->cpu_damage,
 	     priv && priv->gpu_bo->exec != NULL,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 744818f..2341cb3 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3184,7 +3184,7 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 	/* XXX a8 boxes */
 	if (!(dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)) {
 		DBG(("%s: fallback -- can not perform operation in place, unhanbled format %08lx\n",
-		     __FUNCTION__, dst->format));
+		     __FUNCTION__, (long)dst->format));
 		goto pixman;
 	}
 
commit 747ed0ca28bb46fc45ff20acd1f82212fa2b46bd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 10 17:05:20 2012 +0100

    sna: Only promote to full GPU if we already have a GPU bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6567de1..5a37a28 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10393,7 +10393,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		if (region_subsumes_drawable(&region, &pixmap->drawable) ||
 		    box_inplace(pixmap, &region.extents)) {
 			DBG(("%s: promoting to full GPU\n", __FUNCTION__));
-			if (priv->cpu_damage == NULL) {
+			if (priv->gpu_bo && priv->cpu_damage == NULL) {
 				sna_damage_all(&priv->gpu_damage,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height);
commit 232217eef8f99f5678d65bf9aa5b898ef6d3b3c6
Author: Daniel Stone <daniel at fooishbar.org>
Date:   Tue Jul 10 00:32:13 2012 +0100

    i810: Make DGA optional
    
    Don't build DGA when it's not available, or when we don't want it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 15ed9b9..dde23f1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -161,7 +161,6 @@ fi
 
 PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto pixman-1 >= $required_pixman_version $REQUIRED_MODULES])
 
-
 AC_MSG_CHECKING([whether to include XAA support])
 AC_ARG_ENABLE(xaa,
 	      AS_HELP_STRING([--enable-xaa],
@@ -181,6 +180,25 @@ if test "x$XAA" = xyes; then
 fi
 AM_CONDITIONAL(XAA, test "x$XAA" = xyes)
 
+AC_MSG_CHECKING([whether to include DGA support])
+AC_ARG_ENABLE(dga,
+	      AS_HELP_STRING([--enable-dga],
+			     [Enable legacy Direct Graphics Access (DGA) [default=auto]]),
+	      [DGA="$enableval"],
+	      [DGA=auto])
+AC_MSG_RESULT([$DGA])
+AM_CONDITIONAL(DGA, test "x$DGA" != xno)
+if test "x$DGA" != xno; then
+        save_CFLAGS=$CFLAGS
+        CFLAGS=$XSERVER_CFLAGS
+	AC_CHECK_HEADERS([dgaproc.h], DGA=yes, DGA=no)
+        CFLAGS=$save_CFLAGS
+fi
+if test "x$DGA" = xyes; then
+	AC_DEFINE(USE_DGA, 1, [Enable DGA support])
+fi
+AM_CONDITIONAL(DGA, test "x$DGA" = xyes)
+
 AC_ARG_WITH(default-accel,
 	    AS_HELP_STRING([--with-default-accel],
 			   [Select the default acceleration method [default=uxa if enabled, otherwise sna]]),
diff --git a/src/legacy/i810/Makefile.am b/src/legacy/i810/Makefile.am
index 01cc218..07a384f 100644
--- a/src/legacy/i810/Makefile.am
+++ b/src/legacy/i810/Makefile.am
@@ -10,7 +10,6 @@ AM_CFLAGS = @CWARNFLAGS@ @XORG_CFLAGS@ @DRM_CFLAGS@ @DRI_CFLAGS@ @PCIACCESS_CFLA
 liblegacy_i810_la_SOURCES = \
          i810_common.h \
          i810_cursor.c \
-         i810_dga.c \
          i810_driver.c \
          i810.h \
          i810_memory.c \
@@ -24,6 +23,11 @@ liblegacy_i810_la_SOURCES += \
          i810_accel.c
 endif
 
+if DGA
+liblegacy_i810_la_SOURCES += \
+         i810_dga.c
+endif
+
 if DRI
 liblegacy_i810_la_SOURCES +=\
          i810_dri.c \
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index df8f8e1..f4f7f3e 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -993,7 +993,7 @@ DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
        uint32_t LCD_TV_Control = INREG(LCD_TV_C);
        uint32_t TV_HTotal = INREG(LCD_TV_HTOTAL);
        uint32_t ActiveStart, ActiveEnd;
-       
+
        if((LCD_TV_Control & LCD_TV_ENABLE)
 	  && !(LCD_TV_Control & LCD_TV_VGAMOD)
 	   && TV_HTotal) {
@@ -1006,7 +1006,7 @@ DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
        OUTREG(LCD_TV_OVRACT,
 	      (ActiveEnd << 16) | ActiveStart);
    }
-   
+
    /* Turn on DRAM Refresh */
    temp = INREG8(DRAM_ROW_CNTL_HI);
    temp &= ~DRAM_REFRESH_RATE;
@@ -1585,11 +1585,11 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
 
    pI810->LpRing = calloc(sizeof(I810RingBuffer),1);
    if (!pI810->LpRing) {
-     xf86DrvMsg(scrn->scrnIndex, X_ERROR, 
+     xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		"Could not allocate lpring data structure.\n");
      return FALSE;
    }
-   
+
    miClearVisualTypes();
 
    /* Re-implemented Direct Color support, -jens */
@@ -1623,7 +1623,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
     * pI810->directRenderingEnabled based on it each generation.
     */
    pI810->directRenderingEnabled = !pI810->directRenderingDisabled;
-   
+
    if (pI810->directRenderingEnabled==TRUE)
      pI810->directRenderingEnabled = I810DRIScreenInit(screen);
 
@@ -1693,7 +1693,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
    }
 #endif
 
-#ifdef XFreeXDGA
+#ifdef HAVE_DGA
    I810DGAInit(screen);
 #endif
 
commit 78dc0c04745ad4485b994f67833f4a155749f01d
Author: Daniel Stone <daniel at fooishbar.org>
Date:   Tue Jul 10 00:32:13 2012 +0100

    i810: Make XAA optional
    
    Don't build XAA when it's not available, or when we don't want it.
    
    Signed-off-by: Daniel Stone <daniel at fooishbar.org>

diff --git a/configure.ac b/configure.ac
index 7ab7ab8..15ed9b9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -158,6 +158,29 @@ if test "x$GLAMOR" != "xno"; then
 	AC_DEFINE(USE_GLAMOR, 1, [Enable glamor acceleration])
 fi
 
+
+PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto pixman-1 >= $required_pixman_version $REQUIRED_MODULES])
+
+
+AC_MSG_CHECKING([whether to include XAA support])
+AC_ARG_ENABLE(xaa,
+	      AS_HELP_STRING([--enable-xaa],
+			     [Enable legacy X Acceleration Architecture (XAA) [default=auto]]),
+	      [XAA="$enableval"],
+	      [XAA=auto])
+AC_MSG_RESULT([$XAA])
+AM_CONDITIONAL(XAA, test "x$XAA" != xno)
+if test "x$XAA" != xno; then
+        save_CFLAGS=$CFLAGS
+        CFLAGS=$XSERVER_CFLAGS
+	AC_CHECK_HEADERS([xaa.h], XAA=yes, XAA=no)
+        CFLAGS=$save_CFLAGS
+fi
+if test "x$XAA" = xyes; then
+	AC_DEFINE(USE_XAA, 1, [Enable XAA support])
+fi
+AM_CONDITIONAL(XAA, test "x$XAA" = xyes)
+
 AC_ARG_WITH(default-accel,
 	    AS_HELP_STRING([--with-default-accel],
 			   [Select the default acceleration method [default=uxa if enabled, otherwise sna]]),
@@ -247,7 +270,6 @@ XORG_DRIVER_CHECK_EXT(XF86DRI, xextproto x11)
 XORG_DRIVER_CHECK_EXT(DPMSExtension, xextproto)
 
 # Obtain compiler/linker options for the driver dependencies
-PKG_CHECK_MODULES(XORG, [xorg-server >= $required_xorg_xserver_version xproto fontsproto pixman-1 >= $required_pixman_version $REQUIRED_MODULES])
 PKG_CHECK_MODULES(DRM, [libdrm >= 2.4.24]) # libdrm_intel is checked separately
 PKG_CHECK_MODULES(DRI, [xf86driproto], , DRI=no)
 PKG_CHECK_MODULES(DRI2, [dri2proto >= 2.6],, DRI2=no)
diff --git a/src/legacy/i810/Makefile.am b/src/legacy/i810/Makefile.am
index e7fa04f..01cc218 100644
--- a/src/legacy/i810/Makefile.am
+++ b/src/legacy/i810/Makefile.am
@@ -8,7 +8,6 @@ AM_CFLAGS = @CWARNFLAGS@ @XORG_CFLAGS@ @DRM_CFLAGS@ @DRI_CFLAGS@ @PCIACCESS_CFLA
 	    $(NULL)
 
 liblegacy_i810_la_SOURCES = \
-         i810_accel.c \
          i810_common.h \
          i810_cursor.c \
          i810_dga.c \
@@ -20,6 +19,11 @@ liblegacy_i810_la_SOURCES = \
          i810_video.c \
          i810_wmark.c
 
+if XAA
+liblegacy_i810_la_SOURCES += \
+         i810_accel.c
+endif
+
 if DRI
 liblegacy_i810_la_SOURCES +=\
          i810_dri.c \
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index 874551b..512d07e 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -42,7 +42,9 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "compiler.h"
 #include "xf86Pci.h"
 #include "i810_reg.h"
+#ifdef HAVE_XAA
 #include "xaa.h"
+#endif
 #include "xf86Cursor.h"
 #include "xf86xv.h"
 #include "vbe.h"
@@ -202,7 +204,9 @@ typedef struct _I810Rec {
    I810RegRec SavedReg;
    I810RegRec ModeReg;
 
+#ifdef XAA
    XAAInfoRecPtr AccelInfoRec;
+#endif
    xf86CursorInfoPtr CursorInfoRec;
    CloseScreenProcPtr CloseScreen;
    ScreenBlockHandlerProcPtr BlockHandler;
diff --git a/src/legacy/i810/i810_dga.c b/src/legacy/i810/i810_dga.c
index 336588c..e258360 100644
--- a/src/legacy/i810/i810_dga.c
+++ b/src/legacy/i810/i810_dga.c
@@ -29,8 +29,6 @@
 #include "xf86.h"
 #include "xf86_OSproc.h"
 #include "xf86Pci.h"
-#include "xaa.h"
-#include "xaalocal.h"
 #include "i810.h"
 #include "i810_reg.h"
 #include "dgaproc.h"
@@ -39,11 +37,14 @@
 static Bool I810_OpenFramebuffer(ScrnInfoPtr, char **, unsigned char **,
 				 int *, int *, int *);
 static Bool I810_SetMode(ScrnInfoPtr, DGAModePtr);
-static void I810_Sync(ScrnInfoPtr);
 static int I810_GetViewport(ScrnInfoPtr);
 static void I810_SetViewport(ScrnInfoPtr, int, int, int);
+
+#ifdef HAVE_XAA
+static void I810_Sync(ScrnInfoPtr);
 static void I810_FillRect(ScrnInfoPtr, int, int, int, int, unsigned long);
 static void I810_BlitRect(ScrnInfoPtr, int, int, int, int, int, int);
+#endif
 
 #if 0
 static void I810_BlitTransRect(ScrnInfoPtr, int, int, int, int, int, int,
@@ -57,9 +58,15 @@ DGAFunctionRec I810DGAFuncs = {
    I810_SetMode,
    I810_SetViewport,
    I810_GetViewport,
+#ifdef HAVE_XAA
    I810_Sync,
    I810_FillRect,
    I810_BlitRect,
+#else
+   NULL,
+   NULL,
+   NULL,
+#endif
 #if 0
    I810_BlitTransRect
 #else
@@ -186,6 +193,7 @@ I810_SetViewport(ScrnInfoPtr pScrn, int x, int y, int flags)
    pI810->DGAViewportStatus = 0;
 }
 
+#ifdef HAVE_XAA
 static void
 I810_FillRect(ScrnInfoPtr pScrn,
 	      int x, int y, int w, int h, unsigned long color)
@@ -226,6 +234,7 @@ I810_BlitRect(ScrnInfoPtr pScrn,
       SET_SYNC_FLAG(pI810->AccelInfoRec);
    }
 }
+#endif
 
 #if 0
 static void
diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index ba11245..0f891bb 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -379,7 +379,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
 
    pI810DRI->regsSize = I810_REG_SIZE;
    if (drmAddMap(pI810->drmSubFD, (drm_handle_t) pI810->MMIOAddr,
-		 pI810DRI->regsSize, DRM_REGISTERS, 0, 
+		 pI810DRI->regsSize, DRM_REGISTERS, 0,
 		 (drmAddress) &pI810DRI->regs) < 0) {
       xf86DrvMsg(pScreen->myNum, X_ERROR, "[drm] drmAddMap(regs) failed\n");
       DRICloseScreen(pScreen);
@@ -421,7 +421,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
     * under the DRI.
     */
 
-   drmAgpAlloc(pI810->drmSubFD, 4096 * 1024, 1, NULL, 
+   drmAgpAlloc(pI810->drmSubFD, 4096 * 1024, 1, NULL,
 	       (drmAddress) &dcacheHandle);
    pI810->dcacheHandle = dcacheHandle;
 
@@ -507,7 +507,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
 		 "[agp] GART: no dcache memory found\n");
    }
 
-   drmAgpAlloc(pI810->drmSubFD, back_size, 0, NULL, 
+   drmAgpAlloc(pI810->drmSubFD, back_size, 0, NULL,
 	       (drmAddress) &agpHandle);
    pI810->backHandle = agpHandle;
 
@@ -564,10 +564,10 @@ I810DRIScreenInit(ScreenPtr pScreen)
    /* Now allocate and bind the agp space.  This memory will include the
     * regular framebuffer as well as texture memory.
     */
-   drmAgpAlloc(pI810->drmSubFD, sysmem_size, 0, NULL, 
+   drmAgpAlloc(pI810->drmSubFD, sysmem_size, 0, NULL,
 	       (drmAddress)&agpHandle);
    pI810->sysmemHandle = agpHandle;
-   
+
    if (agpHandle != DRM_AGP_NO_HANDLE) {
       if (drmAgpBind(pI810->drmSubFD, agpHandle, 0) == 0) {
 	xf86DrvMsg(pScrn->scrnIndex, X_INFO,
@@ -610,7 +610,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
       }
       drmAgpAlloc(pI810->drmSubFD, pI810->MC.Size, 0, NULL,
 		  (drmAddress) &agpHandle);
-      
+
       pI810->xvmcHandle = agpHandle;
 
       if (agpHandle != DRM_AGP_NO_HANDLE) {
@@ -634,7 +634,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
    }
 
    drmAgpAlloc(pI810->drmSubFD, 4096, 2,
-	       (unsigned long *)&pI810->CursorPhysical, 
+	       (unsigned long *)&pI810->CursorPhysical,
 	       (drmAddress) &agpHandle);
 
    pI810->cursorHandle = agpHandle;
@@ -787,7 +787,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
    pI810DRI->agp_buf_size = pI810->BufferMem.Size;
 
    if (drmAddMap(pI810->drmSubFD, (drm_handle_t) pI810->LpRing->mem.Start,
-		 pI810->LpRing->mem.Size, DRM_AGP, 0, 
+		 pI810->LpRing->mem.Size, DRM_AGP, 0,
 		 (drmAddress) &pI810->ring_map) < 0) {
       xf86DrvMsg(pScreen->myNum, X_ERROR,
 		 "[drm] drmAddMap(ring_map) failed.  Disabling DRI.\n");
@@ -821,7 +821,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
    }
 
    if (drmAddMap(pI810->drmSubFD, (drm_handle_t) pI810->TexMem.Start,
-		 pI810->TexMem.Size, DRM_AGP, 0, 
+		 pI810->TexMem.Size, DRM_AGP, 0,
 		 (drmAddress) &pI810DRI->textures) < 0) {
       xf86DrvMsg(pScreen->myNum, X_ERROR,
 		 "[drm] drmAddMap(textures) failed.  Disabling DRI.\n");
@@ -975,7 +975,7 @@ I810DRIFinishScreenInit(ScreenPtr pScreen)
     */
    if (info->allowPageFlip && info->drmMinor >= 3) {
      ShadowFBInit( pScreen, I810DRIRefreshArea );
-   } 
+   }
    else
      info->allowPageFlip = 0;
    return DRIFinishScreenInit(pScreen);
@@ -1009,11 +1009,20 @@ I810DRISwapContext(ScreenPtr pScreen, DRISyncType syncType,
 }
 
 static void
+I810DRISetNeedSync(ScrnInfoPtr pScrn)
+{
+#ifdef HAVE_XAA
+   I810Ptr pI810 = I810PTR(pScrn);
+   if (pI810->AccelInfoRec)
+	pI810->AccelInfoRec->NeedToSync = TRUE;
+#endif
+}
+
+static void
 I810DRIInitBuffers(WindowPtr pWin, RegionPtr prgn, CARD32 index)
 {
    ScreenPtr pScreen = pWin->drawable.pScreen;
    ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
-   I810Ptr pI810 = I810PTR(pScrn);
    BoxPtr pbox = REGION_RECTS(prgn);
    int nbox = REGION_NUM_RECTS(prgn);
 
@@ -1041,8 +1050,7 @@ I810DRIInitBuffers(WindowPtr pWin, RegionPtr prgn, CARD32 index)
    }
    I810SelectBuffer(pScrn, I810_SELECT_FRONT);
 
-   if (pI810->AccelInfoRec)
-   	pI810->AccelInfoRec->NeedToSync = TRUE;
+   I810DRISetNeedSync(pScrn);
 }
 
 /* This routine is a modified form of XAADoBitBlt with the calls to
@@ -1058,7 +1066,6 @@ I810DRIMoveBuffers(WindowPtr pParent, DDXPointRec ptOldOrg,
 {
    ScreenPtr pScreen = pParent->drawable.pScreen;
    ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
-   I810Ptr pI810 = I810PTR(pScrn);
    BoxPtr pboxTmp, pboxNext, pboxBase;
    DDXPointPtr pptTmp, pptNew2 = NULL;
    int xdir, ydir;
@@ -1201,8 +1208,7 @@ I810DRIMoveBuffers(WindowPtr pParent, DDXPointRec ptOldOrg,
       free(pboxNew1);
    }
 
-   if (pI810->AccelInfoRec)
-	pI810->AccelInfoRec->NeedToSync = TRUE;
+   I810DRISetNeedSync(pScrn);
 }
 
 
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 141c19c..df8f8e1 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -68,6 +68,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xf86xv.h"
 #include <X11/extensions/Xv.h>
 #include "vbe.h"
+#include "xf86fbman.h"
 
 #include "i810.h"
 
@@ -1094,6 +1095,7 @@ DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
    hwp->writeCrtc(hwp, IO_CTNL, temp);
 }
 
+#ifdef HAVE_XAA
 static void
 I810SetRingRegs(ScrnInfoPtr scrn)
 {
@@ -1113,6 +1115,7 @@ I810SetRingRegs(ScrnInfoPtr scrn)
    itemp |= ((pI810->LpRing->mem.Size - 4096) | RING_NO_REPORT | RING_VALID);
    OUTREG(LP_RING + RING_LEN, itemp);
 }
+#endif
 
 static void
 I810Restore(ScrnInfoPtr scrn)
@@ -1700,6 +1703,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
       return FALSE;
    }
 
+#ifdef HAVE_XAA
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_NOACCEL, FALSE)) {
       if (pI810->LpRing->mem.Size != 0) {
 	 I810SetRingRegs(scrn);
@@ -1711,6 +1715,7 @@ I810ScreenInit(SCREEN_INIT_ARGS_DECL)
 	     I810EmitFlush(scrn);
       }
    }
+#endif
 
    miInitializeBackingStore(screen);
    xf86SetBackingStore(screen);
@@ -1938,11 +1943,13 @@ I810LeaveVT(VT_FUNC_ARGS_DECL)
    }
 #endif
 
+#ifdef HAVE_XAA
    if (pI810->AccelInfoRec != NULL) {
       I810RefreshRing(scrn);
       I810Sync(scrn);
       pI810->AccelInfoRec->NeedToSync = FALSE;
    }
+#endif
    I810Restore(scrn);
 
    if (!I810UnbindGARTMemory(scrn))
@@ -1961,14 +1968,18 @@ I810CloseScreen(CLOSE_SCREEN_ARGS_DECL)
    ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
    vgaHWPtr hwp = VGAHWPTR(scrn);
    I810Ptr pI810 = I810PTR(scrn);
+#ifdef HAVE_XAA
    XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
+#endif
 
    if (scrn->vtSema == TRUE) {
+#ifdef HAVE_XAA
       if (pI810->AccelInfoRec != NULL) {
 	 I810RefreshRing(scrn);
 	 I810Sync(scrn);
 	 pI810->AccelInfoRec->NeedToSync = FALSE;
       }
+#endif
       I810Restore(scrn);
       vgaHWLock(hwp);
    }
@@ -1993,12 +2004,14 @@ I810CloseScreen(CLOSE_SCREEN_ARGS_DECL)
       pI810->ScanlineColorExpandBuffers = NULL;
    }
 
+#ifdef HAVE_XAA
    if (infoPtr) {
       if (infoPtr->ScanlineColorExpandBuffers)
 	 free(infoPtr->ScanlineColorExpandBuffers);
       XAADestroyInfoRec(infoPtr);
       pI810->AccelInfoRec = NULL;
    }
+#endif
 
    if (pI810->CursorInfoRec) {
       xf86DestroyCursorInfoRec(pI810->CursorInfoRec);
diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 56d04a4..4ebad66 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -49,8 +49,6 @@ THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "i810.h"
 #include "xf86xv.h"
 #include <X11/extensions/Xv.h>
-#include "xaa.h"
-#include "xaalocal.h"
 #include "dixstruct.h"
 #include "fourcc.h"
 
commit 45ab003a5860fd4290df24739d2520fddfe27a8f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 21:28:40 2012 +0100

    sna: Check for overlapping glyphs within each list, then overlapping lists
    
    Spotted by Zhigang Gong: this optimisation avoids the problem of
    multiple lines passed in a single request (using multiple lists). Since
    the start of a line overlaps the previous line's bounding box when we
    use the simple bbox comparison, we would always declare those runs as
    overlapping and so could never substitute a glyph mask. However, we can
    reduce the problem to first checking for overlapping glyphs within each
    list, and then checking for overlapping lists. Very, very clever.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6475863..d4f6dec 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -603,7 +603,7 @@ void sna_composite_trifan(CARD8 op,
 Bool sna_gradients_create(struct sna *sna);
 void sna_gradients_close(struct sna *sna);
 
-Bool sna_glyphs_create(struct sna *sna);
+bool sna_glyphs_create(struct sna *sna);
 void sna_glyphs(CARD8 op,
 		PicturePtr src,
 		PicturePtr dst,
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index dbacaa8..84ee13c 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -144,7 +144,7 @@ void sna_glyphs_close(struct sna *sna)
  * This function allocates the storage pixmap, and then fills in the
  * rest of the allocated structures for all caches with the given format.
  */
-Bool sna_glyphs_create(struct sna *sna)
+bool sna_glyphs_create(struct sna *sna)
 {
 	ScreenPtr screen = sna->scrn->pScreen;
 	pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff };
@@ -441,7 +441,7 @@ static void apply_damage_clipped_to_dst(struct sna_composite_op *op,
 	sna_damage_add_box(op->damage, &box);
 }
 
-static Bool
+static bool
 glyphs_to_dst(struct sna *sna,
 	      CARD8 op,
 	      PicturePtr src,
@@ -593,7 +593,7 @@ next_glyph:
 	return TRUE;
 }
 
-static Bool
+static bool
 glyphs_slow(struct sna *sna,
 	    CARD8 op,
 	    PicturePtr src,
@@ -730,7 +730,7 @@ too_large(struct sna *sna, int width, int height)
 		height > sna->render.max_3d_size);
 }
 
-static Bool
+static bool
 glyphs_via_mask(struct sna *sna,
 		CARD8 op,
 		PicturePtr src,
@@ -1033,18 +1033,33 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 	PictFormatPtr format = list[0].format;
 	int16_t x1, x2, y1, y2;
 	int16_t x, y;
-	BoxRec extents;
-	Bool first = TRUE;
+	BoxRec stack_extents[64], *list_extents = stack_extents;
+	int i, j;
+
+	if (nlist > ARRAY_SIZE(stack_extents) + 1) {
+		list_extents = malloc(sizeof(BoxRec) * (nlist-1));
+		if (list_extents == NULL)
+			return NULL;
+	}
 
 	x = 0;
 	y = 0;
-	extents.x1 = 0;
-	extents.y1 = 0;
-	extents.x2 = 0;
-	extents.y2 = 0;
-	while (nlist--) {
+	for (i = 0; i < nlist; i++) {
+		BoxRec extents;
+		bool first = true;
 		int n = list->len;
 
+		/* Check the intersection of each glyph within the list and
+		 * then each list against the previous lists.
+		 *
+		 * If we overlap then we cannot substitute a mask as the
+		 * rendering will be altered.
+		 */
+		extents.x1 = 0;
+		extents.y1 = 0;
+		extents.x2 = 0;
+		extents.y2 = 0;
+
 		if (format->format != list->format->format)
 			return NULL;
 
@@ -1074,8 +1089,10 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 			} else {
 				/* Potential overlap */
 				if (x1 < extents.x2 && x2 > extents.x1 &&
-				    y1 < extents.y2 && y2 > extents.y1)
-					return NULL;
+				    y1 < extents.y2 && y2 > extents.y1) {
+					format = NULL;
+					goto out;
+				}
 
 				if (x1 < extents.x1)
 					extents.x1 = x1;
@@ -1089,8 +1106,26 @@ glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 			x += glyph->info.xOff;
 			y += glyph->info.yOff;
 		}
+
+		/* Incrementally building a region is expensive. We expect
+		 * the number of lists to be small, so just keep a list
+		 * of the previous boxes and walk those.
+		 */
+		for (j = 0; j < i; j++) {
+			if (extents.x2 < list_extents[j].x1 &&
+			    extents.x1 > list_extents[j].x2 &&
+			    extents.y2 < list_extents[j].y1 &&
+			    extents.y1 > list_extents[j].y2) {
+				format = NULL;
+				goto out;
+			}
+		}
+		list_extents[i] = extents;
 	}
 
+out:
+	if (list_extents != stack_extents)
+		free(list_extents);
 	return format;
 }
 
commit 8066bc33d78e78ce7c13833b08a7daaea2f3ed22
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 11:25:11 2012 +0100

    sna: Fix reversal of inside logic for BitmapToRegion
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index bad0c51..fa5d032 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -90,10 +90,10 @@ fbBitmapToRegion(PixmapPtr pixmap)
 		for (base = 0; bits < end; base += FB_UNIT) {
 			FbBits w = READ(bits++);
 			if (x1 < 0) {
-				if (!~w)
+				if (!w)
 					continue;
 			} else {
-				if (!w)
+				if (!~w)
 					continue;
 			}
 			for (i = 0; i < FB_UNIT; i++) {
commit 8a9a17cd092d156272e8953974119f527b2b77eb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 11:12:12 2012 +0100

    sna: Improve the check for assertions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index f1ffacc..7ab7ab8 100644
--- a/configure.ac
+++ b/configure.ac
@@ -320,7 +320,6 @@ if test "x$DEBUG" = xno; then
 	AC_DEFINE(NDEBUG,1,[Disable internal debugging])
 fi
 if test "x$DEBUG" != xno; then
-	AC_DEFINE(HAS_EXTRA_DEBUG,1,[Enable additional debugging])
 	PKG_CHECK_MODULES(VALGRIND, [valgrind], have_valgrind=yes, have_valgrind=no)
 	if test x$have_valgrind = xyes; then
 		AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index e645ee6..484c982 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -1100,14 +1100,18 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled: %s\n", BUILDER_DESCRIPTION);
 #endif
-#if HAS_EXTRA_DEBUG
+#if !NDEBUG
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
-		   "SNA compiled with debugging enabled\n");
+		   "SNA compiled with assertions enabled\n");
 #endif
 #if DEBUG_MEMORY
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled with memory allocation reporting enabled\n");
 #endif
+#if DEBUG_PIXMAP
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "SNA compiled with extra pixmap/damage validation\n");
+#endif
 
 	DBG(("%s\n", __FUNCTION__));
 	DBG(("pixman version: %s\n", pixman_version_string()));
commit b11bc37684181390fc1400afb44054785104ca15
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 11:09:16 2012 +0100

    sna: Simplify the DBG incarnation
    
    It was only ever used in conjunction with HAS_DEBUG_FULL. For debug
    purposes it is as easy to redefine DBG locally. By simplifying the DBG
    macro we can create it consistently and so reduce the number of compiler
    warnings.
    
    Long term, this has to be dynamic. Sigh.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 720a0ba..f1ffacc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -315,6 +315,7 @@ if test "x$KMS_ONLY" = xyes; then
 fi
 
 AM_CONDITIONAL(DEBUG, test x$DEBUG != xno)
+AM_CONDITIONAL(FULL_DEBUG, test x$FULL_DEBUG == xfull)
 if test "x$DEBUG" = xno; then
 	AC_DEFINE(NDEBUG,1,[Disable internal debugging])
 fi
diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 8cd3c45..604a5db 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -30,6 +30,10 @@ AM_CFLAGS = \
 	@DRI_CFLAGS@ \
 	$(NULL)
 
+if DEBUG
+AM_CFLAGS += @VALGRIND_CFLAGS@
+endif
+
 noinst_LTLIBRARIES = libsna.la
 libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@ fb/libfb.la
 
@@ -94,8 +98,7 @@ libsna_la_SOURCES += \
 	$(NULL)
 endif
 
-if DEBUG
-AM_CFLAGS += @VALGRIND_CFLAGS@
+if FULL_DEBUG
 libsna_la_SOURCES += \
 	kgem_debug.c \
 	kgem_debug.h \
diff --git a/src/sna/blt.c b/src/sna/blt.c
index c0922b5..1ad5eee 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -35,11 +35,6 @@
 #define USE_SSE2 1
 #endif
 
-#if DEBUG_BLT
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #if USE_SSE2
 #include <xmmintrin.h>
 
diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
index 075d6cc..bad0c51 100644
--- a/src/sna/fb/fbbitmap.c
+++ b/src/sna/fb/fbbitmap.c
@@ -83,7 +83,7 @@ fbBitmapToRegion(PixmapPtr pixmap)
 		if (READ(bits) & mask0)
 			x1 = 0;
 		else
-			x1 =-1;
+			x1 = -1;
 
 		/* Process all words which are fully in the pixmap */
 		end = bits + (width >> FB_SHIFT);
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index b41c386..93880a8 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -38,11 +38,6 @@
 
 #include "gen2_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE 0
 #define NO_COMPOSITE_SPANS 0
 #define NO_COPY 0
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index cfc8c63..20286fc 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -37,11 +37,6 @@
 
 #include "gen3_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE 0
 #define NO_COMPOSITE_SPANS 0
 #define NO_COPY 0
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c577536..ed85554 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -42,11 +42,6 @@
 
 #include "gen4_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 /* gen4 has a serious issue with its shaders that we need to flush
  * after every rectangle... So until that is resolved, prefer
  * the BLT engine.
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 49cc17e..7a20303 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -42,11 +42,6 @@
 
 #include "gen5_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE_SPANS 0
 
 #define PREFER_BLT_FILL 1
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 778a072..6d8fbfd 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -42,11 +42,6 @@
 
 #include "gen6_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE 0
 #define NO_COMPOSITE_SPANS 0
 #define NO_COPY 0
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index f9b2e9e..afb4b9b 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -42,11 +42,6 @@
 
 #include "gen7_render.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE 0
 #define NO_COMPOSITE_SPANS 0
 #define NO_COPY 0
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2578ff9..4fd4d1f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -55,6 +55,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
+#define DBG_NO_CACHE 0
 #define DBG_NO_CACHE_LEVEL 0
 #define DBG_NO_VMAP 0
 #define DBG_NO_LLC 0
@@ -66,12 +67,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG_NO_RELAXED_FENCING 0
 #define DBG_DUMP 0
 
-#define NO_CACHE 0
-
-#if DEBUG_KGEM
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
+#define SHOW_BATCH 0
 
 /* Worst case seems to be 965gm where we cannot write within a cacheline that
  * is being simultaneously being read by the GPU, or within the sampler
@@ -1256,7 +1252,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	bo->binding.offset = 0;
 	kgem_bo_clear_scanout(kgem, bo);
 
-	if (NO_CACHE)
+	if (DBG_NO_CACHE)
 		goto destroy;
 
 	if (bo->vmap) {
@@ -1446,7 +1442,7 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 				kgem_bo_free(kgem, bo);
 		}
 	}
-#if DEBUG_KGEM
+#if HAS_DEBUG_FULL
 	{
 		int count = 0;
 		list_for_each_entry(bo, &kgem->flushing, request)
@@ -1543,7 +1539,7 @@ static bool kgem_retire__requests(struct kgem *kgem)
 		free(rq);
 	}
 
-#if DEBUG_KGEM
+#if HAS_DEBUG_FULL
 	{
 		int count = 0;
 
@@ -1941,7 +1937,7 @@ void _kgem_submit(struct kgem *kgem)
 
 	kgem_finish_partials(kgem);
 
-#if DEBUG_BATCH
+#if HAS_DEBUG_FULL && SHOW_BATCH
 	__kgem_batch_debug(kgem, batch_end);
 #endif
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 22ae401..8d01807 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -36,10 +36,10 @@
 
 #include "compiler.h"
 
-#if DEBUG_KGEM
-#define DBG_HDR(x) ErrorF x
+#if HAS_DEBUG_FULL
+#define DBG(x) ErrorF x
 #else
-#define DBG_HDR(x)
+#define DBG(x)
 #endif
 
 struct kgem_bo {
@@ -455,8 +455,8 @@ static inline bool kgem_bo_can_blt(struct kgem *kgem,
 static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 				       struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
-		 __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
+	DBG(("%s: domain=%d, offset: %d size: %d\n",
+	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
 
 	if (bo->domain == DOMAIN_GTT)
 		return true;
@@ -473,8 +473,8 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 
 static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: map=%p, tiling=%d, domain=%d\n",
-		 __FUNCTION__, bo->map, bo->tiling, bo->domain));
+	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
+	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
 
 	if (bo->map == NULL)
 		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
@@ -496,15 +496,15 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
-		 bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
+	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
 	return bo->rq;
 }
 
 static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
-		 bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
+	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
 	if (bo->rq && !bo->exec)
 		kgem_retire(kgem);
 	return kgem_bo_is_busy(bo);
@@ -520,7 +520,7 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 
 static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	bo->dirty = true;
 }
 
@@ -558,6 +558,4 @@ static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
 }
 #endif
 
-#undef DBG_HDR
-
 #endif /* KGEM_H */
diff --git a/src/sna/sna.h b/src/sna/sna.h
index f62cfbc..6475863 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -67,27 +67,11 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <libudev.h>
 #endif
 
+#if HAS_DEBUG_FULL
+#define DBG(x) ErrorF x
+#else
 #define DBG(x)
-
-#define DEBUG_ALL (HAS_DEBUG_FULL || 0)
-#define DEBUG_ACCEL (DEBUG_ALL || 0)
-#define DEBUG_BATCH (DEBUG_ALL || 0)
-#define DEBUG_BLT (DEBUG_ALL || 0)
-#define DEBUG_COMPOSITE (DEBUG_ALL || 0)
-#define DEBUG_DAMAGE (DEBUG_ALL || 0)
-#define DEBUG_DISPLAY (DEBUG_ALL || 0)
-#define DEBUG_DRI (DEBUG_ALL || 0)
-#define DEBUG_DRIVER (DEBUG_ALL || 0)
-#define DEBUG_GRADIENT (DEBUG_ALL || 0)
-#define DEBUG_GLYPHS (DEBUG_ALL || 0)
-#define DEBUG_IO (DEBUG_ALL || 0)
-#define DEBUG_KGEM (DEBUG_ALL || 0)
-#define DEBUG_RENDER (DEBUG_ALL || 0)
-#define DEBUG_STREAM (DEBUG_ALL || 0)
-#define DEBUG_TRAPEZOIDS (DEBUG_ALL || 0)
-#define DEBUG_VIDEO (DEBUG_ALL || 0)
-#define DEBUG_VIDEO_TEXTURED (DEBUG_ALL || 0)
-#define DEBUG_VIDEO_OVERLAY (DEBUG_ALL || 0)
+#endif
 
 #define DEBUG_NO_RENDER 0
 #define DEBUG_NO_BLT 0
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 973ac32..6567de1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -51,11 +51,6 @@
 #include <sys/mman.h>
 #include <unistd.h>
 
-#if DEBUG_ACCEL
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define FORCE_INPLACE 0
 #define FORCE_FALLBACK 0
 #define FORCE_FLUSH 0
@@ -12441,7 +12436,7 @@ static void sna_accel_inactive(struct sna *sna)
 
 	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)TIME));
 
-#if DEBUG_ACCEL
+#if HAS_FULL_DEBUG
 	{
 		unsigned count, bytes;
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 0b0471e..1d2678a 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -38,11 +38,6 @@
 #include "sna_reg.h"
 #include "rop.h"
 
-#if DEBUG_BLT
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_BLT_COMPOSITE 0
 #define NO_BLT_COPY 0
 #define NO_BLT_COPY_BOXES 0
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 606acb6..2fcc0d9 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -36,11 +36,6 @@
 
 #include <mipict.h>
 
-#if DEBUG_COMPOSITE
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_COMPOSITE 0
 #define NO_COMPOSITE_RECTANGLES 0
 
@@ -372,7 +367,7 @@ sna_compute_composite_extents(BoxPtr extents,
 	return extents->x1 < extents->x2 && extents->y1 < extents->y2;
 }
 
-#if DEBUG_COMPOSITE
+#if HAS_DEBUG_FULL
 static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char *function)
 {
 	if (box->x1 < 0 || box->y1 < 0 ||
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 745e2d1..4bd4b9b 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -60,10 +60,7 @@ static inline bool region_is_singular(RegionRec *r)
 	return r->data == NULL;
 }
 
-#if DEBUG_DAMAGE
-#undef DBG
-#define DBG(x) ErrorF x
-
+#if HAS_DEBUG_FULL
 static const char *_debug_describe_region(char *buf, int max,
 					  RegionPtr region)
 {
@@ -151,7 +148,6 @@ static const char *_debug_describe_damage(char *buf, int max,
 
 	return buf;
 }
-
 #endif
 
 static void
@@ -634,7 +630,7 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 				      REGION_NUM_RECTS(region));
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 					    RegionPtr region)
 {
@@ -715,7 +711,7 @@ __sna_damage_add_boxes(struct sna_damage *damage,
 	return _sna_damage_create_elt_from_boxes(damage, box, n, dx, dy);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 struct sna_damage *_sna_damage_add_boxes(struct sna_damage *damage,
 					 const BoxRec *b, int n,
 					 int16_t dx, int16_t dy)
@@ -800,7 +796,7 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 	return _sna_damage_create_elt_from_rectangles(damage, r, n, dx, dy);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 struct sna_damage *_sna_damage_add_rectangles(struct sna_damage *damage,
 					      const xRectangle *r, int n,
 					      int16_t dx, int16_t dy)
@@ -882,7 +878,7 @@ __sna_damage_add_points(struct sna_damage *damage,
 	return damage;
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 struct sna_damage *_sna_damage_add_points(struct sna_damage *damage,
 					  const DDXPointRec *p, int n,
 					  int16_t dx, int16_t dy)
@@ -909,7 +905,7 @@ struct sna_damage *_sna_damage_add_points(struct sna_damage *damage,
 }
 #endif
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 fastcall struct sna_damage *_sna_damage_add_box(struct sna_damage *damage,
 						const BoxRec *box)
 {
@@ -1060,7 +1056,7 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 				      REGION_NUM_RECTS(region));
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 fastcall struct sna_damage *_sna_damage_subtract(struct sna_damage *damage,
 						 RegionPtr region)
 {
@@ -1131,7 +1127,7 @@ inline static struct sna_damage *__sna_damage_subtract_box(struct sna_damage *da
 	return _sna_damage_create_elt(damage, box, 1);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 fastcall struct sna_damage *_sna_damage_subtract_box(struct sna_damage *damage,
 						     const BoxRec *box)
 {
@@ -1212,7 +1208,7 @@ static struct sna_damage *__sna_damage_subtract_boxes(struct sna_damage *damage,
 	return _sna_damage_create_elt_from_boxes(damage, box, n, dx, dy);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 fastcall struct sna_damage *_sna_damage_subtract_boxes(struct sna_damage *damage,
 						       const BoxRec *box, int n,
 						       int dx, int dy)
@@ -1272,7 +1268,7 @@ static int __sna_damage_contains_box(struct sna_damage *damage,
 	return pixman_region_contains_rectangle(&damage->region, (BoxPtr)box);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 int _sna_damage_contains_box(struct sna_damage *damage,
 			     const BoxRec *box)
 {
@@ -1338,7 +1334,7 @@ static Bool __sna_damage_intersect(struct sna_damage *damage,
 	return RegionNotEmpty(result);
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 Bool _sna_damage_intersect(struct sna_damage *damage,
 			   RegionPtr region, RegionPtr result)
 {
@@ -1391,7 +1387,7 @@ struct sna_damage *_sna_damage_reduce(struct sna_damage *damage)
 	return damage;
 }
 
-#if DEBUG_DAMAGE
+#if HAS_DEBUG_FULL
 int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes)
 {
 	char damage_buf[1000];
@@ -1436,7 +1432,7 @@ void __sna_damage_destroy(struct sna_damage *damage)
 	__freed_damage = damage;
 }
 
-#if DEBUG_DAMAGE && TEST_DAMAGE
+#if TEST_DAMAGE && HAS_DEBUG_FULL
 struct sna_damage_selftest{
 	int width, height;
 };
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index a006ade..21db3e3 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -287,7 +287,7 @@ static inline void sna_damage_destroy(struct sna_damage **damage)
 
 void _sna_damage_debug_get_region(struct sna_damage *damage, RegionRec *r);
 
-#if DEBUG_DAMAGE && TEST_DAMAGE
+#if HAS_DEBUG_FULL && TEST_DAMAGE
 void sna_damage_selftest(void);
 #else
 static inline void sna_damage_selftest(void) {}
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 12a6bac..1c808d1 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -51,11 +51,6 @@
 
 #include "intel_options.h"
 
-#if DEBUG_DISPLAY
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #if 0
 #define __DBG DBG
 #else
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 75c66b7..405a7cd 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -46,11 +46,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <i915_drm.h>
 #include <dri2.h>
 
-#if DEBUG_DRI
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #if DRI2INFOREC_VERSION <= 2
 #error DRI2 version supported by the Xserver is too old
 #endif
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index bbbcb63..e645ee6 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -75,11 +75,6 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "git_version.h"
 #endif
 
-#if DEBUG_DRIVER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 static DevPrivateKeyRec sna_pixmap_key;
 static DevPrivateKeyRec sna_gc_key;
 static DevPrivateKeyRec sna_glyph_key;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 0179520..dbacaa8 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -69,11 +69,6 @@
 
 #include <mipict.h>
 
-#if DEBUG_GLYPHS
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define FALLBACK 0
 #define NO_GLYPH_CACHE 0
 #define NO_GLYPHS_TO_DST 0
@@ -86,7 +81,7 @@
 #define GLYPH_MAX_SIZE 64
 #define GLYPH_CACHE_SIZE (CACHE_PICTURE_SIZE * CACHE_PICTURE_SIZE / (GLYPH_MIN_SIZE * GLYPH_MIN_SIZE))
 
-#if DEBUG_GLYPHS
+#if HAS_DEBUG_FULL
 static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char *function)
 {
 	if (box->x1 < 0 || box->y1 < 0 ||
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 9ac0328..d9f6293 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -32,11 +32,6 @@
 #include "sna.h"
 #include "sna_render.h"
 
-#if DEBUG_GRADIENT
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define xFixedToDouble(f) pixman_fixed_to_double(f)
 
 static int
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index ffa86b1..2baee4c 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -35,11 +35,6 @@
 
 #include <sys/mman.h>
 
-#if DEBUG_IO
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define PITCH(x, y) ALIGN((x)*(y), 4)
 
 #define FORCE_INPLACE 0 /* 1 upload directly, -1 force indirect */
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index f6a562b..24922b3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -30,11 +30,6 @@
 #include "sna_render_inline.h"
 #include "fb/fbpict.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define NO_REDIRECT 0
 #define NO_CONVERT 0
 #define NO_FIXUP 0
diff --git a/src/sna/sna_stream.c b/src/sna/sna_stream.c
index 7f05d21..aab1549 100644
--- a/src/sna/sna_stream.c
+++ b/src/sna/sna_stream.c
@@ -28,11 +28,6 @@
 #include "sna.h"
 #include "sna_render.h"
 
-#if DEBUG_STREAM
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 int sna_static_stream_init(struct sna_static_stream *stream)
 {
 	stream->used = 0;
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index ae14d79..fdc297a 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -33,11 +33,6 @@
 #include "sna_render.h"
 #include "fb/fbpict.h"
 
-#if DEBUG_RENDER
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 struct sna_tile_span {
 	BoxRec box;
 	float opacity;
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 56c6a3e..744818f 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -38,11 +38,6 @@
 
 #include <mipict.h>
 
-#if DEBUG_TRAPEZOIDS
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #if 0
 #define __DBG(x) ErrorF x
 #else
@@ -79,7 +74,7 @@ typedef void (*span_func_t)(struct sna *sna,
 			    const BoxRec *box,
 			    int coverage);
 
-#if DEBUG_TRAPEZOIDS
+#if HAS_DEBUG_FULL
 static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char *function)
 {
 	if (box->x1 < 0 || box->y1 < 0 ||
@@ -2000,7 +1995,7 @@ mono_merge_unsorted_edges(struct mono_edge *head, struct mono_edge *unsorted)
 	return mono_merge_sorted_edges(head, unsorted);
 }
 
-#if DEBUG_TRAPEZOIDS
+#if 0
 static inline void
 __dbg_mono_edges(const char *function, struct mono_edge *edges)
 {
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 6ad81c3..71d1bbc 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -74,11 +74,6 @@ static inline Bool sna_video_xvmc_setup(struct sna *sna,
 }
 #endif
 
-#if DEBUG_VIDEO_TEXTURED
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 void sna_video_free_buffers(struct sna *sna, struct sna_video *video)
 {
 	unsigned int i;
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index d47f745..99f9ca5 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -39,11 +39,6 @@
 
 #include "intel_options.h"
 
-#if DEBUG_VIDEO_OVERLAY
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
 
 #define HAS_GAMMA(sna) ((sna)->kgem.gen >= 30)
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index 8c3cdaa..d0a4808 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -40,11 +40,6 @@
 #include <drm_fourcc.h>
 #include <i915_drm.h>
 
-#if DEBUG_VIDEO_OVERLAY
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define IMAGE_MAX_WIDTH		2048
 #define IMAGE_MAX_HEIGHT	2048
 
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 1b3b3af..805aee7 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -39,11 +39,6 @@
 #include "sna_video_hwmc.h"
 #endif
 
-#if DEBUG_VIDEO_TEXTURED
-#undef DBG
-#define DBG(x) ErrorF x
-#endif
-
 #define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
 
 static Atom xvBrightness, xvContrast, xvSyncToVblank;
commit 21798a88676e91049917fafd3196dd4374b94226
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 09:23:20 2012 +0100

    sna: Promote large operations to use the whole GPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index fd1abc9..973ac32 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1050,9 +1050,6 @@ static inline bool pixmap_inplace(struct sna *sna,
 }
 
 static bool
-sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned flags);
-
-static bool
 sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -2067,6 +2064,25 @@ drawable_gc_flags(DrawablePtr draw, GCPtr gc, bool partial)
 	return (partial ? MOVE_READ : 0) | MOVE_WRITE | MOVE_INPLACE_HINT;
 }
 
+static inline bool
+box_inplace(PixmapPtr pixmap, const BoxRec *box)
+{
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	return ((int)(box->x2 - box->x1) * (int)(box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 12) >= sna->kgem.half_cpu_cache_pages;
+}
+
+static inline struct sna_pixmap *
+sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
+{
+	assert(priv->gpu_bo);
+	if (!priv->pinned && priv->gpu_bo->proxy == NULL &&
+	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
+		list_move(&priv->inactive, &sna->active_pixmaps);
+	priv->clear = false;
+	priv->cpu = false;
+	return priv;
+}
+
 static bool
 sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int flags)
 {
@@ -2088,6 +2104,12 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		goto done;
 	}
 
+	if ((flags & MOVE_READ) == 0)
+		sna_damage_subtract_box(&priv->cpu_damage, box);
+
+	sna_damage_reduce(&priv->cpu_damage);
+	assert_pixmap_damage(pixmap);
+
 	if (priv->gpu_bo == NULL) {
 		unsigned create, tiling;
 
@@ -2115,16 +2137,12 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height);
+			list_del(&priv->list);
 			goto done;
 		}
 	}
 	assert(priv->gpu_bo->proxy == NULL);
 
-	if ((flags & MOVE_READ) == 0)
-		sna_damage_subtract_box(&priv->cpu_damage, box);
-
-	sna_damage_reduce(&priv->cpu_damage);
-	assert_pixmap_damage(pixmap);
 	if (priv->cpu_damage == NULL) {
 		list_del(&priv->list);
 		goto done;
@@ -2243,18 +2261,19 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	}
 
 done:
-	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
-		list_move(&priv->inactive, &sna->active_pixmaps);
-	priv->clear = false;
-	assert_pixmap_damage(pixmap);
-	return true;
-}
+	if (priv->cpu_damage == NULL &&
+	    flags & MOVE_WRITE &&
+	    box_inplace(pixmap, box)) {
+		DBG(("%s: large operation on undamaged, promoting to full GPU\n",
+		     __FUNCTION__));
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+		priv->undamaged = false;
+	}
 
-static inline bool
-box_inplace(PixmapPtr pixmap, const BoxRec *box)
-{
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	return ((int)(box->x2 - box->x1) * (int)(box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 12) >= sna->kgem.half_cpu_cache_pages;
+	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
+	return sna_pixmap_mark_active(sna, priv) != NULL;
 }
 
 #define PREFER_GPU	0x1
@@ -2550,18 +2569,6 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	return pixmap;
 }
 
-static inline struct sna_pixmap *
-sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
-{
-	assert(priv->gpu_bo);
-	if (!priv->pinned && priv->gpu_bo->proxy == NULL &&
-	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
-		list_move(&priv->inactive, &sna->active_pixmaps);
-	priv->clear = false;
-	priv->cpu = false;
-	return priv;
-}
-
 struct sna_pixmap *
 sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 {
@@ -2638,8 +2645,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height);
-			list_del(&priv->list);
-			priv->undamaged = false;
 			DBG(("%s: marking as all-damaged for GPU\n",
 			     __FUNCTION__));
 			goto active;
commit 1b6ad7a6ae6820c8f66d1c80613885ee84e7d316
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 08:54:11 2012 +0100

    sna: Only consider large clears as candidates for GPU migration
    
    If we are only operating on a small region of the pixmap and have required
    damage migration in the past, we are likely to require migration again
    at some point. So keep track of small damage areas.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ac98d35..fd1abc9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2254,7 +2254,7 @@ static inline bool
 box_inplace(PixmapPtr pixmap, const BoxRec *box)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	return ((box->x2 - box->x1) * (box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 15) >= sna->kgem.half_cpu_cache_pages;
+	return ((int)(box->x2 - box->x1) * (int)(box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 12) >= sna->kgem.half_cpu_cache_pages;
 }
 
 #define PREFER_GPU	0x1
@@ -10381,22 +10381,27 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	 */
 	hint = PREFER_GPU;
 	if (n == 1 && gc->fillStyle != FillStippled && alu_overwrites(gc->alu)) {
+		region.data = NULL;
 		if (priv->cpu_damage &&
 		    region_is_singular(gc->pCompositeClip)) {
-			region.data = NULL;
 			if (region_subsumes_damage(&region, priv->cpu_damage)) {
+				DBG(("%s: discarding existing CPU damage\n", __FUNCTION__));
 				sna_damage_destroy(&priv->cpu_damage);
 				list_del(&priv->list);
 			}
 		}
-		if (priv->cpu_damage == NULL) {
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height);
-			priv->undamaged = false;
-			priv->cpu = false;
+		if (region_subsumes_drawable(&region, &pixmap->drawable) ||
+		    box_inplace(pixmap, &region.extents)) {
+			DBG(("%s: promoting to full GPU\n", __FUNCTION__));
+			if (priv->cpu_damage == NULL) {
+				sna_damage_all(&priv->gpu_damage,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height);
+				priv->undamaged = false;
+				priv->cpu = false;
+			}
+			hint |= IGNORE_CPU;
 		}
-		hint |= IGNORE_CPU;
 	}
 
 	bo = sna_drawable_use_bo(draw, hint, &region.extents, &damage);
commit d8a75538ea1d2a79d6282b0e2dfd73cfdea1a480
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 08:38:39 2012 +0100

    sna: PadPixmap only writes to the out-of-bounds bits
    
    So we only need to declare it as reading the source pixmap and not mark
    it as damaged.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cb99e6a..ac98d35 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2735,7 +2735,7 @@ static bool must_check sna_validate_pixmap(DrawablePtr draw, PixmapPtr pixmap)
 	    FbEvenTile(pixmap->drawable.width *
 		       pixmap->drawable.bitsPerPixel)) {
 		DBG(("%s: flushing pixmap\n", __FUNCTION__));
-		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
 			return false;
 
 		fbPadPixmap(pixmap);
commit eafb454edf188e7dada1ddf886d1e46f0151968d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 08:38:08 2012 +0100

    sna: Rename conflicting symbols with uxa
    
    Reported-by: Christoph Reiter <reiter.christoph at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51887
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/Makefile.am b/src/sna/fb/Makefile.am
index 16f9b28..72d9bbf 100644
--- a/src/sna/fb/Makefile.am
+++ b/src/sna/fb/Makefile.am
@@ -5,6 +5,7 @@ libfb_la_LIBADD = $(PIXMAN_LIBS)
 
 libfb_la_SOURCES = 	\
 	fb.h		\
+	sfb.h		\
 	fbarc.c		\
 	fbarcbits.h	\
 	fbbitmap.c	\
diff --git a/src/sna/fb/fb.h b/src/sna/fb/fb.h
index 7847951..3339236 100644
--- a/src/sna/fb/fb.h
+++ b/src/sna/fb/fb.h
@@ -43,6 +43,8 @@
 #define DBG(x)
 #endif
 
+#include "sfb.h"
+
 #define WRITE(ptr, val) (*(ptr) = (val))
 #define READ(ptr) (*(ptr))
 
diff --git a/src/sna/fb/fbpict.h b/src/sna/fb/fbpict.h
index 6bcee34..1ce09df 100644
--- a/src/sna/fb/fbpict.h
+++ b/src/sna/fb/fbpict.h
@@ -24,6 +24,8 @@
 #ifndef FBPICT_H
 #define FBPICT_H
 
+#include "sfb.h"
+
 extern void
 fbComposite(CARD8 op,
             PicturePtr pSrc,
diff --git a/src/sna/fb/sfb.h b/src/sna/fb/sfb.h
new file mode 100644
index 0000000..a4d9d17
--- /dev/null
+++ b/src/sna/fb/sfb.h
@@ -0,0 +1,40 @@
+/* And rename to avoid symbol clashes with UXA */
+#define fbPolyArc sfbPolyArc
+#define fbBlt sfbBlt
+#define fbBltOne sfbBltOne
+#define fbBltPlane sfbBltPlane
+#define fbCopyNtoN sfbCopyNtoN
+#define fbCopy1toN sfbCopy1toN
+#define fbCopyNto1 sfbCopyNto1
+#define fbCopyArea sfbCopyArea
+#define fbCopyPlane sfbCopyPlane
+#define fbFill sfbFill
+#define fbSolidBoxClipped sfbSolidBoxClipped
+#define fbPolyFillRect sfbPolyFillRect
+#define fbFillSpans sfbFillSpans
+#define fbPadPixmap sfbPadPixmap
+#define fbValidateGC sfbValidateGC
+#define fbGetSpans sfbGetSpans
+#define fbPolyGlyphBlt sfbPolyGlyphBlt
+#define fbImageGlyphBlt sfbImageGlyphBlt
+#define fbPutImage sfbPutImage
+#define fbPuXYtImage sfbPutXYImage
+#define fbGetImage sfbGetImage
+#define fbPolyLine sfbPolyLine
+#define fbFixCoordModePrevious sfbFixCoordModePrevious
+#define fbPolySegment sfbPolySegment
+#define fbBitmapToRegion sfbBitmapToRegion
+#define fbPolyPoint sfbPolyPoint
+#define fbPushImage sfbPushImage
+#define fbPushPixels sfbPushPixels
+#define fbSetSpans sfbSetSpans
+#define fbSegment sfbSegment
+#define fbSegment1 sfbSegment1
+#define fbTransparentSpan sfbTransparentSpan
+#define fbStipple sfbStipple
+#define fbTile sfbTile
+#define fbReplicatePixel sfbReplicatePixel
+
+#define fbComposite sfbComposite
+#define image_from_pict simage_from_pict
+#define free_pixmap_pict sfree_pixmap_pict
commit 0af29175a087cc2e509962f8828790c8f7232611
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 01:24:23 2012 +0100

    sna: Just use a linear scan to find the terminating clip box
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbclip.c b/src/sna/fb/fbclip.c
index 5a8eefa..8d9c4db 100644
--- a/src/sna/fb/fbclip.c
+++ b/src/sna/fb/fbclip.c
@@ -29,7 +29,7 @@
 #include "fbclip.h"
 
 static const BoxRec *
-find_c0(const BoxRec *begin, const BoxRec *end, int16_t y)
+find_clip_row_for_y(const BoxRec *begin, const BoxRec *end, int16_t y)
 {
 	const BoxRec *mid;
 
@@ -45,31 +45,9 @@ find_c0(const BoxRec *begin, const BoxRec *end, int16_t y)
 
 	mid = begin + (end - begin) / 2;
 	if (mid->y2 > y)
-		return find_c0(begin, mid, y);
+		return find_clip_row_for_y(begin, mid, y);
 	else
-		return find_c0(mid, end, y);
-}
-
-static const BoxRec *
-find_c1(const BoxRec *begin, const BoxRec *end, int16_t y)
-{
-	const BoxRec *mid;
-
-	if (end == begin)
-		return end;
-
-	if (end - begin == 1) {
-		if (begin->y1 > y)
-			return begin;
-		else
-			return end;
-	}
-
-	mid = begin + (end - begin) / 2;
-	if (mid->y1 > y)
-		return find_c1(begin, mid, y);
-	else
-		return find_c1(mid, end, y);
+		return find_clip_row_for_y(mid, end, y);
 }
 
 const BoxRec *
@@ -99,14 +77,10 @@ fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end)
 	c1 = c0 + region->data->numRects;
 
 	if (c0->y2 <= box->y1)
-		c0 = find_c0(c0, c1, box->y1);
-	if (c1[-1].y1 >= box->y2)
-		c1 = find_c1(c0, c1, box->y2);
+		c0 = find_clip_row_for_y(c0, c1, box->y1);
 
-	DBG(("%s: c0=(%d, %d),(%d, %d); c1=(%d, %d),(%d, %d)\n",
-	     __FUNCTION__,
-	     c0->x1, c0->y1, c0->x2, c0->y2,
-	     c1[-1].x1, c1[-1].y1, c1[-1].x2, c1[-1].y2));
+	DBG(("%s: c0=(%d, %d),(%d, %d)\n",
+	     __FUNCTION__, c0->x1, c0->y1, c0->x2, c0->y2));
 
 	*end = c1;
 	return c0;
diff --git a/src/sna/fb/fbclip.h b/src/sna/fb/fbclip.h
index 0436c40..feb2d2c 100644
--- a/src/sna/fb/fbclip.h
+++ b/src/sna/fb/fbclip.h
@@ -55,8 +55,14 @@ fbDrawableRun(DrawablePtr d, GCPtr gc, const BoxRec *box,
 	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
 		BoxRec b;
 
-		if (box->x2 <= c->x1 || box->x1 >= c->x2)
+		if (box->x1 >= c->x2)
 			continue;
+		if (box->x2 <= c->x1) {
+			if (box->y2 <= c->y2)
+				break;
+			else
+				continue;
+		}
 
 		b = *box;
 		if (box_intersect(&b, c))
@@ -71,8 +77,14 @@ fbDrawableRunUnclipped(DrawablePtr d, GCPtr gc, const BoxRec *box,
 {
 	const BoxRec *c, *end;
 	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
-		if (box->x2 <= c->x1 || box->x1 >= c->x2)
+		if (box->x1 >= c->x2)
 			continue;
+		if (box->x2 <= c->x1) {
+			if (box->y2 <= c->y2)
+				break;
+			else
+				continue;
+		}
 		func(d, gc, c, data);
 	}
 }
commit 2941a5fe15626730869a48a63bb088e8ae2c0549
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 9 00:20:57 2012 +0100

    sna: Remove the consideration of CPU damage for overwriting FillRect
    
    We consider a singular FillRect to be a sequence point in the rendering
    commands; that is, it is usually used to clear the background as the first
    operation in a drawing sequence. So it is useful to ask if we can move
    the sequence to the GPU at that point.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cd960a1..cb99e6a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2064,7 +2064,7 @@ drawable_gc_flags(DrawablePtr draw, GCPtr gc, bool partial)
 	DBG(("%s: try operating on drawable inplace [hint? %d]\n",
 	     __FUNCTION__, drawable_gc_inplace_hint(draw, gc)));
 
-	return (!partial ?: MOVE_READ) | MOVE_WRITE | MOVE_INPLACE_HINT;
+	return (partial ? MOVE_READ : 0) | MOVE_WRITE | MOVE_INPLACE_HINT;
 }
 
 static bool
@@ -2257,13 +2257,12 @@ box_inplace(PixmapPtr pixmap, const BoxRec *box)
 	return ((box->x2 - box->x1) * (box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 15) >= sna->kgem.half_cpu_cache_pages;
 }
 
-#define PREFER_GPU 1
-#define FORCE_GPU 2
+#define PREFER_GPU	0x1
+#define FORCE_GPU	0x2
+#define IGNORE_CPU	0x4
 
 static inline struct kgem_bo *
-sna_drawable_use_bo(DrawablePtr drawable,
-		    int prefer_gpu,
-		    const BoxRec *box,
+sna_drawable_use_bo(DrawablePtr drawable, unsigned flags, const BoxRec *box,
 		    struct sna_damage ***damage)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
@@ -2272,11 +2271,11 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	int16_t dx, dy;
 	int ret;
 
-	DBG(("%s pixmap=%ld, box=((%d, %d), (%d, %d)), prefer_gpu?=%d...\n",
+	DBG(("%s pixmap=%ld, box=((%d, %d), (%d, %d)), flagss=%x...\n",
 	     __FUNCTION__,
 	     pixmap->drawable.serialNumber,
 	     box->x1, box->y1, box->x2, box->y2,
-	     prefer_gpu));
+	     flags));
 
 	assert_pixmap_damage(pixmap);
 	assert_drawable_contains_box(drawable, box);
@@ -2294,11 +2293,11 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->flush)
-		prefer_gpu |= PREFER_GPU;
-	if (priv->cpu && (prefer_gpu & FORCE_GPU) == 0)
-		prefer_gpu = 0;
+		flags |= PREFER_GPU;
+	if (priv->cpu && (flags & (IGNORE_CPU | FORCE_GPU)) == 0)
+		flags = 0;
 
-	if (!prefer_gpu && (!priv->gpu_bo || !kgem_bo_is_busy(priv->gpu_bo)))
+	if (!flags && (!priv->gpu_bo || !kgem_bo_is_busy(priv->gpu_bo)))
 		goto use_cpu_bo;
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
@@ -2308,9 +2307,9 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 
 	if (priv->gpu_bo == NULL) {
-		unsigned int flags;
+		unsigned int move;
 
-		if ((prefer_gpu & FORCE_GPU) == 0 &&
+		if ((flags & FORCE_GPU) == 0 &&
 		    (priv->create & KGEM_CAN_CREATE_GPU) == 0) {
 			DBG(("%s: untiled, will not force allocation\n",
 			     __FUNCTION__));
@@ -2323,7 +2322,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
-		if (priv->cpu_damage && prefer_gpu == 0) {
+		if (priv->cpu_damage && flags == 0) {
 			DBG(("%s: prefer cpu", __FUNCTION__));
 			goto use_cpu_bo;
 		}
@@ -2334,10 +2333,10 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
-		flags = MOVE_WRITE | MOVE_READ;
-		if (prefer_gpu & FORCE_GPU)
-			flags |= __MOVE_FORCE;
-		if (!sna_pixmap_move_to_gpu(pixmap, flags))
+		move = MOVE_WRITE | MOVE_READ;
+		if (flags & FORCE_GPU)
+			move |= __MOVE_FORCE;
+		if (!sna_pixmap_move_to_gpu(pixmap, move))
 			goto use_cpu_bo;
 
 		DBG(("%s: allocated GPU bo for operation\n", __FUNCTION__));
@@ -2357,7 +2356,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	     region.extents.x2, region.extents.y2));
 
 	if (priv->gpu_damage) {
-		if (!priv->cpu_damage) {
+		if (flags & IGNORE_CPU || !priv->cpu_damage) {
 			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
 							       &region.extents)) {
 				DBG(("%s: region wholly contained within GPU damage\n",
@@ -2384,7 +2383,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		}
 	}
 
-	if (priv->cpu_damage) {
+	if ((flags & IGNORE_CPU) == 0 && priv->cpu_damage) {
 		ret = sna_damage_contains_box(priv->cpu_damage, &region.extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within CPU damage\n",
@@ -2442,7 +2441,7 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
-	if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
+	if (flags == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -10334,7 +10333,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	struct sna_damage **damage;
 	struct kgem_bo *bo;
 	RegionRec region;
-	unsigned flags;
+	unsigned flags, hint;
 	uint32_t color;
 
 	DBG(("%s(n=%d, PlaneMask: %lx (solid %d), solid fill: %d [style=%d, tileIsPixel=%d], alu=%d)\n", __FUNCTION__,
@@ -10380,19 +10379,27 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	/* Clear the cpu damage so that we refresh the GPU status of the
 	 * pixmap upon a redraw after a period of inactivity.
 	 */
-	if (priv->cpu_damage &&
-	    n == 1 && region_is_singular(gc->pCompositeClip) &&
-	    gc->fillStyle != FillStippled && alu_overwrites(gc->alu)) {
-		region.data = NULL;
-		if (region_subsumes_damage(&region, priv->cpu_damage)) {
-			sna_damage_destroy(&priv->cpu_damage);
-			list_del(&priv->list);
+	hint = PREFER_GPU;
+	if (n == 1 && gc->fillStyle != FillStippled && alu_overwrites(gc->alu)) {
+		if (priv->cpu_damage &&
+		    region_is_singular(gc->pCompositeClip)) {
+			region.data = NULL;
+			if (region_subsumes_damage(&region, priv->cpu_damage)) {
+				sna_damage_destroy(&priv->cpu_damage);
+				list_del(&priv->list);
+			}
+		}
+		if (priv->cpu_damage == NULL) {
+			sna_damage_all(&priv->gpu_damage,
+				       pixmap->drawable.width,
+				       pixmap->drawable.height);
 			priv->undamaged = false;
 			priv->cpu = false;
 		}
+		hint |= IGNORE_CPU;
 	}
 
-	bo = sna_drawable_use_bo(draw, PREFER_GPU, &region.extents, &damage);
+	bo = sna_drawable_use_bo(draw, hint, &region.extents, &damage);
 	if (bo == NULL)
 		goto fallback;
 
commit 8be00b6d4767ffc09328d246d02ee75312ad5842
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 8 20:48:40 2012 +0100

    sna: Substitute the reduce clip region for fallback ops
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/fb/fbclip.c b/src/sna/fb/fbclip.c
index 9b33e4a..5a8eefa 100644
--- a/src/sna/fb/fbclip.c
+++ b/src/sna/fb/fbclip.c
@@ -98,8 +98,10 @@ fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end)
 	c0 = (const BoxRec *)region->data + 1;
 	c1 = c0 + region->data->numRects;
 
-	c0 = find_c0(c0, c1, box->y1);
-	c1 = find_c1(c0, c1, box->y2);
+	if (c0->y2 <= box->y1)
+		c0 = find_c0(c0, c1, box->y1);
+	if (c1[-1].y1 >= box->y2)
+		c1 = find_c1(c0, c1, box->y2);
 
 	DBG(("%s: c0=(%d, %d),(%d, %d); c1=(%d, %d),(%d, %d)\n",
 	     __FUNCTION__,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 08260af..cd960a1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2757,7 +2757,8 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc,
 	assert(gc->ops == (GCOps *)&sna_gc_ops);
 	assert(gc->funcs == (GCFuncs *)&sna_gc_funcs);
 
-	sgc->priv = region;
+	sgc->priv = gc->pCompositeClip;
+	gc->pCompositeClip = region;
 	gc->ops = (GCOps *)&sna_gc_ops__cpu;
 	gc->funcs = (GCFuncs *)&sna_gc_funcs__cpu;
 
@@ -2815,6 +2816,7 @@ static void sna_gc_move_to_gpu(GCPtr gc)
 
 	gc->ops = (GCOps *)&sna_gc_ops;
 	gc->funcs = (GCFuncs *)&sna_gc_funcs;
+	gc->pCompositeClip = sna_gc(gc)->priv;
 }
 
 static inline bool clip_box(BoxPtr box, GCPtr gc)
@@ -3545,18 +3547,6 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (w == 0 || h == 0)
 		return;
 
-	if (priv == NULL) {
-		DBG(("%s: fbPutImage, unattached(%d, %d, %d, %d)\n",
-		     __FUNCTION__, x, y, w, h));
-		if (sna_gc_move_to_cpu(gc, drawable, NULL)) {
-			fbPutImage(drawable, gc, depth,
-				   x, y, w, h, left,
-				   format, bits);
-			sna_gc_move_to_gpu(gc);
-		}
-		return;
-	}
-
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
 	region.extents.x1 = x + drawable->x;
@@ -3575,6 +3565,12 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 			return;
 	}
 
+	if (priv == NULL) {
+		DBG(("%s: fallback -- unattached(%d, %d, %d, %d)\n",
+		     __FUNCTION__, x, y, w, h));
+		goto fallback;
+	}
+
 	RegionTranslate(&region, dx, dy);
 
 	if (FORCE_FALLBACK)
@@ -4355,6 +4351,9 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				src_y - dst_y - dst->y + src->y);
 		if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
 			goto out_gc;
+		RegionTranslate(&region,
+				-(src_x - dst_x - dst->x + src->x),
+				-(src_y - dst_y - dst->y + src->y));
 
 		ret = miDoCopy(src, dst, gc,
 			       src_x, src_y,
commit 0457935a70f6c1ae06f61f9ad0dd1bdc825465fa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 8 20:38:03 2012 +0100

    sna: Review placement hints for fallback operations
    
    Look for those operations that maybe better via the GTT and those that
    are preferred to be in CPU cache. The wonders of multiple layers of
    heuristics.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9ce870d..08260af 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2044,12 +2044,9 @@ inline static bool drawable_gc_inplace_hint(DrawablePtr draw, GCPtr gc)
 	return true;
 }
 
-inline static unsigned drawable_gc_flags(DrawablePtr draw,
-					 GCPtr gc,
-					 bool read)
+inline static unsigned
+drawable_gc_flags(DrawablePtr draw, GCPtr gc, bool partial)
 {
-	unsigned flags;
-
 	assert(sna_gc(gc)->changes == 0);
 
 	if (gc->fillStyle == FillStippled) {
@@ -2059,7 +2056,7 @@ inline static unsigned drawable_gc_flags(DrawablePtr draw,
 	}
 
 	if (fb_gc(gc)->and) {
-		DBG(("%s: read due to rop %d:%x\n",
+		DBG(("%s: read due to rrop %d:%x\n",
 		     __FUNCTION__, gc->alu, (unsigned)fb_gc(gc)->and));
 		return MOVE_READ | MOVE_WRITE;
 	}
@@ -2067,12 +2064,7 @@ inline static unsigned drawable_gc_flags(DrawablePtr draw,
 	DBG(("%s: try operating on drawable inplace [hint? %d]\n",
 	     __FUNCTION__, drawable_gc_inplace_hint(draw, gc)));
 
-	flags = MOVE_WRITE;
-	if (read) {
-		DBG(("%s: partial write\n", __FUNCTION__));
-		flags |= MOVE_READ;
-	}
-	return flags;
+	return (!partial ?: MOVE_READ) | MOVE_WRITE | MOVE_INPLACE_HINT;
 }
 
 static bool
@@ -3633,8 +3625,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc,
-							       true)))
+					     drawable_gc_flags(drawable, gc, false)))
 		goto out_gc;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
@@ -5302,8 +5293,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, n > 1)))
+					     drawable_gc_flags(drawable, gc, n > 1)))
 		goto out_gc;
 
 	DBG(("%s: fbFillSpans\n", __FUNCTION__));
@@ -5343,8 +5333,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, true)))
+					     drawable_gc_flags(drawable, gc, n > 1)))
 		goto out_gc;
 
 	DBG(("%s: fbSetSpans\n", __FUNCTION__));
@@ -6039,8 +6028,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable, gc,
-							       n > 1)))
+					     MOVE_READ | MOVE_WRITE))
 		goto out_gc;
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
@@ -8510,9 +8498,8 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, true)))
-		goto out;
+					     drawable_gc_flags(drawable, gc, true)))
+		goto out_gc;
 
 	DBG(("%s: miPolyRectangle\n", __FUNCTION__));
 	miPolyRectangle(drawable, gc, n, r);
@@ -8691,8 +8678,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
-					     drawable_gc_flags(drawable,
-							       gc, true)))
+					     MOVE_READ | MOVE_WRITE))
 		goto out_gc;
 
 	DBG(("%s -- fbPolyArc\n", __FUNCTION__));
@@ -9039,8 +9025,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, draw, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &data.region,
-					     drawable_gc_flags(draw, gc,
-							       true)))
+					     drawable_gc_flags(draw, gc, true)))
 		goto out_gc;
 
 	DBG(("%s: fallback -- miFillPolygon -> sna_fill_spans__cpu\n",
@@ -10451,8 +10436,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, draw, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &region,
-					     drawable_gc_flags(draw, gc,
-							       n > 1)))
+					     drawable_gc_flags(draw, gc, n > 1)))
 		goto out_gc;
 
 	DBG(("%s: fallback - fbPolyFillRect\n", __FUNCTION__));
@@ -10582,8 +10566,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, draw, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &data.region,
-					     drawable_gc_flags(draw, gc,
-							       true)))
+					     drawable_gc_flags(draw, gc, true)))
 		goto out_gc;
 
 	DBG(("%s: fallback -- miPolyFillArc -> sna_fill_spans__cpu\n",
@@ -11044,8 +11027,7 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     drawable_gc_flags(drawable,
-								       gc, true)))
+						     MOVE_READ | MOVE_WRITE))
 			goto out_gc;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
@@ -11139,7 +11121,7 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     drawable_gc_flags(drawable, gc, true)))
+						     MOVE_READ | MOVE_WRITE))
 			goto out_gc;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
@@ -11242,8 +11224,7 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     drawable_gc_flags(drawable,
-								       gc, n > 1)))
+						     MOVE_READ | MOVE_WRITE))
 			goto out_gc;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -11339,8 +11320,7 @@ force_fallback:
 		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
-						     drawable_gc_flags(drawable,
-								       gc, n > 1)))
+						     MOVE_READ | MOVE_WRITE))
 			goto out_gc;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -11637,8 +11617,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, n > 1)))
+					     MOVE_READ | MOVE_WRITE))
 		goto out_gc;
 
 	DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
@@ -11718,8 +11697,7 @@ fallback:
 	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, true)))
+					     MOVE_READ | MOVE_WRITE))
 		goto out_gc;
 
 	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
@@ -11904,8 +11882,7 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
 	if (!sna_pixmap_move_to_cpu(bitmap, MOVE_READ))
 		goto out_gc;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
-					     drawable_gc_flags(drawable,
-							       gc, false)))
+					     drawable_gc_flags(drawable, gc, false)))
 		goto out_gc;
 
 	DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",
commit b7f0b0e7e3f66165b87c46f897de536cf74a9daf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 8 19:59:34 2012 +0100

    sna: Remove function for force-to-gpu
    
    This is now entirely done in the core move-to-gpu as a special case.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6738400..f62cfbc 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -416,10 +416,6 @@ PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 PixmapPtr sna_pixmap_create_unattached(ScreenPtr screen,
 				       int width, int height, int depth);
 
-struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags);
-struct sna_pixmap *sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags);
-struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
-
 #define MOVE_WRITE 0x1
 #define MOVE_READ 0x2
 #define MOVE_INPLACE_HINT 0x4
@@ -427,6 +423,15 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_SOURCE_HINT 0x10
 #define MOVE_WHOLE_HINT 0x20
 #define __MOVE_FORCE 0x40
+
+struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags);
+static inline struct sna_pixmap *
+sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
+{
+	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
+	DBG(("%s(pixmap=%p, flags=%x)\n", __FUNCTION__, pixmap, flags));
+	return sna_pixmap_move_to_gpu(pixmap, flags | __MOVE_FORCE);
+}
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
@@ -450,6 +455,8 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 	return sna_pixmap_move_to_gpu(get_drawable_pixmap(drawable), flags) != NULL;
 }
 
+struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
+
 static inline bool
 sna_drawable_is_clear(DrawablePtr d)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 681b283..9ce870d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1218,7 +1218,8 @@ skip_inplace_map:
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
-		DBG(("%s: operate inplace\n", __FUNCTION__));
+		DBG(("%s: try to operate inplace\n", __FUNCTION__));
+		assert(priv->cpu == false);
 
 		pixmap->devPrivate.ptr =
 			kgem_bo_map(&sna->kgem, priv->gpu_bo);
@@ -1233,10 +1234,10 @@ skip_inplace_map:
 				list_del(&priv->list);
 				priv->undamaged = false;
 				priv->clear = false;
-				priv->cpu = false;
 			}
 
 			assert_pixmap_damage(pixmap);
+			DBG(("%s: operate inplace\n", __FUNCTION__));
 			return true;
 		}
 
@@ -1647,7 +1648,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	    region_inplace(sna, pixmap, region, priv, (flags & MOVE_READ) == 0)) {
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
-		DBG(("%s: operate inplace\n", __FUNCTION__));
+		DBG(("%s: try to operate inplace\n", __FUNCTION__));
 
 		pixmap->devPrivate.ptr =
 			kgem_bo_map(&sna->kgem, priv->gpu_bo);
@@ -1674,6 +1675,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			priv->cpu = false;
 			if (dx | dy)
 				RegionTranslate(region, -dx, -dy);
+			DBG(("%s: operate inplace\n", __FUNCTION__));
 			return true;
 		}
 
@@ -2570,32 +2572,6 @@ sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 }
 
 struct sna_pixmap *
-sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
-{
-	struct sna_pixmap *priv;
-
-	DBG(("%s(pixmap=%p)\n", __FUNCTION__, pixmap));
-
-	priv = sna_pixmap(pixmap);
-	if (priv == NULL)
-		return NULL;
-
-	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
-		DBG(("%s: GPU all-damaged\n", __FUNCTION__));
-		assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
-		return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
-	}
-
-	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
-	if (!sna_pixmap_move_to_gpu(pixmap, flags | __MOVE_FORCE))
-		return NULL;
-
-	assert(!priv->cpu);
-
-	return priv;
-}
-
-struct sna_pixmap *
 sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -2679,15 +2655,15 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		}
 	}
 
-	if (priv->cpu_damage == NULL)
-		goto done;
-
 	if (priv->gpu_bo->proxy) {
 		DBG(("%s: reusing cached upload\n", __FUNCTION__));
 		assert((flags & MOVE_WRITE) == 0);
-		goto done;
+		return priv;
 	}
 
+	if (priv->cpu_damage == NULL)
+		goto done;
+
 	if (priv->mapped) {
 		assert(priv->stride);
 		pixmap->devPrivate.ptr = priv->ptr;
@@ -12440,6 +12416,7 @@ static void sna_accel_flush(struct sna *sna)
 	if (priv) {
 		sna_pixmap_force_to_gpu(priv->pixmap, MOVE_READ);
 		kgem_bo_flush(&sna->kgem, priv->gpu_bo);
+		assert(!priv->cpu);
 	}
 
 	sna_mode_redisplay(sna);
commit bb8770158c08394c2de79e0ca1c1b3112e17dd23
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 8 18:53:25 2012 +0100

    sna: Apply the clear color when resizing the front buffer
    
    If the existing front buffer is clear, just apply the clear color to
    the new buffer rather than copy the old one across.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 2d8b0f3..12a6bac 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2159,17 +2159,28 @@ static void copy_front(struct sna *sna, PixmapPtr old, PixmapPtr new)
 	DBG(("%s: copying box (%dx%d) from (%d, %d) to (%d, %d)\n",
 	     __FUNCTION__, box.x2, box.y2, sx, sy, dx, dy));
 
-	if (box.x2 != new->drawable.width || box.y2 != new->drawable.height) {
-		(void)sna->render.fill_one(sna, new, new_priv->gpu_bo, 0,
+	if (old_priv->clear) {
+		(void)sna->render.fill_one(sna, new, new_priv->gpu_bo,
+					   old_priv->clear_color,
 					   0, 0,
 					   new->drawable.width,
 					   new->drawable.height,
-					   GXclear);
+					   GXcopy);
+		new_priv->clear = true;
+		new_priv->clear_color = old_priv->clear_color;
+	} else {
+		if (box.x2 != new->drawable.width || box.y2 != new->drawable.height) {
+			(void)sna->render.fill_one(sna, new, new_priv->gpu_bo, 0,
+						   0, 0,
+						   new->drawable.width,
+						   new->drawable.height,
+						   GXclear);
+		}
+		(void)sna->render.copy_boxes(sna, GXcopy,
+					     old, old_priv->gpu_bo, sx, sy,
+					     new, new_priv->gpu_bo, dx, dy,
+					     &box, 1, 0);
 	}
-	(void)sna->render.copy_boxes(sna, GXcopy,
-				     old, old_priv->gpu_bo, sx, sy,
-				     new, new_priv->gpu_bo, dx, dy,
-				     &box, 1, 0);
 
 	if (!DAMAGE_IS_ALL(new_priv->gpu_damage))
 		sna_damage_all(&new_priv->gpu_damage,
commit 8dd14855d75240501aa2d089bcdfa46e7badbf18
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 8 15:46:41 2012 +0100

    sna/dri: Review stale comments
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index e520394..75c66b7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -869,10 +869,6 @@ sna_dri_frame_event_info_free(struct sna *sna,
 	free(info);
 }
 
-/*
- * Our internal swap routine takes care of actually exchanging, blitting, or
- * flipping buffers as necessary.
- */
 static Bool
 sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 {
@@ -1402,7 +1398,6 @@ sna_dri_page_flip_handler(struct sna *sna,
 
 	/* Is this the event whose info shall be delivered to higher level? */
 	if (event->user_data & 1) {
-		/* Yes: Cache msc, ust for later delivery. */
 		info->fe_frame = event->sequence;
 		info->fe_tv_sec = event->tv_sec;
 		info->fe_tv_usec = event->tv_usec;
@@ -1433,7 +1428,6 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	VG_CLEAR(vbl);
 
-	/* XXX In theory we can just exchange pixmaps.... */
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1)
 		return FALSE;
@@ -1562,14 +1556,15 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			vbl.request.sequence = current_msc - current_msc % divisor + remainder;
 
 			/*
-			 * If the calculated deadline vbl.request.sequence is smaller than
-			 * or equal to current_msc, it means we've passed the last point
-			 * when effective onset frame seq could satisfy
-			 * seq % divisor == remainder, so we need to wait for the next time
-			 * this will happen.
+			 * If the calculated deadline vbl.request.sequence is
+			 * smaller than or equal to current_msc, it means
+			 * we've passed the last point when effective onset
+			 * frame seq could satisfy *seq % divisor == remainder,
+			 * so we need to wait for the next time this will
+			 * happen.
 			 *
-			 * This comparison takes the 1 frame swap delay in pageflipping mode
-			 * into account.
+			 * This comparison takes the 1 frame swap delay
+			 * in pageflipping mode into account.
 			 */
 			if (vbl.request.sequence <= current_msc)
 				vbl.request.sequence += divisor;
@@ -1599,7 +1594,8 @@ sna_dri_immediate_xchg(struct sna *sna,
 {
 	drmVBlank vbl;
 
-	DBG(("%s: emitting immediate exchange, throttling client\n", __FUNCTION__));
+	DBG(("%s: emitting immediate exchange, throttling client\n",
+	     __FUNCTION__));
 	VG_CLEAR(vbl);
 
 	if ((sna->flags & SNA_NO_WAIT) == 0) {
@@ -1794,7 +1790,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	 * the swap.
 	 */
 	if (current_msc < *target_msc) {
-		DBG(("%s: waiting for swap: current=%d, target=%d,  divisor=%d\n",
+		DBG(("%s: waiting for swap: current=%d, target=%d, divisor=%d\n",
 		     __FUNCTION__,
 		     (int)current_msc,
 		     (int)*target_msc,
@@ -2097,7 +2093,8 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 
 	/*
 	 * If we get here, target_msc has already passed or we don't have one,
-	 * so we queue an event that will satisfy the divisor/remainder equation.
+	 * so we queue an event that will satisfy the divisor/remainder
+	 * equation.
 	 */
 	vbl.request.type =
 		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
commit 3bb7a530e77a5c6e8bae0fb14c570feafe37c2bd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 20:53:27 2012 +0100

    sna/dri: Fix cross-chaining of pageflip vs vblank
    
    And double-check that the drawable is still flippable before completing
    the delayed exchange.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 8346554..e520394 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1083,9 +1083,17 @@ static void chain_swap(struct sna *sna,
 	int type;
 
 	assert(chain == sna_dri_window_get_chain((WindowPtr)draw));
+	DBG(("%s: chaining type=%d\n", __FUNCTION__, chain->type));
+	switch (chain->type) {
+	case DRI2_XCHG_THROTTLE:
+	case DRI2_SWAP_THROTTLE:
+		break;
+	default:
+		return;
+	}
 
-	/* In theory, it shoudln't be possible for cross-chaining to occur! */
-	if (chain->type == DRI2_XCHG_THROTTLE) {
+	if (chain->type == DRI2_XCHG_THROTTLE &&
+	    can_exchange(sna, draw, chain->front, chain->back)) {
 		DBG(("%s: performing chained exchange\n", __FUNCTION__));
 		sna_dri_exchange_buffers(draw, chain->front, chain->back);
 		type = DRI2_EXCHANGE_COMPLETE;
commit 1454df8caa51fff9810a9b792d17aa82b247a4ae
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 19:59:50 2012 +0100

    sna/dri: Use draw ref directly
    
    As we hook into the DestroyWindow notification, we can reliably use the
    original Drawable reference and avoid the secondary object lookups.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 07c95fb..8346554 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -73,7 +73,7 @@ enum frame_event_type {
 };
 
 struct sna_dri_frame_event {
-	XID drawable_id;
+	DrawablePtr draw;
 	ClientPtr client;
 	enum frame_event_type type;
 	unsigned frame;
@@ -817,7 +817,7 @@ void sna_dri_destroy_window(WindowPtr win)
 
 	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.serialNumber));
 	while (chain) {
-		chain->drawable_id = None;
+		chain->draw = NULL;
 		chain = chain->chain;
 	}
 }
@@ -849,9 +849,6 @@ sna_dri_frame_event_info_free(struct sna *sna,
 			      DrawablePtr draw,
 			      struct sna_dri_frame_event *info)
 {
-	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
-	     info, info->client, (long)info->drawable_id));
-
 	if (draw && draw->type == DRAWABLE_WINDOW)
 		sna_dri_remove_frame_event((WindowPtr)draw, info);
 	_sna_dri_destroy_buffer(sna, info->front);
@@ -1148,19 +1145,12 @@ static bool sna_dri_blit_complete(struct sna *sna,
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 {
 	struct sna_dri_frame_event *info = (void *)(uintptr_t)event->user_data;
-	DrawablePtr draw = NULL;
-	int status;
-
-	DBG(("%s(id=%d, type=%d)\n", __FUNCTION__,
-	     (int)info->drawable_id, info->type));
-
-	status = BadDrawable;
-	if (info->drawable_id)
-		status = dixLookupDrawable(&draw,
-					   info->drawable_id,
-					   serverClient,
-					   M_ANY, DixWriteAccess);
-	if (status != Success)
+	DrawablePtr draw;
+
+	DBG(("%s(type=%d)\n", __FUNCTION__, info->type));
+
+	draw = info->draw;
+	if (draw == NULL)
 		goto done;
 
 	switch (info->type) {
@@ -1268,8 +1258,7 @@ sna_dri_flip_continue(struct sna *sna,
 static void sna_dri_flip_event(struct sna *sna,
 			       struct sna_dri_frame_event *flip)
 {
-	DrawablePtr draw = NULL;
-	int status;
+	DrawablePtr draw;
 
 	DBG(("%s(frame=%d, tv=%d.%06d, type=%d)\n",
 	     __FUNCTION__,
@@ -1281,13 +1270,8 @@ static void sna_dri_flip_event(struct sna *sna,
 	if (sna->dri.flip_pending == flip)
 		sna->dri.flip_pending = NULL;
 
-	status = BadDrawable;
-	if (flip->drawable_id)
-		status = dixLookupDrawable(&draw,
-					   flip->drawable_id,
-					   serverClient,
-					   M_ANY, DixWriteAccess);
-	if (status != Success) {
+	draw = flip->draw;
+	if (draw == NULL) {
 		DBG(("%s: drawable already gone\n", __FUNCTION__));
 		sna_dri_frame_event_info_free(sna, draw, flip);
 		return;
@@ -1457,7 +1441,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		info = sna->dri.flip_pending;
 		if (info) {
-			if (info->drawable_id == draw->id) {
+			if (info->draw == draw) {
 				DBG(("%s: chaining flip\n", __FUNCTION__));
 				info->next_front.name = 1;
 				return TRUE;
@@ -1478,7 +1462,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		info->type = type;
 
-		info->drawable_id = draw->id;
+		info->draw = draw;
 		info->client = client;
 		info->event_complete = func;
 		info->event_data = data;
@@ -1518,7 +1502,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (info == NULL)
 			return FALSE;
 
-		info->drawable_id = draw->id;
+		info->draw = draw;
 		info->client = client;
 		info->event_complete = func;
 		info->event_data = data;
@@ -1767,7 +1751,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	if (!info)
 		goto blit_fallback;
 
-	info->drawable_id = draw->id;
+	info->draw = draw;
 	info->client = client;
 	info->event_complete = func;
 	info->event_data = data;
@@ -2078,7 +2062,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (!info)
 		goto out_complete;
 
-	info->drawable_id = draw->id;
+	info->draw = draw;
 	info->client = client;
 	info->type = DRI2_WAITMSC;
 	sna_dri_add_frame_event(draw, info);
commit 9abb6c09bdb9831db5f0626eed70663a2dc39fc6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 18:43:06 2012 +0100

    sna/dri: Remove dead code for 'old_fb'
    
    The member still exists but is never set and is unused.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 5f3ca76..6738400 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -308,8 +308,7 @@ extern int sna_output_dpms_status(xf86OutputPtr output);
 extern int sna_page_flip(struct sna *sna,
 			 struct kgem_bo *bo,
 			 void *data,
-			 int ref_crtc_hw_id,
-			 uint32_t *old_fb);
+			 int ref_crtc_hw_id);
 
 constant static inline struct sna *
 to_sna(ScrnInfoPtr scrn)
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 7c1d3bc..2d8b0f3 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2304,8 +2304,7 @@ int
 sna_page_flip(struct sna *sna,
 	      struct kgem_bo *bo,
 	      void *data,
-	      int ref_crtc_hw_id,
-	      uint32_t *old_fb)
+	      int ref_crtc_hw_id)
 {
 	int count;
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 50ee087..07c95fb 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -97,7 +97,6 @@ struct sna_dri_frame_event {
 		struct kgem_bo *bo;
 		uint32_t name;
 	} old_front, next_front, cache;
-	uint32_t old_fb;
 
 	int off_delay;
 };
@@ -884,9 +883,7 @@ sna_dri_page_flip(struct sna *sna, struct sna_dri_frame_event *info)
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	info->count = sna_page_flip(sna, bo,
-				    info, info->pipe,
-				    &info->old_fb);
+	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
 		return FALSE;
 
@@ -1246,7 +1243,7 @@ sna_dri_flip_continue(struct sna *sna,
 	bo = get_private(info->back)->bo;
 	assert(get_drawable_pixmap(draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
 
-	info->count = sna_page_flip(sna, bo, info, info->pipe, &info->old_fb);
+	info->count = sna_page_flip(sna, bo, info, info->pipe);
 	if (info->count == 0)
 		return FALSE;
 
@@ -1368,8 +1365,7 @@ static void sna_dri_flip_event(struct sna *sna,
 
 			flip->count = sna_page_flip(sna,
 						    get_private(flip->front)->bo,
-						    flip, flip->pipe,
-						    &flip->old_fb);
+						    flip, flip->pipe);
 			if (flip->count == 0)
 				goto finish_async_flip;
 
@@ -1382,8 +1378,7 @@ static void sna_dri_flip_event(struct sna *sna,
 			/* Just queue a no-op flip to trigger another event */
 			flip->count = sna_page_flip(sna,
 						    get_private(flip->front)->bo,
-						    flip, flip->pipe,
-						    &flip->old_fb);
+						    flip, flip->pipe);
 			if (flip->count == 0)
 				goto finish_async_flip;
 		} else {
commit ad877abdc70b842afd202b2f97892d97a0b8d151
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 17:48:52 2012 +0100

    sna/dri: Attach the chain of frame events to the Window, not pixmap
    
    So that we can have multiple clients swapping in separate windows
    concurrently.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6994fba..50ee087 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -63,6 +63,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 enum frame_event_type {
 	DRI2_SWAP,
+	DRI2_SWAP_WAIT,
 	DRI2_SWAP_THROTTLE,
 	DRI2_XCHG_THROTTLE,
 	DRI2_ASYNC_FLIP,
@@ -79,8 +80,6 @@ struct sna_dri_frame_event {
 	int pipe;
 	int count;
 
-	struct list drawable_events;
-
 	/* for swaps & flips only */
 	DRI2SwapEventPtr event_complete;
 	void *event_data;
@@ -107,7 +106,6 @@ struct sna_dri_private {
 	int refcnt;
 	PixmapPtr pixmap;
 	struct kgem_bo *bo;
-	struct sna_dri_frame_event *chain;
 };
 
 static inline struct sna_dri_frame_event *
@@ -131,30 +129,30 @@ static inline struct kgem_bo *ref(struct kgem_bo *bo)
 /* Prefer to enable TILING_Y if this buffer will never be a
  * candidate for pageflipping
  */
-static uint32_t color_tiling(struct sna *sna, DrawablePtr drawable)
+static uint32_t color_tiling(struct sna *sna, DrawablePtr draw)
 {
 	uint32_t tiling;
 
 	if (COLOR_PREFER_TILING_Y &&
-	    (drawable->width  != sna->front->drawable.width ||
-	     drawable->height != sna->front->drawable.height))
+	    (draw->width  != sna->front->drawable.width ||
+	     draw->height != sna->front->drawable.height))
 		tiling = I915_TILING_Y;
 	else
 		tiling = I915_TILING_X;
 
 	return kgem_choose_tiling(&sna->kgem, -tiling,
-				  drawable->width,
-				  drawable->height,
-				  drawable->bitsPerPixel);
+				  draw->width,
+				  draw->height,
+				  draw->bitsPerPixel);
 }
 
-static uint32_t other_tiling(struct sna *sna, DrawablePtr drawable)
+static uint32_t other_tiling(struct sna *sna, DrawablePtr draw)
 {
 	/* XXX Can mix color X / depth Y? */
 	return kgem_choose_tiling(&sna->kgem, -I915_TILING_Y,
-				  drawable->width,
-				  drawable->height,
-				  drawable->bitsPerPixel);
+				  draw->width,
+				  draw->height,
+				  draw->bitsPerPixel);
 }
 
 static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
@@ -200,11 +198,11 @@ static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
 }
 
 static DRI2Buffer2Ptr
-sna_dri_create_buffer(DrawablePtr drawable,
+sna_dri_create_buffer(DrawablePtr draw,
 		      unsigned int attachment,
 		      unsigned int format)
 {
-	struct sna *sna = to_sna_from_drawable(drawable);
+	struct sna *sna = to_sna_from_drawable(draw);
 	DRI2Buffer2Ptr buffer;
 	struct sna_dri_private *private;
 	PixmapPtr pixmap;
@@ -212,13 +210,12 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	int bpp;
 
 	DBG(("%s(attachment=%d, format=%d, drawable=%dx%d)\n",
-	     __FUNCTION__, attachment, format,
-	     drawable->width, drawable->height));
+	     __FUNCTION__, attachment, format, draw->width, draw->height));
 
 	pixmap = NULL;
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
-		pixmap = get_drawable_pixmap(drawable);
+		pixmap = get_drawable_pixmap(draw);
 		buffer = sna_pixmap_get_buffer(pixmap);
 		if (buffer) {
 			DBG(("%s: reusing front buffer attachment\n",
@@ -248,12 +245,12 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	case DRI2BufferFrontRight:
 	case DRI2BufferFakeFrontLeft:
 	case DRI2BufferFakeFrontRight:
-		bpp = drawable->bitsPerPixel;
+		bpp = draw->bitsPerPixel;
 		bo = kgem_create_2d(&sna->kgem,
-				    drawable->width,
-				    drawable->height,
-				    drawable->bitsPerPixel,
-				    color_tiling(sna, drawable),
+				    draw->width,
+				    draw->height,
+				    draw->bitsPerPixel,
+				    color_tiling(sna, draw),
 				    CREATE_EXACT);
 		break;
 
@@ -280,11 +277,11 @@ sna_dri_create_buffer(DrawablePtr drawable,
 		 * not understand W tiling and the GTT is incapable of
 		 * W fencing.
 		 */
-		bpp = format ? format : drawable->bitsPerPixel;
+		bpp = format ? format : draw->bitsPerPixel;
 		bpp *= 2;
 		bo = kgem_create_2d(&sna->kgem,
-				    ALIGN(drawable->width, 64),
-				    ALIGN((drawable->height + 1) / 2, 64),
+				    ALIGN(draw->width, 64),
+				    ALIGN((draw->height + 1) / 2, 64),
 				    bpp, I915_TILING_NONE, CREATE_EXACT);
 		break;
 
@@ -292,10 +289,10 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	case DRI2BufferDepthStencil:
 	case DRI2BufferHiz:
 	case DRI2BufferAccum:
-		bpp = format ? format : drawable->bitsPerPixel,
+		bpp = format ? format : draw->bitsPerPixel,
 		bo = kgem_create_2d(&sna->kgem,
-				    drawable->width, drawable->height, bpp,
-				    other_tiling(sna, drawable),
+				    draw->width, draw->height, bpp,
+				    other_tiling(sna, draw),
 				    CREATE_EXACT);
 		break;
 
@@ -373,9 +370,9 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 	}
 }
 
-static void sna_dri_destroy_buffer(DrawablePtr drawable, DRI2Buffer2Ptr buffer)
+static void sna_dri_destroy_buffer(DrawablePtr draw, DRI2Buffer2Ptr buffer)
 {
-	_sna_dri_destroy_buffer(to_sna_from_drawable(drawable), buffer);
+	_sna_dri_destroy_buffer(to_sna_from_drawable(draw), buffer);
 }
 
 static void sna_dri_reference_buffer(DRI2Buffer2Ptr buffer)
@@ -773,95 +770,102 @@ sna_dri_get_pipe(DrawablePtr pDraw)
 	return pipe;
 }
 
-static struct list *
-sna_dri_get_window_events(WindowPtr win)
+static struct sna_dri_frame_event *
+sna_dri_window_get_chain(WindowPtr win)
 {
-	struct list *head;
+	return ((void **)win->devPrivates)[1];
+}
 
-	head = ((void **)win->devPrivates)[1];
-	if (head)
-		return head;
+static void
+sna_dri_window_set_chain(WindowPtr win,
+			 struct sna_dri_frame_event *chain)
+{
+	DBG(("%s: head now %p\n", __FUNCTION__, chain));
+	assert(win->drawable.type == DRAWABLE_WINDOW);
+	((void **)win->devPrivates)[1] = chain;
+}
 
-	head = malloc(sizeof(*head));
-	if (head == NULL)
-		return NULL;
+static void
+sna_dri_remove_frame_event(WindowPtr win,
+			    struct sna_dri_frame_event *info)
+{
+	struct sna_dri_frame_event *chain;
+
+	DBG(("%s: remove[%p] from window %ld)\n",
+	     __FUNCTION__, info, (long)win->drawable.id));
+
+	chain = sna_dri_window_get_chain(win);
+	if (chain == NULL)
+		return;
 
-	list_init(head);
-	((void **)win->devPrivates)[1] = head;
-	return head;
+	if (chain == info) {
+		sna_dri_window_set_chain(win, info->chain);
+		return;
+	}
+
+	while (chain->chain != info)
+		chain = chain->chain;
+	chain->chain = info->chain;
 }
 
 void sna_dri_destroy_window(WindowPtr win)
 {
-	struct list *head = ((void **)win->devPrivates)[1];
+	struct sna_dri_frame_event *chain;
 
-	if (head == NULL)
+	chain = sna_dri_window_get_chain(win);
+	if (chain == NULL)
 		return;
 
 	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.serialNumber));
-
-	while (!list_is_empty(head)) {
-		struct sna_dri_frame_event *info =
-			list_first_entry(head,
-					 struct sna_dri_frame_event,
-					 drawable_events);
-
-		DBG(("%s: marking drawable gone [%p]: %ld\n",
-		     __FUNCTION__, info, (long)info->drawable_id));
-
-		list_del(&info->drawable_events);
-		info->drawable_id = None;
+	while (chain) {
+		chain->drawable_id = None;
+		chain = chain->chain;
 	}
-	free(head);
 }
 
-static bool
+static void
 sna_dri_add_frame_event(DrawablePtr draw, struct sna_dri_frame_event *info)
 {
-	struct list *head;
+	struct sna_dri_frame_event *chain;
 
 	if (draw->type != DRAWABLE_WINDOW)
-		return true;
-
-	head = sna_dri_get_window_events((WindowPtr)draw);
-	if (head == NULL) {
-		DBG(("%s: failed to get drawable events\n", __FUNCTION__));
-		return false;
-	}
-
-	list_add(&info->drawable_events, head);
+		return;
 
 	DBG(("%s: add[%p] to window %ld)\n",
 	     __FUNCTION__, info, (long)draw->id));
-	return true;
-}
 
-static void
-sna_dri_frame_event_release_bo(struct kgem *kgem, struct kgem_bo *bo)
-{
-	kgem_bo_destroy(kgem, bo);
+	chain = sna_dri_window_get_chain((WindowPtr)draw);
+	if (chain == NULL) {
+		sna_dri_window_set_chain((WindowPtr)draw, info);
+		return;
+	}
+
+	while (chain->chain != NULL)
+		chain = chain->chain;
+	chain->chain = info;
 }
 
 static void
 sna_dri_frame_event_info_free(struct sna *sna,
+			      DrawablePtr draw,
 			      struct sna_dri_frame_event *info)
 {
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
 	     info, info->client, (long)info->drawable_id));
 
-	list_del(&info->drawable_events);
-
+	if (draw && draw->type == DRAWABLE_WINDOW)
+		sna_dri_remove_frame_event((WindowPtr)draw, info);
 	_sna_dri_destroy_buffer(sna, info->front);
 	_sna_dri_destroy_buffer(sna, info->back);
 
 	if (info->old_front.bo)
-		sna_dri_frame_event_release_bo(&sna->kgem, info->old_front.bo);
+		kgem_bo_destroy(&sna->kgem, info->old_front.bo);
 
 	if (info->next_front.bo)
-		sna_dri_frame_event_release_bo(&sna->kgem, info->next_front.bo);
+		kgem_bo_destroy(&sna->kgem, info->next_front.bo);
 
 	if (info->cache.bo)
-		sna_dri_frame_event_release_bo(&sna->kgem, info->cache.bo);
+		kgem_bo_destroy(&sna->kgem, info->cache.bo);
 
 	if (info->bo)
 		kgem_bo_destroy(&sna->kgem, info->bo);
@@ -1084,6 +1088,8 @@ static void chain_swap(struct sna *sna,
 	drmVBlank vbl;
 	int type;
 
+	assert(chain == sna_dri_window_get_chain((WindowPtr)draw));
+
 	/* In theory, it shoudln't be possible for cross-chaining to occur! */
 	if (chain->type == DRI2_XCHG_THROTTLE) {
 		DBG(("%s: performing chained exchange\n", __FUNCTION__));
@@ -1095,7 +1101,7 @@ static void chain_swap(struct sna *sna,
 		chain->bo = sna_dri_copy_to_front(sna, draw, NULL,
 						  get_private(chain->front)->bo,
 						  get_private(chain->back)->bo,
-						 true);
+						  true);
 
 		type = DRI2_BLIT_COMPLETE;
 	}
@@ -1113,13 +1119,39 @@ static void chain_swap(struct sna *sna,
 	vbl.request.sequence = 0;
 	vbl.request.signal = (unsigned long)chain;
 	if (sna_wait_vblank(sna, &vbl))
-		sna_dri_frame_event_info_free(sna, chain);
+		sna_dri_frame_event_info_free(sna, draw, chain);
+}
+
+static bool sna_dri_blit_complete(struct sna *sna,
+				  struct sna_dri_frame_event *info)
+{
+	if (info->bo && kgem_bo_is_busy(info->bo)) {
+		kgem_retire(&sna->kgem);
+		if (kgem_bo_is_busy(info->bo)) {
+			drmVBlank vbl;
+
+			DBG(("%s: vsync'ed blit is still busy, postponing\n",
+			     __FUNCTION__));
+
+			VG_CLEAR(vbl);
+			vbl.request.type =
+				DRM_VBLANK_RELATIVE |
+				DRM_VBLANK_EVENT |
+				pipe_select(info->pipe);
+			vbl.request.sequence = 1;
+			vbl.request.signal = (unsigned long)info;
+			if (!sna_wait_vblank(sna, &vbl))
+				return false;
+		}
+	}
+
+	return true;
 }
 
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 {
 	struct sna_dri_frame_event *info = (void *)(uintptr_t)event->user_data;
-	DrawablePtr draw;
+	DrawablePtr draw = NULL;
 	int status;
 
 	DBG(("%s(id=%d, type=%d)\n", __FUNCTION__,
@@ -1150,81 +1182,38 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 						 get_private(info->front)->bo,
 						 get_private(info->back)->bo,
 						 true);
-		info->type = DRI2_SWAP_THROTTLE;
+		info->type = DRI2_SWAP_WAIT;
 		/* fall through to SwapComplete */
+	case DRI2_SWAP_WAIT:
+		if (!sna_dri_blit_complete(sna, info))
+			return;
+
+		DRI2SwapComplete(info->client,
+				 draw, event->sequence,
+				 event->tv_sec, event->tv_usec,
+				 DRI2_BLIT_COMPLETE,
+				 info->client ? info->event_complete : NULL,
+				 info->event_data);
+		break;
+
 	case DRI2_SWAP_THROTTLE:
+		if (!sna_dri_blit_complete(sna, info))
+			return;
+
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
 		     __FUNCTION__, info->type,
 		     event->sequence, event->tv_sec, event->tv_usec));
-
-		if (info->bo && kgem_bo_is_busy(info->bo)) {
-			kgem_retire(&sna->kgem);
-			if (kgem_bo_is_busy(info->bo)) {
-				drmVBlank vbl;
-
-				DBG(("%s: vsync'ed blit is still busy, postponing\n",
-				     __FUNCTION__));
-
-				VG_CLEAR(vbl);
-				vbl.request.type =
-					DRM_VBLANK_RELATIVE |
-					DRM_VBLANK_EVENT |
-					pipe_select(info->pipe);
-				vbl.request.sequence = 1;
-				vbl.request.signal = (unsigned long)info;
-				if (!sna_wait_vblank(sna, &vbl))
-					return;
-			}
-		}
-
-		if (info->chain) {
-			struct sna_dri_frame_event *chain = info->chain;
-
-			assert(get_private(info->front)->chain == info);
-			get_private(info->front)->chain = chain;
-
-			chain_swap(sna, draw, event, chain);
-
-			info->chain = NULL;
-		} else if (get_private(info->front)->chain == info) {
-			DBG(("%s: chain complete\n", __FUNCTION__));
-			get_private(info->front)->chain = NULL;
-		} else {
-			DBG(("%s: deferred blit complete, unblock client\n",
-			     __FUNCTION__));
-			DRI2SwapComplete(info->client,
-					 draw, event->sequence,
-					 event->tv_sec, event->tv_usec,
-					 DRI2_BLIT_COMPLETE,
-					 info->client ? info->event_complete : NULL,
-					 info->event_data);
-		}
 		break;
 
 	case DRI2_XCHG_THROTTLE:
 		DBG(("%s: xchg throttle\n", __FUNCTION__));
-
-		if (info->chain) {
-			struct sna_dri_frame_event *chain = info->chain;
-
-			assert(get_private(info->front)->chain == info);
-			get_private(info->front)->chain = chain;
-
-			chain_swap(sna, draw, event, chain);
-
-			info->chain = NULL;
-		} else {
-			DBG(("%s: chain complete\n", __FUNCTION__));
-			get_private(info->front)->chain = NULL;
-		}
 		break;
 
 	case DRI2_WAITMSC:
-		if (info->client)
-			DRI2WaitMSCComplete(info->client, draw,
-					    event->sequence,
-					    event->tv_sec,
-					    event->tv_usec);
+		DRI2WaitMSCComplete(info->client, draw,
+				    event->sequence,
+				    event->tv_sec,
+				    event->tv_usec);
 		break;
 	default:
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
@@ -1233,8 +1222,14 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 		break;
 	}
 
+	if (info->chain) {
+		sna_dri_remove_frame_event((WindowPtr)draw, info);
+		chain_swap(sna, draw, event, info->chain);
+		draw = NULL;
+	}
+
 done:
-	sna_dri_frame_event_info_free(sna, info);
+	sna_dri_frame_event_info_free(sna, draw, info);
 }
 
 static int
@@ -1276,7 +1271,8 @@ sna_dri_flip_continue(struct sna *sna,
 static void sna_dri_flip_event(struct sna *sna,
 			       struct sna_dri_frame_event *flip)
 {
-	DrawablePtr drawable;
+	DrawablePtr draw = NULL;
+	int status;
 
 	DBG(("%s(frame=%d, tv=%d.%06d, type=%d)\n",
 	     __FUNCTION__,
@@ -1285,80 +1281,74 @@ static void sna_dri_flip_event(struct sna *sna,
 	     flip->fe_tv_usec,
 	     flip->type));
 
+	if (sna->dri.flip_pending == flip)
+		sna->dri.flip_pending = NULL;
+
+	status = BadDrawable;
+	if (flip->drawable_id)
+		status = dixLookupDrawable(&draw,
+					   flip->drawable_id,
+					   serverClient,
+					   M_ANY, DixWriteAccess);
+	if (status != Success) {
+		DBG(("%s: drawable already gone\n", __FUNCTION__));
+		sna_dri_frame_event_info_free(sna, draw, flip);
+		return;
+	}
+
 	/* We assume our flips arrive in order, so we don't check the frame */
 	switch (flip->type) {
 	case DRI2_FLIP:
 		/* Deliver cached msc, ust from reference crtc */
-		/* Check for too small vblank count of pageflip completion, taking wraparound
-		 * into account. This usually means some defective kms pageflip completion,
-		 * causing wrong (msc, ust) return values and possible visual corruption.
+		/* Check for too small vblank count of pageflip completion,
+		 * taking wraparound * into account. This usually means some
+		 * defective kms pageflip completion, causing wrong (msc, ust)
+		 * return values and possible visual corruption.
 		 */
-		if (flip->drawable_id &&
-		    dixLookupDrawable(&drawable,
-				      flip->drawable_id,
-				      serverClient,
-				      M_ANY, DixWriteAccess) == Success) {
-			if ((flip->fe_frame < flip->frame) &&
-			    (flip->frame - flip->fe_frame < 5)) {
-				static int limit = 5;
-
-				/* XXX we are currently hitting this path with older
-				 * kernels, so make it quieter.
-				 */
-				if (limit) {
-					xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-						   "%s: Pageflip completion has impossible msc %d < target_msc %d\n",
-						   __func__, flip->fe_frame, flip->frame);
-					limit--;
-				}
-
-				/* All-0 values signal timestamping failure. */
-				flip->fe_frame = flip->fe_tv_sec = flip->fe_tv_usec = 0;
-			}
-
-			DBG(("%s: flip complete\n", __FUNCTION__));
-			DRI2SwapComplete(flip->client, drawable,
-					 flip->fe_frame,
-					 flip->fe_tv_sec,
-					 flip->fe_tv_usec,
-					 DRI2_FLIP_COMPLETE,
-					 flip->client ? flip->event_complete : NULL,
-					 flip->event_data);
+		if (flip->fe_frame < flip->frame &&
+		    flip->frame - flip->fe_frame < 5) {
+			/* All-0 values signal timestamping failure. */
+			flip->fe_frame = flip->fe_tv_sec = flip->fe_tv_usec = 0;
 		}
 
-		sna_dri_frame_event_info_free(sna, flip);
+		DBG(("%s: flip complete\n", __FUNCTION__));
+		DRI2SwapComplete(flip->client, draw,
+				 flip->fe_frame,
+				 flip->fe_tv_sec,
+				 flip->fe_tv_usec,
+				 DRI2_FLIP_COMPLETE,
+				 flip->client ? flip->event_complete : NULL,
+				 flip->event_data);
+
+		sna_dri_frame_event_info_free(sna, draw, flip);
 		break;
 
 	case DRI2_FLIP_THROTTLE:
-		assert(sna->dri.flip_pending == flip);
-		sna->dri.flip_pending = NULL;
-
-		if (flip->next_front.name &&
-		    flip->drawable_id &&
-		    dixLookupDrawable(&drawable,
-				      flip->drawable_id,
-				      serverClient,
-				      M_ANY, DixWriteAccess) == Success) {
-			if (can_flip(sna, drawable, flip->front, flip->back) &&
-			    sna_dri_flip_continue(sna, drawable, flip)) {
-				DRI2SwapComplete(flip->client, drawable,
-						0, 0, 0,
-						DRI2_FLIP_COMPLETE,
-						flip->client ? flip->event_complete : NULL,
-						flip->event_data);
-			} else {
-				DBG(("%s: no longer able to flip\n",
-				     __FUNCTION__));
+		if (!flip->next_front.name) {
+			DBG(("%s: flip chain complete\n", __FUNCTION__));
+			sna_dri_frame_event_info_free(sna, draw, flip);
+		} else if (can_flip(sna, draw, flip->front, flip->back) &&
+			   sna_dri_flip_continue(sna, draw, flip)) {
+			DRI2SwapComplete(flip->client, draw,
+					 0, 0, 0,
+					 DRI2_FLIP_COMPLETE,
+					 flip->client ? flip->event_complete : NULL,
+					 flip->event_data);
+		} else {
+			DBG(("%s: no longer able to flip\n", __FUNCTION__));
+
+			flip->bo = sna_dri_copy_to_front(sna, draw, NULL,
+							 get_private(flip->front)->bo,
+							 get_private(flip->back)->bo,
+							 false);
+			DRI2SwapComplete(flip->client, draw,
+					 0, 0, 0,
+					 DRI2_BLIT_COMPLETE,
+					 flip->client ? flip->event_complete : NULL,
+					 flip->event_data);
 
-				DRI2SwapComplete(flip->client, drawable,
-						0, 0, 0,
-						DRI2_EXCHANGE_COMPLETE,
-						flip->client ? flip->event_complete : NULL,
-						flip->event_data);
-				sna_dri_frame_event_info_free(sna, flip);
-			}
-		} else
-			sna_dri_frame_event_info_free(sna, flip);
+			sna_dri_frame_event_info_free(sna, draw, flip);
+		}
 		break;
 
 #if USE_ASYNC_SWAP
@@ -1402,7 +1392,7 @@ finish_async_flip:
 
 			DBG(("%s: async flip completed\n", __FUNCTION__));
 			sna->dri.flip_pending = NULL;
-			sna_dri_frame_event_info_free(sna, flip);
+			sna_dri_frame_event_info_free(sna, draw, flip);
 		}
 		break;
 #endif
@@ -1478,9 +1468,11 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 				return TRUE;
 			} else {
 				/* We need to first wait (one vblank) for the
-				 * async flips to complete before this client can
-				 * take over.
+				 * async flips to complete before this client
+				 * can take over.
 				 */
+				DBG(("%s: queueing flip after pending completion\n",
+				     __FUNCTION__));
 				type = DRI2_FLIP;
 			}
 		}
@@ -1499,36 +1491,33 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		info->back = back;
 		info->pipe = pipe;
 
-		if (!sna_dri_add_frame_event(draw, info)) {
-			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
-			free(info);
-			return FALSE;
-		}
-
+		sna_dri_add_frame_event(draw, info);
 		sna_dri_reference_buffer(front);
 		sna_dri_reference_buffer(back);
 
 		if (!sna_dri_page_flip(sna, info)) {
 			DBG(("%s: failed to queue page flip\n", __FUNCTION__));
-			sna_dri_frame_event_info_free(sna, info);
+			sna_dri_frame_event_info_free(sna, draw, info);
 			return FALSE;
 		}
 
-		get_private(info->back)->bo =
-			kgem_create_2d(&sna->kgem,
-				       draw->width,
-				       draw->height,
-				       draw->bitsPerPixel,
-				       get_private(info->front)->bo->tiling,
-				       CREATE_EXACT);
-		info->back->name = kgem_bo_flink(&sna->kgem,
-						 get_private(info->back)->bo);
-		sna->dri.flip_pending = info;
+		if (type != DRI2_FLIP) {
+			get_private(info->back)->bo =
+				kgem_create_2d(&sna->kgem,
+					       draw->width,
+					       draw->height,
+					       draw->bitsPerPixel,
+					       get_private(info->front)->bo->tiling,
+					       CREATE_EXACT);
+			info->back->name = kgem_bo_flink(&sna->kgem,
+							 get_private(info->back)->bo);
+			sna->dri.flip_pending = info;
 
-		DRI2SwapComplete(info->client, draw, 0, 0, 0,
-				 DRI2_EXCHANGE_COMPLETE,
-				 info->event_complete,
-				 info->event_data);
+			DRI2SwapComplete(info->client, draw, 0, 0, 0,
+					 DRI2_EXCHANGE_COMPLETE,
+					 info->event_complete,
+					 info->event_data);
+		}
 	} else {
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (info == NULL)
@@ -1543,12 +1532,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		info->pipe = pipe;
 		info->type = DRI2_FLIP;
 
-		if (!sna_dri_add_frame_event(draw, info)) {
-			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
-			free(info);
-			return FALSE;
-		}
-
+		sna_dri_add_frame_event(draw, info);
 		sna_dri_reference_buffer(front);
 		sna_dri_reference_buffer(back);
 
@@ -1556,7 +1540,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 		vbl.request.sequence = 0;
 		if (sna_wait_vblank(sna, &vbl)) {
-			sna_dri_frame_event_info_free(sna, info);
+			sna_dri_frame_event_info_free(sna, draw, info);
 			return FALSE;
 		}
 
@@ -1611,7 +1595,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence -= 1;
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl)) {
-			sna_dri_frame_event_info_free(sna, info);
+			sna_dri_frame_event_info_free(sna, draw, info);
 			return FALSE;
 		}
 
@@ -1626,7 +1610,6 @@ sna_dri_immediate_xchg(struct sna *sna,
 		       DrawablePtr draw,
 		       struct sna_dri_frame_event *info)
 {
-	struct sna_dri_private *priv = get_private(info->front);
 	drmVBlank vbl;
 
 	DBG(("%s: emitting immediate exchange, throttling client\n", __FUNCTION__));
@@ -1634,7 +1617,7 @@ sna_dri_immediate_xchg(struct sna *sna,
 
 	if ((sna->flags & SNA_NO_WAIT) == 0) {
 		info->type = DRI2_XCHG_THROTTLE;
-		if (priv->chain == NULL) {
+		if (sna_dri_window_get_chain((WindowPtr)draw) == info) {
 			DBG(("%s: no pending xchg, starting chain\n",
 			     __FUNCTION__));
 
@@ -1650,15 +1633,8 @@ sna_dri_immediate_xchg(struct sna *sna,
 				pipe_select(info->pipe);
 			vbl.request.sequence = 0;
 			vbl.request.signal = (unsigned long)info;
-			if (sna_wait_vblank(sna, &vbl) == 0)
-				priv->chain = info;
-			else
-				sna_dri_frame_event_info_free(sna, info);
-		} else {
-			DBG(("%s: attaching to vsync chain\n",
-			     __FUNCTION__));
-			assert(priv->chain->chain == NULL);
-			priv->chain->chain = info;
+			if (sna_wait_vblank(sna, &vbl))
+				sna_dri_frame_event_info_free(sna, draw, info);
 		}
 	} else {
 		sna_dri_exchange_buffers(draw, info->front, info->back);
@@ -1666,7 +1642,7 @@ sna_dri_immediate_xchg(struct sna *sna,
 				 DRI2_EXCHANGE_COMPLETE,
 				 info->event_complete,
 				 info->event_data);
-		sna_dri_frame_event_info_free(sna, info);
+		sna_dri_frame_event_info_free(sna, draw, info);
 	}
 }
 
@@ -1675,7 +1651,6 @@ sna_dri_immediate_blit(struct sna *sna,
 		       DrawablePtr draw,
 		       struct sna_dri_frame_event *info)
 {
-	struct sna_dri_private *priv = get_private(info->front);
 	drmVBlank vbl;
 
 	DBG(("%s: emitting immediate blit, throttling client\n", __FUNCTION__));
@@ -1683,7 +1658,7 @@ sna_dri_immediate_blit(struct sna *sna,
 
 	if ((sna->flags & SNA_NO_WAIT) == 0) {
 		info->type = DRI2_SWAP_THROTTLE;
-		if (priv->chain == NULL) {
+		if (sna_dri_window_get_chain((WindowPtr)draw) == info) {
 			DBG(("%s: no pending blit, starting chain\n",
 			     __FUNCTION__));
 
@@ -1703,26 +1678,19 @@ sna_dri_immediate_blit(struct sna *sna,
 				pipe_select(info->pipe);
 			vbl.request.sequence = 0;
 			vbl.request.signal = (unsigned long)info;
-			if (sna_wait_vblank(sna, &vbl) == 0)
-				priv->chain = info;
-			else
-				sna_dri_frame_event_info_free(sna, info);
-		} else {
-			DBG(("%s: attaching to vsync chain\n",
-			     __FUNCTION__));
-			assert(priv->chain->chain == NULL);
-			priv->chain->chain = info;
+			if (sna_wait_vblank(sna, &vbl))
+				sna_dri_frame_event_info_free(sna, draw, info);
 		}
 	} else {
 		info->bo = sna_dri_copy_to_front(sna, draw, NULL,
 						 get_private(info->front)->bo,
 						 get_private(info->back)->bo,
-						 true);
+						 false);
 		DRI2SwapComplete(info->client, draw, 0, 0, 0,
 				 DRI2_BLIT_COMPLETE,
 				 info->event_complete,
 				 info->event_data);
-		sna_dri_frame_event_info_free(sna, info);
+		sna_dri_frame_event_info_free(sna, draw, info);
 	}
 }
 
@@ -1748,8 +1716,8 @@ sna_dri_immediate_blit(struct sna *sna,
  */
 static int
 sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
-		       DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
-		       CARD64 remainder, DRI2SwapEventPtr func, void *data)
+		      DRI2BufferPtr back, CARD64 *target_msc, CARD64 divisor,
+		      CARD64 remainder, DRI2SwapEventPtr func, void *data)
 {
 	ScreenPtr screen = draw->pScreen;
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
@@ -1812,13 +1780,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	info->back = back;
 	info->pipe = pipe;
 
-	if (!sna_dri_add_frame_event(draw, info)) {
-		DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
-		free(info);
-		info = NULL;
-		goto blit_fallback;
-	}
-
+	sna_dri_add_frame_event(draw, info);
 	sna_dri_reference_buffer(front);
 	sna_dri_reference_buffer(back);
 
@@ -1854,16 +1816,16 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		info->frame = *target_msc;
 		info->type = DRI2_SWAP;
 
-		 vbl.request.type =
-			 DRM_VBLANK_ABSOLUTE |
-			 DRM_VBLANK_EVENT |
-			 pipe_select(pipe);
-		 vbl.request.sequence = *target_msc;
-		 vbl.request.signal = (unsigned long)info;
-		 if (sna_wait_vblank(sna, &vbl))
-			 goto blit_fallback;
+		vbl.request.type =
+			DRM_VBLANK_ABSOLUTE |
+			DRM_VBLANK_EVENT |
+			pipe_select(pipe);
+		vbl.request.sequence = *target_msc;
+		vbl.request.signal = (unsigned long)info;
+		if (sna_wait_vblank(sna, &vbl))
+			goto blit_fallback;
 
-		 return TRUE;
+		return TRUE;
 	}
 
 	/*
@@ -1872,10 +1834,10 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	 * equation.
 	 */
 	DBG(("%s: missed target, queueing event for next: current=%d, target=%d,  divisor=%d\n",
-		     __FUNCTION__,
-		     (int)current_msc,
-		     (int)*target_msc,
-		     (int)divisor));
+	     __FUNCTION__,
+	     (int)current_msc,
+	     (int)*target_msc,
+	     (int)divisor));
 
 	vbl.request.type =
 		DRM_VBLANK_ABSOLUTE |
@@ -1917,7 +1879,7 @@ blit_fallback:
 		pipe = DRI2_BLIT_COMPLETE;
 	}
 	if (info)
-		sna_dri_frame_event_info_free(sna, info);
+		sna_dri_frame_event_info_free(sna, draw, info);
 	DRI2SwapComplete(client, draw, 0, 0, 0, pipe, func, data);
 	*target_msc = 0; /* offscreen, so zero out target vblank count */
 	return TRUE;
@@ -1977,21 +1939,12 @@ blit:
 		info->front = front;
 		info->back = back;
 
-		if (!sna_dri_add_frame_event(draw, info)) {
-			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
-			free(info);
-			goto blit;
-		}
-
-		DBG(("%s: referencing (%p:%d, %p:%d)\n",
-		     __FUNCTION__,
-		     front, get_private(front)->refcnt,
-		     back, get_private(back)->refcnt));
+		sna_dri_add_frame_event(draw, info);
 		sna_dri_reference_buffer(front);
 		sna_dri_reference_buffer(back);
 
 		if (!sna_dri_page_flip(sna, info)) {
-			sna_dri_frame_event_info_free(sna, info);
+			sna_dri_frame_event_info_free(sna, draw, info);
 			goto blit;
 		}
 
@@ -2081,7 +2034,7 @@ sna_dri_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
  */
 static int
 sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
-			   CARD64 divisor, CARD64 remainder)
+			  CARD64 divisor, CARD64 remainder)
 {
 	struct sna *sna = to_sna_from_drawable(draw);
 	struct sna_dri_frame_event *info = NULL;
@@ -2133,11 +2086,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	info->drawable_id = draw->id;
 	info->client = client;
 	info->type = DRI2_WAITMSC;
-	if (!sna_dri_add_frame_event(draw, info)) {
-		DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
-		free(info);
-		goto out_complete;
-	}
+	sna_dri_add_frame_event(draw, info);
 
 	/*
 	 * If divisor is zero, or current_msc is smaller than target_msc,
@@ -2186,7 +2135,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	return TRUE;
 
 out_free_info:
-	sna_dri_frame_event_info_free(sna, info);
+	sna_dri_frame_event_info_free(sna, draw, info);
 out_complete:
 	DRI2WaitMSCComplete(client, draw, target_msc, 0, 0);
 	return TRUE;
commit 81cd9aa80091b9bb08b50062f117d678a3bc7a91
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 16:12:49 2012 +0100

    sna: Tweak start/stop of the deferred flush
    
    As we now emit work whenever we wakeup and find the GPU idle, we rarely
    actually have pending work in the deferred flush queue, so try to avoid
    installing a timer if we are not accumulating work.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 60595f5..681b283 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12254,15 +12254,14 @@ static bool has_shadow(struct sna *sna)
 	return !sna->mode.shadow_flip;
 }
 
-static bool need_flush(struct sna *sna, struct sna_pixmap *scanout)
+static bool start_flush(struct sna *sna, struct sna_pixmap *scanout)
 {
-	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d), busy=%d)\n",
+	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d))\n",
 	     __FUNCTION__,
 	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
 	     has_shadow(sna),
 	     scanout && scanout->cpu_damage != NULL,
-	     scanout && scanout->gpu_bo && scanout->gpu_bo->exec != NULL,
-	     scanout && scanout->gpu_bo && __kgem_flush(&sna->kgem, scanout->gpu_bo)));
+	     scanout && scanout->gpu_bo && scanout->gpu_bo->exec != NULL));
 
 	if (has_shadow(sna))
 		return true;
@@ -12270,10 +12269,25 @@ static bool need_flush(struct sna *sna, struct sna_pixmap *scanout)
 	if (!scanout)
 		return false;
 
-	if (scanout->cpu_damage || scanout->gpu_bo->exec)
+	return scanout->cpu_damage || scanout->gpu_bo->exec;
+}
+
+static bool stop_flush(struct sna *sna, struct sna_pixmap *scanout)
+{
+	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d))\n",
+	     __FUNCTION__,
+	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
+	     has_shadow(sna),
+	     scanout && scanout->cpu_damage != NULL,
+	     scanout && scanout->gpu_bo && scanout->gpu_bo->rq != NULL));
+
+	if (has_shadow(sna))
 		return true;
 
-	return __kgem_flush(&sna->kgem, scanout->gpu_bo);
+	if (!scanout)
+		return false;
+
+	return scanout->cpu_damage || scanout->gpu_bo->needs_flush;
 }
 
 static bool sna_accel_do_flush(struct sna *sna)
@@ -12301,8 +12315,9 @@ static bool sna_accel_do_flush(struct sna *sna)
 			return true;
 		}
 	} else {
-		if (!need_flush(sna, priv)) {
+		if (!start_flush(sna, priv)) {
 			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
+			kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
@@ -12417,7 +12432,7 @@ static void sna_accel_flush(struct sna *sna)
 	     sna->kgem.nbatch,
 	     sna->kgem.busy));
 
-	busy = need_flush(sna, priv);
+	busy = stop_flush(sna, priv);
 	if (!sna->kgem.busy && !busy)
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 	sna->kgem.busy = busy;
commit 6cb0c631e4eafc09f1677c73906de9108d735de4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 15:10:18 2012 +0100

    sna/dri: Clarify the message for one failure case
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 6627847..6994fba 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -2058,12 +2058,11 @@ sna_dri_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
 	}
 
 	VG_CLEAR(vbl);
-
 	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
-
 	if (sna_wait_vblank(sna, &vbl)) {
-		DBG(("%s: failed on pipe %d\n", __FUNCTION__, pipe));
+		DBG(("%s: query failed on pipe %d, ret=%d\n",
+		     __FUNCTION__, pipe, errno));
 		return FALSE;
 	}
 
commit 9a314d18cef1e08b23f9dca861ad2ba396bb1080
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 14:56:05 2012 +0100

    sna/dri: Add a couple of missing VG_CLEAR on vblanks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index beadf57..6627847 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1630,6 +1630,7 @@ sna_dri_immediate_xchg(struct sna *sna,
 	drmVBlank vbl;
 
 	DBG(("%s: emitting immediate exchange, throttling client\n", __FUNCTION__));
+	VG_CLEAR(vbl);
 
 	if ((sna->flags & SNA_NO_WAIT) == 0) {
 		info->type = DRI2_XCHG_THROTTLE;
@@ -1678,6 +1679,7 @@ sna_dri_immediate_blit(struct sna *sna,
 	drmVBlank vbl;
 
 	DBG(("%s: emitting immediate blit, throttling client\n", __FUNCTION__));
+	VG_CLEAR(vbl);
 
 	if ((sna->flags & SNA_NO_WAIT) == 0) {
 		info->type = DRI2_SWAP_THROTTLE;
commit 66a53c15cb5ee729fb43ea9713fd8538a3f982ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 14:46:08 2012 +0100

    sna/dri: Couple the frame events into DestroyWindow
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 7f4c0bf..5f3ca76 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -358,6 +358,7 @@ extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event);
+void sna_dri_destroy_window(WindowPtr win);
 void sna_dri_close(struct sna *sna, ScreenPtr pScreen);
 
 extern bool sna_crtc_on(xf86CrtcPtr crtc);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 099075b..60595f5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12618,6 +12618,7 @@ sna_unmap_window(WindowPtr win)
 static Bool
 sna_destroy_window(WindowPtr win)
 {
+	sna_dri_destroy_window(win);
 	return TRUE;
 }
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index fd965bc..beadf57 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -79,8 +79,7 @@ struct sna_dri_frame_event {
 	int pipe;
 	int count;
 
-	struct list drawable_resource;
-	struct list client_resource;
+	struct list drawable_events;
 
 	/* for swaps & flips only */
 	DRI2SwapEventPtr event_complete;
@@ -111,11 +110,6 @@ struct sna_dri_private {
 	struct sna_dri_frame_event *chain;
 };
 
-static DevPrivateKeyRec sna_client_key;
-
-static RESTYPE frame_event_client_type;
-static RESTYPE frame_event_drawable_type;
-
 static inline struct sna_dri_frame_event *
 to_frame_event(uintptr_t  data)
 {
@@ -195,12 +189,12 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	return priv->gpu_bo;
 }
 
-constant static inline void *sna_pixmap_get_dri(PixmapPtr pixmap)
+constant static inline void *sna_pixmap_get_buffer(PixmapPtr pixmap)
 {
 	return ((void **)pixmap->devPrivates)[2];
 }
 
-static inline void *sna_pixmap_set_dri(PixmapPtr pixmap, void *ptr)
+static inline void sna_pixmap_set_buffer(PixmapPtr pixmap, void *ptr)
 {
 	((void **)pixmap->devPrivates)[2] = ptr;
 }
@@ -225,7 +219,7 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
 		pixmap = get_drawable_pixmap(drawable);
-		buffer = sna_pixmap_get_dri(pixmap);
+		buffer = sna_pixmap_get_buffer(pixmap);
 		if (buffer) {
 			DBG(("%s: reusing front buffer attachment\n",
 			     __FUNCTION__));
@@ -332,8 +326,8 @@ sna_dri_create_buffer(DrawablePtr drawable,
 
 	if (pixmap) {
 		assert(attachment == DRI2BufferFrontLeft);
-		sna_pixmap_set_dri(pixmap, buffer);
-		assert(sna_pixmap_get_dri(pixmap) == buffer);
+		sna_pixmap_set_buffer(pixmap, buffer);
+		assert(sna_pixmap_get_buffer(pixmap) == buffer);
 		pixmap->refcnt++;
 	}
 
@@ -368,7 +362,7 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 				priv->pinned = pixmap == sna->front;
 			}
 
-			sna_pixmap_set_dri(pixmap, NULL);
+			sna_pixmap_set_buffer(pixmap, NULL);
 			pixmap->drawable.pScreen->DestroyPixmap(pixmap);
 		}
 
@@ -780,147 +774,66 @@ sna_dri_get_pipe(DrawablePtr pDraw)
 }
 
 static struct list *
-get_resource(XID id, RESTYPE type)
+sna_dri_get_window_events(WindowPtr win)
 {
-	struct list *resource;
-	void *ptr;
+	struct list *head;
 
-	ptr = NULL;
-	dixLookupResourceByType(&ptr, id, type, NULL, DixWriteAccess);
-	if (ptr)
-		return ptr;
+	head = ((void **)win->devPrivates)[1];
+	if (head)
+		return head;
 
-	resource = malloc(sizeof(*resource));
-	if (resource == NULL)
+	head = malloc(sizeof(*head));
+	if (head == NULL)
 		return NULL;
 
-	if (!AddResource(id, type, resource)) {
-		DBG(("%s: failed to add resource (%ld, %ld)\n",
-		     __FUNCTION__, (long)id, (long)type));
-		free(resource);
-		return NULL;
-	}
-
-	DBG(("%s(%ld): new(%ld)=%p\n", __FUNCTION__,
-	     (long)id, (long)type, resource));
-
-	list_init(resource);
-	return resource;
+	list_init(head);
+	((void **)win->devPrivates)[1] = head;
+	return head;
 }
 
-static int
-sna_dri_frame_event_client_gone(void *data, XID id)
+void sna_dri_destroy_window(WindowPtr win)
 {
-	struct list *resource = data;
-
-	DBG(("%s(%ld): %p\n", __FUNCTION__, (long)id, data));
-
-	while (!list_is_empty(resource)) {
-		struct sna_dri_frame_event *info =
-			list_first_entry(resource,
-					 struct sna_dri_frame_event,
-					 client_resource);
-
-		DBG(("%s: marking client gone [%p]: %p\n",
-		     __FUNCTION__, info, info->client));
-
-		list_del(&info->client_resource);
-		info->client = NULL;
-	}
-	free(resource);
-
-	return Success;
-}
+	struct list *head = ((void **)win->devPrivates)[1];
 
-static int
-sna_dri_frame_event_drawable_gone(void *data, XID id)
-{
-	struct list *resource = data;
+	if (head == NULL)
+		return;
 
-	DBG(("%s(%ld): resource=%p\n", __FUNCTION__, (long)id, resource));
+	DBG(("%s: window=%ld\n", __FUNCTION__, win->drawable.serialNumber));
 
-	while (!list_is_empty(resource)) {
+	while (!list_is_empty(head)) {
 		struct sna_dri_frame_event *info =
-			list_first_entry(resource,
+			list_first_entry(head,
 					 struct sna_dri_frame_event,
-					 drawable_resource);
+					 drawable_events);
 
 		DBG(("%s: marking drawable gone [%p]: %ld\n",
 		     __FUNCTION__, info, (long)info->drawable_id));
 
-		list_del(&info->drawable_resource);
+		list_del(&info->drawable_events);
 		info->drawable_id = None;
 	}
-	free(resource);
-
-	return Success;
+	free(head);
 }
 
-static Bool
-sna_dri_register_frame_event_resource_types(void)
+static bool
+sna_dri_add_frame_event(DrawablePtr draw, struct sna_dri_frame_event *info)
 {
-	frame_event_client_type =
-		CreateNewResourceType(sna_dri_frame_event_client_gone,
-				      "Frame Event Client");
-	if (!frame_event_client_type)
-		return FALSE;
-
-	DBG(("%s: frame_event_client_type=%d\n",
-	     __FUNCTION__, frame_event_client_type));
+	struct list *head;
 
-	frame_event_drawable_type =
-		CreateNewResourceType(sna_dri_frame_event_drawable_gone,
-				      "Frame Event Drawable");
-	if (!frame_event_drawable_type)
-		return FALSE;
-
-	DBG(("%s: frame_event_drawable_type=%d\n",
-	     __FUNCTION__, frame_event_drawable_type));
+	if (draw->type != DRAWABLE_WINDOW)
+		return true;
 
-	return TRUE;
-}
-
-static XID
-get_client_id(ClientPtr client)
-{
-	XID *ptr = dixGetPrivateAddr(&client->devPrivates, &sna_client_key);
-	if (*ptr == 0)
-		*ptr = FakeClientID(client->index);
-	return *ptr;
-}
-
-/*
- * Hook this frame event into the server resource
- * database so we can clean it up if the drawable or
- * client exits while the swap is pending
- */
-static Bool
-sna_dri_add_frame_event(struct sna_dri_frame_event *info)
-{
-	struct list *resource;
-
-	resource = get_resource(get_client_id(info->client),
-				frame_event_client_type);
-	if (resource == NULL) {
-		DBG(("%s: failed to get client resource\n", __FUNCTION__));
-		return FALSE;
+	head = sna_dri_get_window_events((WindowPtr)draw);
+	if (head == NULL) {
+		DBG(("%s: failed to get drawable events\n", __FUNCTION__));
+		return false;
 	}
 
-	list_add(&info->client_resource, resource);
+	list_add(&info->drawable_events, head);
 
-	resource = get_resource(info->drawable_id, frame_event_drawable_type);
-	if (resource == NULL) {
-		DBG(("%s: failed to get drawable resource\n", __FUNCTION__));
-		list_del(&info->client_resource);
-		return FALSE;
-	}
-
-	list_add(&info->drawable_resource, resource);
-
-	DBG(("%s: add[%p] (%p, %ld)\n", __FUNCTION__,
-	     info, info->client, (long)info->drawable_id));
-
-	return TRUE;
+	DBG(("%s: add[%p] to window %ld)\n",
+	     __FUNCTION__, info, (long)draw->id));
+	return true;
 }
 
 static void
@@ -936,8 +849,7 @@ sna_dri_frame_event_info_free(struct sna *sna,
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
 	     info, info->client, (long)info->drawable_id));
 
-	list_del(&info->client_resource);
-	list_del(&info->drawable_resource);
+	list_del(&info->drawable_events);
 
 	_sna_dri_destroy_buffer(sna, info->front);
 	_sna_dri_destroy_buffer(sna, info->back);
@@ -1587,7 +1499,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		info->back = back;
 		info->pipe = pipe;
 
-		if (!sna_dri_add_frame_event(info)) {
+		if (!sna_dri_add_frame_event(draw, info)) {
 			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 			free(info);
 			return FALSE;
@@ -1631,7 +1543,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		info->pipe = pipe;
 		info->type = DRI2_FLIP;
 
-		if (!sna_dri_add_frame_event(info)) {
+		if (!sna_dri_add_frame_event(draw, info)) {
 			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 			free(info);
 			return FALSE;
@@ -1898,7 +1810,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	info->back = back;
 	info->pipe = pipe;
 
-	if (!sna_dri_add_frame_event(info)) {
+	if (!sna_dri_add_frame_event(draw, info)) {
 		DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 		free(info);
 		info = NULL;
@@ -2063,7 +1975,7 @@ blit:
 		info->front = front;
 		info->back = back;
 
-		if (!sna_dri_add_frame_event(info)) {
+		if (!sna_dri_add_frame_event(draw, info)) {
 			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 			free(info);
 			goto blit;
@@ -2220,7 +2132,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	info->drawable_id = draw->id;
 	info->client = client;
 	info->type = DRI2_WAITMSC;
-	if (!sna_dri_add_frame_event(info)) {
+	if (!sna_dri_add_frame_event(draw, info)) {
 		DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 		free(info);
 		goto out_complete;
@@ -2280,8 +2192,6 @@ out_complete:
 }
 #endif
 
-static unsigned int dri2_server_generation;
-
 Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 {
 	DRI2InfoRec info;
@@ -2307,18 +2217,6 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 		return FALSE;
 	}
 
-	if (serverGeneration != dri2_server_generation) {
-	    dri2_server_generation = serverGeneration;
-	    if (!sna_dri_register_frame_event_resource_types()) {
-		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-			   "Cannot register DRI2 frame event resources\n");
-		return FALSE;
-	    }
-	}
-
-	if (!dixRegisterPrivateKey(&sna_client_key, PRIVATE_CLIENT, sizeof(XID)))
-		return FALSE;
-
 	sna->deviceName = drmGetDeviceNameFromFd(sna->kgem.fd);
 	memset(&info, '\0', sizeof(info));
 	info.fd = sna->kgem.fd;
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index e242b2f..bbbcb63 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -80,10 +80,8 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define DBG(x) ErrorF x
 #endif
 
-static DevPrivateKeyRec sna_private_index;
-static DevPrivateKeyRec sna_pixmap_index;
-static DevPrivateKeyRec sna_dri_index;
-static DevPrivateKeyRec sna_gc_index;
+static DevPrivateKeyRec sna_pixmap_key;
+static DevPrivateKeyRec sna_gc_key;
 static DevPrivateKeyRec sna_glyph_key;
 static DevPrivateKeyRec sna_window_key;
 
@@ -803,30 +801,23 @@ static void sna_mode_set(ScrnInfoPtr scrn)
 static Bool
 sna_register_all_privates(void)
 {
-	if (!dixRegisterPrivateKey(&sna_private_index, PRIVATE_PIXMAP, 0))
+	if (!dixRegisterPrivateKey(&sna_pixmap_key, PRIVATE_PIXMAP,
+				   3*sizeof(void *)))
 		return FALSE;
-	assert(sna_private_index.offset == 0);
+	assert(sna_pixmap_key.offset == 0);
 
-	if (!dixRegisterPrivateKey(&sna_pixmap_index, PRIVATE_PIXMAP, 0))
-		return FALSE;
-	assert(sna_pixmap_index.offset == sizeof(void*));
-
-	if (!dixRegisterPrivateKey(&sna_dri_index, PRIVATE_PIXMAP, 0))
-		return FALSE;
-	assert(sna_dri_index.offset == 2*sizeof(void*));
-
-	if (!dixRegisterPrivateKey(&sna_gc_index, PRIVATE_GC,
+	if (!dixRegisterPrivateKey(&sna_gc_key, PRIVATE_GC,
 				   sizeof(FbGCPrivate)))
 		return FALSE;
-	assert(sna_gc_index.offset == 0);
+	assert(sna_gc_key.offset == 0);
 
 	if (!dixRegisterPrivateKey(&sna_glyph_key, PRIVATE_GLYPH,
 				   sizeof(struct sna_glyph)))
 		return FALSE;
 	assert(sna_glyph_key.offset == 0);
 
-	if (!dixRegisterPrivateKey(&sna_window_key,
-				   PRIVATE_WINDOW, 0))
+	if (!dixRegisterPrivateKey(&sna_window_key, PRIVATE_WINDOW,
+				   2*sizeof(void *)))
 		return FALSE;
 	assert(sna_window_key.offset == 0);
 
commit 975a566bed72ddc79853b329307ed72a82df24b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 14:12:51 2012 +0100

    sna/dri: Replace the DRI2 drawable type with a devPrivate
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index e1b5de6..fd965bc 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -107,7 +107,6 @@ struct sna_dri_frame_event {
 struct sna_dri_private {
 	int refcnt;
 	PixmapPtr pixmap;
-	int width, height;
 	struct kgem_bo *bo;
 	struct sna_dri_frame_event *chain;
 };
@@ -116,7 +115,6 @@ static DevPrivateKeyRec sna_client_key;
 
 static RESTYPE frame_event_client_type;
 static RESTYPE frame_event_drawable_type;
-static RESTYPE dri_drawable_type;
 
 static inline struct sna_dri_frame_event *
 to_frame_event(uintptr_t  data)
@@ -197,6 +195,16 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	return priv->gpu_bo;
 }
 
+constant static inline void *sna_pixmap_get_dri(PixmapPtr pixmap)
+{
+	return ((void **)pixmap->devPrivates)[2];
+}
+
+static inline void *sna_pixmap_set_dri(PixmapPtr pixmap, void *ptr)
+{
+	((void **)pixmap->devPrivates)[2] = ptr;
+}
+
 static DRI2Buffer2Ptr
 sna_dri_create_buffer(DrawablePtr drawable,
 		      unsigned int attachment,
@@ -217,23 +225,16 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
 		pixmap = get_drawable_pixmap(drawable);
-
-		buffer = NULL;
-		dixLookupResourceByType((void **)&buffer, drawable->id,
-					dri_drawable_type, NULL, DixWriteAccess);
+		buffer = sna_pixmap_get_dri(pixmap);
 		if (buffer) {
+			DBG(("%s: reusing front buffer attachment\n",
+			     __FUNCTION__));
+
 			private = get_private(buffer);
-			if (private->pixmap == pixmap) {
-				DBG(("%s: reusing front buffer attachment\n",
-				     __FUNCTION__));
-				assert(private->width  == pixmap->drawable.width);
-				assert(private->height == pixmap->drawable.height);
-				private->refcnt++;
-				return buffer;
-			}
-			FreeResourceByType(drawable->id,
-					   dri_drawable_type,
-					   FALSE);
+			assert(private->pixmap == pixmap);
+
+			private->refcnt++;
+			return buffer;
 		}
 
 		bo = sna_pixmap_set_dri(sna, pixmap);
@@ -323,22 +324,18 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	buffer->flags = 0;
 	buffer->name = kgem_bo_flink(&sna->kgem, bo);
 	private->refcnt = 1;
-	private->pixmap = pixmap;
-	if (pixmap) {
-		private->width  = pixmap->drawable.width;
-		private->height = pixmap->drawable.height;
-	}
 	private->bo = bo;
+	private->pixmap = pixmap;
 
 	if (buffer->name == 0)
 		goto err;
 
-	if (pixmap)
+	if (pixmap) {
+		assert(attachment == DRI2BufferFrontLeft);
+		sna_pixmap_set_dri(pixmap, buffer);
+		assert(sna_pixmap_get_dri(pixmap) == buffer);
 		pixmap->refcnt++;
-
-	if (attachment == DRI2BufferFrontLeft &&
-	    AddResource(drawable->id, dri_drawable_type, buffer))
-		private->refcnt++;
+	}
 
 	return buffer;
 
@@ -361,17 +358,18 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 
 	if (--private->refcnt == 0) {
 		if (private->pixmap) {
-			ScreenPtr screen = private->pixmap->drawable.pScreen;
-			struct sna_pixmap *priv = sna_pixmap(private->pixmap);
+			PixmapPtr pixmap = private->pixmap;
+			struct sna_pixmap *priv = sna_pixmap(pixmap);
 
 			/* Undo the DRI markings on this pixmap */
 			if (priv->flush && --priv->flush == 0) {
 				list_del(&priv->list);
 				sna_accel_watch_flush(sna, -1);
-				priv->pinned = private->pixmap == sna->front;
+				priv->pinned = pixmap == sna->front;
 			}
 
-			screen->DestroyPixmap(private->pixmap);
+			sna_pixmap_set_dri(pixmap, NULL);
+			pixmap->drawable.pScreen->DestroyPixmap(pixmap);
 		}
 
 		private->bo->flush = 0;
@@ -858,17 +856,6 @@ sna_dri_frame_event_drawable_gone(void *data, XID id)
 	return Success;
 }
 
-static int
-sna_dri_drawable_gone(void *data, XID id)
-{
-	DBG(("%s(%ld)\n", __FUNCTION__, (long)id));
-
-	_sna_dri_destroy_buffer(to_sna_from_pixmap(get_private(data)->pixmap),
-				data);
-
-	return Success;
-}
-
 static Bool
 sna_dri_register_frame_event_resource_types(void)
 {
@@ -890,14 +877,6 @@ sna_dri_register_frame_event_resource_types(void)
 	DBG(("%s: frame_event_drawable_type=%d\n",
 	     __FUNCTION__, frame_event_drawable_type));
 
-	dri_drawable_type =
-		CreateNewResourceType(sna_dri_drawable_gone,
-				      "DRI2 Drawable");
-	if (!dri_drawable_type)
-		return FALSE;
-
-	DBG(("%s: dri_drawable_type=%d\n", __FUNCTION__, dri_drawable_type));
-
 	return TRUE;
 }
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index d12c2b0..e242b2f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -82,6 +82,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 static DevPrivateKeyRec sna_private_index;
 static DevPrivateKeyRec sna_pixmap_index;
+static DevPrivateKeyRec sna_dri_index;
 static DevPrivateKeyRec sna_gc_index;
 static DevPrivateKeyRec sna_glyph_key;
 static DevPrivateKeyRec sna_window_key;
@@ -810,6 +811,10 @@ sna_register_all_privates(void)
 		return FALSE;
 	assert(sna_pixmap_index.offset == sizeof(void*));
 
+	if (!dixRegisterPrivateKey(&sna_dri_index, PRIVATE_PIXMAP, 0))
+		return FALSE;
+	assert(sna_dri_index.offset == 2*sizeof(void*));
+
 	if (!dixRegisterPrivateKey(&sna_gc_index, PRIVATE_GC,
 				   sizeof(FbGCPrivate)))
 		return FALSE;
commit 0da1c98f660269806408af5fd08c1ab5e538082e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 12:15:28 2012 +0100

    test: Add missing header for distcheck
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/Makefile.am b/test/Makefile.am
index ba4966c..96c87f8 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -32,6 +32,7 @@ libtest_la_SOURCES = \
 	test_log.c \
 	test_render.c \
 	dri2.c \
+	dri2.h \
 	$(NULL)
 
 EXTRA_DIST = README
commit e3e58123d36924c760ab6f58a7155a040422e91d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 6 15:22:26 2012 +0100

    sna: Fixup fb wrapper
    
    To accommodate changes in the Xserver and avoid breakage; would have been
    much easier had the fb been exported in the first place.

diff --git a/configure.ac b/configure.ac
index d1ddf95..720a0ba 100644
--- a/configure.ac
+++ b/configure.ac
@@ -354,6 +354,7 @@ AC_CONFIG_FILES([
                 src/legacy/i810/Makefile
                 src/legacy/i810/xvmc/Makefile
                 src/sna/Makefile
+                src/sna/fb/Makefile
                 man/Makefile
                 src/render_program/Makefile
 		test/Makefile
diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 911a857..8cd3c45 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -18,6 +18,8 @@
 #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
+SUBDIRS = fb
+
 AM_CFLAGS = \
 	@CWARNFLAGS@ \
 	-I$(top_srcdir)/src \
@@ -29,7 +31,7 @@ AM_CFLAGS = \
 	$(NULL)
 
 noinst_LTLIBRARIES = libsna.la
-libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@
+libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@ fb/libfb.la
 
 libsna_la_SOURCES = \
 	blt.c \
diff --git a/src/sna/fb/Makefile.am b/src/sna/fb/Makefile.am
new file mode 100644
index 0000000..16f9b28
--- /dev/null
+++ b/src/sna/fb/Makefile.am
@@ -0,0 +1,37 @@
+noinst_LTLIBRARIES = libfb.la
+
+libfb_la_CFLAGS = @CWARNFLAGS@ @XORG_CFLAGS@
+libfb_la_LIBADD = $(PIXMAN_LIBS)
+
+libfb_la_SOURCES = 	\
+	fb.h		\
+	fbarc.c		\
+	fbarcbits.h	\
+	fbbitmap.c	\
+	fbblt.c		\
+	fbbltone.c	\
+	fbclip.c	\
+	fbclip.h	\
+	fbcopy.c	\
+	fbfill.c	\
+	fbgc.c		\
+	fbglyph.c	\
+	fbglyphbits.h	\
+	fbimage.c	\
+	fbline.c	\
+	fblinebits.h	\
+	fbpict.c	\
+	fbpict.h	\
+	fbpoint.c	\
+	fbpointbits.h	\
+	fbpush.c	\
+	fbrop.h		\
+	fbseg.c		\
+	fbsegbits.h	\
+	fbspan.c	\
+	fbstipple.c	\
+	fbtile.c	\
+	fbutil.c	\
+	$(NULL)
+
+EXTRA_DIST = README
diff --git a/src/sna/fb/README b/src/sna/fb/README
new file mode 100644
index 0000000..1542124
--- /dev/null
+++ b/src/sna/fb/README
@@ -0,0 +1 @@
+Note this code is intended to live inside pixman in the long term.
diff --git a/src/sna/fb/fb.h b/src/sna/fb/fb.h
new file mode 100644
index 0000000..7847951
--- /dev/null
+++ b/src/sna/fb/fb.h
@@ -0,0 +1,557 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef FB_H
+#define FB_H
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdbool.h>
+#include <pixman.h>
+
+#include <xorg-server.h>
+#include <servermd.h>
+#include <gcstruct.h>
+#include <colormap.h>
+#include <windowstr.h>
+
+#if HAS_DEBUG_FULL
+#define DBG(x) ErrorF x
+#else
+#define DBG(x)
+#endif
+
+#define WRITE(ptr, val) (*(ptr) = (val))
+#define READ(ptr) (*(ptr))
+
+/*
+ * This single define controls the basic size of data manipulated
+ * by this software; it must be log2(sizeof (FbBits) * 8)
+ */
+#define FB_SHIFT    LOG2_BITMAP_PAD
+
+#define FB_UNIT	    (1 << FB_SHIFT)
+#define FB_HALFUNIT (1 << (FB_SHIFT-1))
+#define FB_MASK	    (FB_UNIT - 1)
+#define FB_ALLONES  ((FbBits) -1)
+
+#if IMAGE_BYTE_ORDER != LSBFirst
+#error "IMAGE_BYTE_ORDER must be LSBFirst"
+#endif
+
+#if GLYPHPADBYTES != 4
+#error "GLYPHPADBYTES must be 4"
+#endif
+
+#if FB_SHIFT != 5
+#error "FB_SHIFT ala LOG2_BITMAP_PAD must be 5"
+#endif
+
+#define FB_STIP_SHIFT	LOG2_BITMAP_PAD
+#define FB_STIP_UNIT	(1 << FB_STIP_SHIFT)
+#define FB_STIP_MASK	(FB_STIP_UNIT - 1)
+#define FB_STIP_ALLONES	((FbStip) -1)
+#define FbFullMask(n)   ((n) == FB_UNIT ? FB_ALLONES : ((((FbBits) 1) << n) - 1))
+
+typedef uint32_t FbBits;
+typedef FbBits FbStip;
+typedef int FbStride;
+
+#include "fbrop.h"
+
+#define FbScrLeft(x,n)	((x) >> (n))
+#define FbScrRight(x,n)	((x) << (n))
+/* #define FbLeftBits(x,n)	((x) & ((((FbBits) 1) << (n)) - 1)) */
+#define FbLeftStipBits(x,n) ((x) & ((((FbStip) 1) << (n)) - 1))
+#define FbStipMoveLsb(x,s,n)	(FbStipRight (x,(s)-(n)))
+#define FbPatternOffsetBits	0
+
+#define FbStipLeft(x,n)	FbScrLeft(x,n)
+#define FbStipRight(x,n) FbScrRight(x,n)
+
+#define FbRotLeft(x,n)	FbScrLeft(x,n) | (n ? FbScrRight(x,FB_UNIT-n) : 0)
+#define FbRotRight(x,n)	FbScrRight(x,n) | (n ? FbScrLeft(x,FB_UNIT-n) : 0)
+
+#define FbRotStipLeft(x,n)  FbStipLeft(x,n) | (n ? FbStipRight(x,FB_STIP_UNIT-n) : 0)
+#define FbRotStipRight(x,n)  FbStipRight(x,n) | (n ? FbStipLeft(x,FB_STIP_UNIT-n) : 0)
+
+#define FbLeftMask(x)	    ( ((x) & FB_MASK) ? \
+			     FbScrRight(FB_ALLONES,(x) & FB_MASK) : 0)
+#define FbRightMask(x)	    ( ((FB_UNIT - (x)) & FB_MASK) ? \
+			     FbScrLeft(FB_ALLONES,(FB_UNIT - (x)) & FB_MASK) : 0)
+
+#define FbLeftStipMask(x)   ( ((x) & FB_STIP_MASK) ? \
+			     FbStipRight(FB_STIP_ALLONES,(x) & FB_STIP_MASK) : 0)
+#define FbRightStipMask(x)  ( ((FB_STIP_UNIT - (x)) & FB_STIP_MASK) ? \
+			     FbScrLeft(FB_STIP_ALLONES,(FB_STIP_UNIT - (x)) & FB_STIP_MASK) : 0)
+
+#define FbBitsMask(x,w)	(FbScrRight(FB_ALLONES,(x) & FB_MASK) & \
+			 FbScrLeft(FB_ALLONES,(FB_UNIT - ((x) + (w))) & FB_MASK))
+
+#define FbStipMask(x,w)	(FbStipRight(FB_STIP_ALLONES,(x) & FB_STIP_MASK) & \
+			 FbStipLeft(FB_STIP_ALLONES,(FB_STIP_UNIT - ((x)+(w))) & FB_STIP_MASK))
+
+#define FbMaskBits(x,w,l,n,r) { \
+    n = (w); \
+    r = FbRightMask((x)+n); \
+    l = FbLeftMask(x); \
+    if (l) { \
+	n -= FB_UNIT - ((x) & FB_MASK); \
+	if (n < 0) { \
+	    n = 0; \
+	    l &= r; \
+	    r = 0; \
+	} \
+    } \
+    n >>= FB_SHIFT; \
+}
+
+#define FbByteMaskInvalid   0x10
+
+#define FbPatternOffset(o,t)  ((o) ^ (FbPatternOffsetBits & ~(sizeof (t) - 1)))
+
+#define FbPtrOffset(p,o,t)		((t *) ((CARD8 *) (p) + (o)))
+#define FbSelectPatternPart(xor,o,t)	((xor) >> (FbPatternOffset (o,t) << 3))
+#define FbStorePart(dst,off,t,xor)	(WRITE(FbPtrOffset(dst,off,t), \
+					 FbSelectPart(xor,off,t)))
+#ifndef FbSelectPart
+#define FbSelectPart(x,o,t) FbSelectPatternPart(x,o,t)
+#endif
+
+#define FbMaskBitsBytes(x,w,copy,l,lb,n,r,rb) { \
+    n = (w); \
+    lb = 0; \
+    rb = 0; \
+    r = FbRightMask((x)+n); \
+    if (r) { \
+	/* compute right byte length */ \
+	if ((copy) && (((x) + n) & 7) == 0) { \
+	    rb = (((x) + n) & FB_MASK) >> 3; \
+	} else { \
+	    rb = FbByteMaskInvalid; \
+	} \
+    } \
+    l = FbLeftMask(x); \
+    if (l) { \
+	/* compute left byte length */ \
+	if ((copy) && ((x) & 7) == 0) { \
+	    lb = ((x) & FB_MASK) >> 3; \
+	} else { \
+	    lb = FbByteMaskInvalid; \
+	} \
+	/* subtract out the portion painted by leftMask */ \
+	n -= FB_UNIT - ((x) & FB_MASK); \
+	if (n < 0) { \
+	    if (lb != FbByteMaskInvalid) { \
+		if (rb == FbByteMaskInvalid) { \
+		    lb = FbByteMaskInvalid; \
+		} else if (rb) { \
+		    lb |= (rb - lb) << (FB_SHIFT - 3); \
+		    rb = 0; \
+		} \
+	    } \
+	    n = 0; \
+	    l &= r; \
+	    r = 0; \
+	}\
+    } \
+    n >>= FB_SHIFT; \
+}
+
+#define FbDoLeftMaskByteRRop(dst,lb,l,and,xor) { \
+    switch (lb) { \
+    case (sizeof (FbBits) - 3) | (1 << (FB_SHIFT - 3)): \
+	FbStorePart(dst,sizeof (FbBits) - 3,CARD8,xor); \
+	break; \
+    case (sizeof (FbBits) - 3) | (2 << (FB_SHIFT - 3)): \
+	FbStorePart(dst,sizeof (FbBits) - 3,CARD8,xor); \
+	FbStorePart(dst,sizeof (FbBits) - 2,CARD8,xor); \
+	break; \
+    case (sizeof (FbBits) - 2) | (1 << (FB_SHIFT - 3)): \
+	FbStorePart(dst,sizeof (FbBits) - 2,CARD8,xor); \
+	break; \
+    case sizeof (FbBits) - 3: \
+	FbStorePart(dst,sizeof (FbBits) - 3,CARD8,xor); \
+    case sizeof (FbBits) - 2: \
+	FbStorePart(dst,sizeof (FbBits) - 2,CARD16,xor); \
+	break; \
+    case sizeof (FbBits) - 1: \
+	FbStorePart(dst,sizeof (FbBits) - 1,CARD8,xor); \
+	break; \
+    default: \
+	WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, l)); \
+	break; \
+    } \
+}
+
+#define FbDoRightMaskByteRRop(dst,rb,r,and,xor) { \
+    switch (rb) { \
+    case 1: \
+	FbStorePart(dst,0,CARD8,xor); \
+	break; \
+    case 2: \
+	FbStorePart(dst,0,CARD16,xor); \
+	break; \
+    case 3: \
+	FbStorePart(dst,0,CARD16,xor); \
+	FbStorePart(dst,2,CARD8,xor); \
+	break; \
+    default: \
+	WRITE(dst, FbDoMaskRRop (READ(dst), and, xor, r)); \
+    } \
+}
+
+#define FbMaskStip(x,w,l,n,r) { \
+    n = (w); \
+    r = FbRightStipMask((x)+n); \
+    l = FbLeftStipMask(x); \
+    if (l) { \
+	n -= FB_STIP_UNIT - ((x) & FB_STIP_MASK); \
+	if (n < 0) { \
+	    n = 0; \
+	    l &= r; \
+	    r = 0; \
+	} \
+    } \
+    n >>= FB_STIP_SHIFT; \
+}
+
+/*
+ * These macros are used to transparently stipple
+ * in copy mode; the expected usage is with 'n' constant
+ * so all of the conditional parts collapse into a minimal
+ * sequence of partial word writes
+ *
+ * 'n' is the bytemask of which bytes to store, 'a' is the address
+ * of the FbBits base unit, 'o' is the offset within that unit
+ *
+ * The term "lane" comes from the hardware term "byte-lane", i.e. an 8-bit slice of a wider data bus or word.
+ */
+
+#define FbLaneCase1(n,a,o)						\
+    if ((n) == 0x01) {							\
+	WRITE((CARD8 *) ((a)+FbPatternOffset(o,CARD8)), fgxor);		\
+    }
+
+#define FbLaneCase2(n,a,o)						\
+    if ((n) == 0x03) {							\
+	WRITE((CARD16 *) ((a)+FbPatternOffset(o,CARD16)), fgxor);	\
+    } else {								\
+	FbLaneCase1((n)&1,a,o)						\
+	FbLaneCase1((n)>>1,a,(o)+1)					\
+    }
+
+#define FbLaneCase4(n,a,o)						\
+    if ((n) == 0x0f) {							\
+	WRITE((CARD32 *) ((a)+FbPatternOffset(o,CARD32)), fgxor);	\
+    } else {								\
+	FbLaneCase2((n)&3,a,o)						\
+	FbLaneCase2((n)>>2,a,(o)+2)					\
+    }
+
+#define FbLaneCase(n,a)   FbLaneCase4(n,(CARD8 *) (a),0)
+
+typedef struct {
+	long changes;
+	long serial;
+	void *priv;
+
+	FbBits and, xor;            /* reduced rop values */
+	FbBits bgand, bgxor;        /* for stipples */
+	FbBits fg, bg, pm;          /* expanded and filled */
+	unsigned int dashLength;    /* total of all dash elements */
+	unsigned char evenStipple;  /* stipple is even */
+	unsigned char bpp;          /* current drawable bpp */
+} FbGCPrivate, *FbGCPrivPtr;
+
+static inline FbGCPrivate *fb_gc(GCPtr gc)
+{
+	return (FbGCPrivate *)gc->devPrivates;
+}
+static inline PixmapPtr fbGetWindowPixmap(WindowPtr window)
+{
+	return *(void **)window->devPrivates;
+}
+
+#ifdef ROOTLESS
+#define __fbPixDrawableX(p)	((p)->drawable.x)
+#define __fbPixDrawableY(p)	((p)->drawable.y)
+#else
+#define __fbPixDrawableX(p)	0
+#define __fbPixDrawableY(p)	0
+#endif
+
+#ifdef COMPOSITE
+#define __fbPixOffXWin(p)	(__fbPixDrawableX(p) - (p)->screen_x)
+#define __fbPixOffYWin(p)	(__fbPixDrawableY(p) - (p)->screen_y)
+#else
+#define __fbPixOffXWin(p)	(__fbPixDrawableX(p))
+#define __fbPixOffYWin(p)	(__fbPixDrawableY(p))
+#endif
+#define __fbPixOffXPix(p)	(__fbPixDrawableX(p))
+#define __fbPixOffYPix(p)	(__fbPixDrawableY(p))
+
+#define fbGetDrawablePixmap(drawable, pixmap, xoff, yoff) {		\
+    if ((drawable)->type != DRAWABLE_PIXMAP) {				\
+	(pixmap) = fbGetWindowPixmap((WindowPtr)drawable);		\
+	(xoff) = __fbPixOffXWin(pixmap);				\
+	(yoff) = __fbPixOffYWin(pixmap);				\
+    } else {								\
+	(pixmap) = (PixmapPtr) (drawable);				\
+	(xoff) = __fbPixOffXPix(pixmap);				\
+	(yoff) = __fbPixOffYPix(pixmap);				\
+    }									\
+}
+
+#define fbGetPixmapBitsData(pixmap, pointer, stride, bpp) {		\
+    (pointer) = (FbBits *) (pixmap)->devPrivate.ptr;			\
+    (stride) = ((int) (pixmap)->devKind) / sizeof (FbBits); (void)(stride);\
+    (bpp) = (pixmap)->drawable.bitsPerPixel;  (void)(bpp);		\
+}
+
+#define fbGetPixmapStipData(pixmap, pointer, stride, bpp) {		\
+    (pointer) = (FbStip *) (pixmap)->devPrivate.ptr;			\
+    (stride) = ((int) (pixmap)->devKind) / sizeof (FbStip); (void)(stride);\
+    (bpp) = (pixmap)->drawable.bitsPerPixel;  (void)(bpp);		\
+}
+
+#define fbGetDrawable(drawable, pointer, stride, bpp, xoff, yoff) {	\
+    PixmapPtr   _pPix;							\
+    fbGetDrawablePixmap(drawable, _pPix, xoff, yoff);			\
+    fbGetPixmapBitsData(_pPix, pointer, stride, bpp);			\
+}
+
+#define fbGetStipDrawable(drawable, pointer, stride, bpp, xoff, yoff) {	\
+    PixmapPtr   _pPix;							\
+    fbGetDrawablePixmap(drawable, _pPix, xoff, yoff);			\
+    fbGetPixmapStipData(_pPix, pointer, stride, bpp);			\
+}
+
+/*
+ * XFree86 empties the root BorderClip when the VT is inactive,
+ * here's a macro which uses that to disable GetImage and GetSpans
+ */
+#define fbWindowEnabled(pWin) \
+    RegionNotEmpty(&(pWin)->drawable.pScreen->root->borderClip)
+#define fbDrawableEnabled(drawable) \
+    ((drawable)->type == DRAWABLE_PIXMAP ? \
+     TRUE : fbWindowEnabled((WindowPtr) drawable))
+
+#define FbPowerOfTwo(w)	    (((w) & ((w) - 1)) == 0)
+/*
+ * Accelerated tiles are power of 2 width <= FB_UNIT
+ */
+#define FbEvenTile(w)	    ((w) <= FB_UNIT && FbPowerOfTwo(w))
+/*
+ * Accelerated stipples are power of 2 width and <= FB_UNIT/dstBpp
+ * with dstBpp a power of 2 as well
+ */
+#define FbEvenStip(w,bpp)   ((w) * (bpp) <= FB_UNIT && FbPowerOfTwo(w) && FbPowerOfTwo(bpp))
+
+inline static int16_t fbBound(int16_t a, uint16_t b)
+{
+	int v = (int)a + (int)b;
+	if (v > MAXSHORT)
+		return MAXSHORT;
+	return v;
+}
+
+extern void
+fbPolyArc(DrawablePtr drawable, GCPtr gc, int narcs, xArc * parcs);
+
+extern void
+fbBlt(FbBits *src, FbStride srcStride, int srcX,
+      FbBits *dst, FbStride dstStride, int dstX,
+      int width, int height,
+      int alu, FbBits pm, int bpp,
+      Bool reverse, Bool upsidedown);
+
+#if FB_STIP_SHIFT == FB_SHIFT
+static inline void
+fbBltStip(FbStip *src, FbStride srcStride, int srcX,
+	  FbStip *dst, FbStride dstStride, int dstX,
+	  int width, int height, int alu, FbBits pm, int bpp)
+{
+	fbBlt((FbBits *)src, srcStride, srcX,
+	      (FbBits *)dst, dstStride, dstX,
+	      width, height, alu, pm, bpp,
+	      FALSE, FALSE);
+}
+#else
+#error FB_STIP_SHIFT must equal FB_SHIFT
+#endif
+
+extern void
+fbBltOne(FbStip *src, FbStride srcStride, int srcX,
+         FbBits *dst, FbStride dstStride, int dstX,
+         int dstBpp, int width, int height,
+	 FbBits fgand, FbBits fgxor, FbBits bgand, FbBits bgxor);
+
+extern void
+fbBltPlane(FbBits *src, FbStride srcStride, int srcX, int srcBpp,
+           FbStip *dst, FbStride dstStride, int dstX,
+           int width, int height,
+           FbStip fgand, FbStip fgxor, FbStip bgand, FbStip bgxor,
+	   Pixel planeMask);
+
+extern void
+fbCopyNtoN(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+           BoxPtr pbox, int nbox,
+           int dx, int dy,
+           Bool reverse, Bool upsidedown, Pixel bitplane, void *closure);
+
+extern void
+fbCopy1toN(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+           BoxPtr pbox, int nbox,
+           int dx, int dy,
+           Bool reverse, Bool upsidedown, Pixel bitplane, void *closure);
+
+extern void
+fbCopyNto1(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+           BoxPtr pbox, int nbox,
+           int dx, int dy,
+           Bool reverse, Bool upsidedown, Pixel bitplane, void *closure);
+
+extern RegionPtr
+fbCopyArea(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+	   int sx, int sy,
+	   int width, int height,
+	   int dx, int dy);
+
+extern RegionPtr
+fbCopyPlane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+	    int sx, int sy,
+	    int width, int height,
+	    int dx, int dy,
+	    unsigned long bitplane);
+
+extern void
+fbFill(DrawablePtr drawable, GCPtr gc, int x, int y, int width, int height);
+
+extern void
+fbSolidBoxClipped(DrawablePtr drawable, GCPtr gc,
+                  int x1, int y1, int x2, int y2);
+
+extern void
+fbPolyFillRect(DrawablePtr drawable, GCPtr gc, int n, xRectangle *rec);
+
+extern void
+fbFillSpans(DrawablePtr drawable, GCPtr gc,
+            int n, DDXPointPtr pt, int *width, int fSorted);
+
+extern void
+fbPadPixmap(PixmapPtr pPixmap);
+
+extern void
+fbValidateGC(GCPtr gc, unsigned long changes, DrawablePtr drawable);
+
+extern void
+fbGetSpans(DrawablePtr drawable, int wMax,
+           DDXPointPtr pt, int *width, int n, char *dst);
+
+extern void
+fbPolyGlyphBlt(DrawablePtr drawable, GCPtr gc, int x, int y,
+               unsigned int n, CharInfoPtr *info, pointer glyphs);
+
+extern void
+fbImageGlyphBlt(DrawablePtr drawable, GCPtr gc, int x, int y,
+                unsigned int n, CharInfoPtr *info, pointer glyphs);
+
+extern void
+fbPutImage(DrawablePtr drawable, GCPtr gc, int depth,
+           int x, int y, int w, int h,
+	   int leftPad, int format, char *image);
+
+extern void
+fbPutXYImage(DrawablePtr drawable, GCPtr gc,
+             FbBits fg, FbBits bg, FbBits pm,
+             int alu, Bool opaque,
+             int x, int y, int width, int height,
+	     FbStip * src, FbStride srcStride, int srcX);
+
+extern void
+fbGetImage(DrawablePtr drawable,
+           int x, int y, int w, int h,
+	   unsigned int format, unsigned long planeMask, char *d);
+
+extern void
+fbPolyLine(DrawablePtr drawable, GCPtr gc, int mode, int n, DDXPointPtr pt);
+
+extern void
+fbFixCoordModePrevious(int n, DDXPointPtr pt);
+
+extern void
+fbPolySegment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg);
+
+extern RegionPtr
+fbBitmapToRegion(PixmapPtr pixmap);
+
+extern void
+fbPolyPoint(DrawablePtr drawable, GCPtr gc, int mode, int n, xPoint *pt);
+
+extern void
+fbPushImage(DrawablePtr drawable, GCPtr gc,
+            FbStip *src, FbStride srcStride, int srcX,
+	    int x, int y, int width, int height);
+
+extern void
+fbPushPixels(GCPtr gc, PixmapPtr pBitmap, DrawablePtr drawable,
+	     int dx, int dy, int xOrg, int yOrg);
+
+extern void
+fbSetSpans(DrawablePtr drawable, GCPtr gc,
+           char *src, DDXPointPtr pt, int *width, int n, int fSorted);
+
+extern void
+fbSegment(DrawablePtr drawable, GCPtr gc,
+          int xa, int ya, int xb, int yb,
+	  bool drawLast, int *dashOffset);
+
+extern void
+fbSegment1(DrawablePtr drawable, GCPtr gc, const BoxRec *clip,
+          int xa, int ya, int xb, int yb,
+	  bool drawLast, int *dashOffset);
+
+extern void
+fbTransparentSpan(FbBits * dst, FbBits stip, FbBits fgxor, int n);
+
+extern void
+fbStipple(FbBits *dst, FbStride dstStride, int dstX, int dstBpp,
+          int width, int height,
+          FbStip *stip, FbStride stipStride,
+          int stipWidth, int stipHeight,
+          Bool even,
+          FbBits fgand, FbBits fgxor, FbBits bgand, FbBits bgxor,
+	  int xRot, int yRot);
+
+extern void
+fbTile(FbBits *dst, FbStride dstStride, int dstX, int width, int height,
+       FbBits *tile, FbStride tileStride, int tileWidth, int tileHeight,
+       int alu, FbBits pm, int bpp,
+       int xRot, int yRot);
+
+extern FbBits fbReplicatePixel(Pixel p, int bpp);
+
+#endif  /* FB_H */
diff --git a/src/sna/fb/fbarc.c b/src/sna/fb/fbarc.c
new file mode 100644
index 0000000..2222d0b
--- /dev/null
+++ b/src/sna/fb/fbarc.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include <mi.h>
+#include <mizerarc.h>
+#include <limits.h>
+
+#define ARC	    fbArc8
+#define BITS	    BYTE
+#define BITS2	    CARD16
+#define BITS4	    CARD32
+#include "fbarcbits.h"
+#undef BITS
+#undef BITS2
+#undef BITS4
+#undef ARC
+
+#define ARC	    fbArc16
+#define BITS	    CARD16
+#define BITS2	    CARD32
+#include "fbarcbits.h"
+#undef BITS
+#undef BITS2
+#undef ARC
+
+#define ARC	    fbArc32
+#define BITS	    CARD32
+#include "fbarcbits.h"
+#undef BITS
+#undef ARC
+
+void
+fbPolyArc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
+{
+	DBG(("%s x %d, width=%d, fill=%d, line=%d\n",
+	     __FUNCTION__, n, gc->lineWidth, gc->lineStyle, gc->fillStyle));
+
+	if (gc->lineWidth == 0) {
+		void (*raster)(FbBits *dst, FbStride dstStride, int dstBpp,
+			       xArc *arc, int dx, int dy,
+			       FbBits and, FbBits xor);
+
+		raster = 0;
+		if (gc->lineStyle == LineSolid && gc->fillStyle == FillSolid) {
+			switch (drawable->bitsPerPixel) {
+			case 8:
+				raster = fbArc8;
+				break;
+			case 16:
+				raster = fbArc16;
+				break;
+			case 32:
+				raster = fbArc32;
+				break;
+			}
+		}
+		if (raster) {
+			FbGCPrivPtr pgc = fb_gc(gc);
+			FbBits *dst;
+			FbStride dstStride;
+			int dstBpp;
+			int dstXoff, dstYoff;
+			BoxRec box;
+			int x2, y2;
+
+			fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+			while (n--) {
+				if (miCanZeroArc(arc)) {
+					box.x1 = arc->x + drawable->x;
+					box.y1 = arc->y + drawable->y;
+					/*
+					 * Because box.x2 and box.y2 get truncated to 16 bits, and the
+					 * RECT_IN_REGION test treats the resulting number as a signed
+					 * integer, the RECT_IN_REGION test alone can go the wrong way.
+					 * This can result in a server crash because the rendering
+					 * routines in this file deal directly with cpu addresses
+					 * of pixels to be stored, and do not clip or otherwise check
+					 * that all such addresses are within their respective pixmaps.
+					 * So we only allow the RECT_IN_REGION test to be used for
+					 * values that can be expressed correctly in a signed short.
+					 */
+					x2 = box.x1 + (int) arc->width + 1;
+					box.x2 = x2;
+					y2 = box.y1 + (int) arc->height + 1;
+					box.y2 = y2;
+					if ((x2 <= SHRT_MAX) && (y2 <= SHRT_MAX) &&
+					    (RegionContainsRect(gc->pCompositeClip, &box) == rgnIN)) {
+						raster(dst, dstStride, dstBpp,
+						       arc, drawable->x + dstXoff,
+							drawable->y + dstYoff, pgc->and, pgc->xor);
+					} else
+						miZeroPolyArc(drawable, gc, 1, arc);
+				} else
+					miPolyArc(drawable, gc, 1, arc);
+				arc++;
+			}
+		} else
+			miZeroPolyArc(drawable, gc, n, arc);
+	} else
+		miPolyArc(drawable, gc, n, arc);
+}
diff --git a/src/sna/fb/fbarcbits.h b/src/sna/fb/fbarcbits.h
new file mode 100644
index 0000000..a37206c
--- /dev/null
+++ b/src/sna/fb/fbarcbits.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define isClipped(c,ul,lr)  (((c) | ((c) - (ul)) | ((lr) - (c))) & 0x80008000)
+#define RROP(b,a,x)	WRITE((b), FbDoRRop (READ(b), (a), (x)))
+
+#define ARCCOPY(d)  WRITE(d,xorBits)
+#define ARCRROP(d)  RROP(d,andBits,xorBits)
+
+static void
+ARC(FbBits * dst,
+    FbStride dstStride,
+    int dstBpp, xArc * arc, int drawX, int drawY, FbBits and, FbBits xor)
+{
+	BITS *bits;
+	FbStride bitsStride;
+	miZeroArcRec info;
+	Bool do360;
+	int x;
+	BITS *yorgp, *yorgop;
+	BITS andBits, xorBits;
+	int yoffset, dyoffset;
+	int y, a, b, d, mask;
+	int k1, k3, dx, dy;
+
+	bits = (BITS *) dst;
+	bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
+	andBits = (BITS) and;
+	xorBits = (BITS) xor;
+	do360 = miZeroArcSetup(arc, &info, TRUE);
+	yorgp = bits + ((info.yorg + drawY) * bitsStride);
+	yorgop = bits + ((info.yorgo + drawY) * bitsStride);
+	info.xorg = (info.xorg + drawX);
+	info.xorgo = (info.xorgo + drawX);
+	MIARCSETUP();
+	yoffset = y ? bitsStride : 0;
+	dyoffset = 0;
+	mask = info.initialMask;
+
+	if (!(arc->width & 1)) {
+		if (andBits == 0) {
+			if (mask & 2)
+				ARCCOPY(yorgp + info.xorgo);
+			if (mask & 8)
+				ARCCOPY(yorgop + info.xorgo);
+		} else {
+			if (mask & 2)
+				ARCRROP(yorgp + info.xorgo);
+			if (mask & 8)
+				ARCRROP(yorgop + info.xorgo);
+		}
+	}
+	if (!info.end.x || !info.end.y) {
+		mask = info.end.mask;
+		info.end = info.altend;
+	}
+	if (do360 && (arc->width == arc->height) && !(arc->width & 1)) {
+		int xoffset = bitsStride;
+		BITS *yorghb = yorgp + (info.h * bitsStride) + info.xorg;
+		BITS *yorgohb = yorghb - info.h;
+
+		yorgp += info.xorg;
+		yorgop += info.xorg;
+		yorghb += info.h;
+		while (1) {
+			if (andBits == 0) {
+				ARCCOPY(yorgp + yoffset + x);
+				ARCCOPY(yorgp + yoffset - x);
+				ARCCOPY(yorgop - yoffset - x);
+				ARCCOPY(yorgop - yoffset + x);
+			} else {
+				ARCRROP(yorgp + yoffset + x);
+				ARCRROP(yorgp + yoffset - x);
+				ARCRROP(yorgop - yoffset - x);
+				ARCRROP(yorgop - yoffset + x);
+			}
+			if (a < 0)
+				break;
+			if (andBits == 0) {
+				ARCCOPY(yorghb - xoffset - y);
+				ARCCOPY(yorgohb - xoffset + y);
+				ARCCOPY(yorgohb + xoffset + y);
+				ARCCOPY(yorghb + xoffset - y);
+			} else {
+				ARCRROP(yorghb - xoffset - y);
+				ARCRROP(yorgohb - xoffset + y);
+				ARCRROP(yorgohb + xoffset + y);
+				ARCRROP(yorghb + xoffset - y);
+			}
+			xoffset += bitsStride;
+			MIARCCIRCLESTEP(yoffset += bitsStride;
+				       );
+		}
+		yorgp -= info.xorg;
+		yorgop -= info.xorg;
+		x = info.w;
+		yoffset = info.h * bitsStride;
+	} else if (do360) {
+		while (y < info.h || x < info.w) {
+			MIARCOCTANTSHIFT(dyoffset = bitsStride;
+					);
+			if (andBits == 0) {
+				ARCCOPY(yorgp + yoffset + info.xorg + x);
+				ARCCOPY(yorgp + yoffset + info.xorgo - x);
+				ARCCOPY(yorgop - yoffset + info.xorgo - x);
+				ARCCOPY(yorgop - yoffset + info.xorg + x);
+			} else {
+				ARCRROP(yorgp + yoffset + info.xorg + x);
+				ARCRROP(yorgp + yoffset + info.xorgo - x);
+				ARCRROP(yorgop - yoffset + info.xorgo - x);
+				ARCRROP(yorgop - yoffset + info.xorg + x);
+			}
+			MIARCSTEP(yoffset += dyoffset;
+				  , yoffset += bitsStride;
+				 );
+		}
+	} else {
+		while (y < info.h || x < info.w) {
+			MIARCOCTANTSHIFT(dyoffset = bitsStride;
+					);
+			if ((x == info.start.x) || (y == info.start.y)) {
+				mask = info.start.mask;
+				info.start = info.altstart;
+			}
+			if (andBits == 0) {
+				if (mask & 1)
+					ARCCOPY(yorgp + yoffset + info.xorg + x);
+				if (mask & 2)
+					ARCCOPY(yorgp + yoffset + info.xorgo - x);
+				if (mask & 4)
+					ARCCOPY(yorgop - yoffset + info.xorgo - x);
+				if (mask & 8)
+					ARCCOPY(yorgop - yoffset + info.xorg + x);
+			} else {
+				if (mask & 1)
+					ARCRROP(yorgp + yoffset + info.xorg + x);
+				if (mask & 2)
+					ARCRROP(yorgp + yoffset + info.xorgo - x);
+				if (mask & 4)
+					ARCRROP(yorgop - yoffset + info.xorgo - x);
+				if (mask & 8)
+					ARCRROP(yorgop - yoffset + info.xorg + x);
+			}
+			if ((x == info.end.x) || (y == info.end.y)) {
+				mask = info.end.mask;
+				info.end = info.altend;
+			}
+			MIARCSTEP(yoffset += dyoffset;
+				  , yoffset += bitsStride;
+				 );
+		}
+	}
+	if ((x == info.start.x) || (y == info.start.y))
+		mask = info.start.mask;
+	if (andBits == 0) {
+		if (mask & 1)
+			ARCCOPY(yorgp + yoffset + info.xorg + x);
+		if (mask & 4)
+			ARCCOPY(yorgop - yoffset + info.xorgo - x);
+		if (arc->height & 1) {
+			if (mask & 2)
+				ARCCOPY(yorgp + yoffset + info.xorgo - x);
+			if (mask & 8)
+				ARCCOPY(yorgop - yoffset + info.xorg + x);
+		}
+	} else {
+		if (mask & 1)
+			ARCRROP(yorgp + yoffset + info.xorg + x);
+		if (mask & 4)
+			ARCRROP(yorgop - yoffset + info.xorgo - x);
+		if (arc->height & 1) {
+			if (mask & 2)
+				ARCRROP(yorgp + yoffset + info.xorgo - x);
+			if (mask & 8)
+				ARCRROP(yorgop - yoffset + info.xorg + x);
+		}
+	}
+}
+
+#undef ARCCOPY
+#undef ARCRROP
+
+#undef RROP
+#undef isClipped
diff --git a/src/sna/fb/fbbitmap.c b/src/sna/fb/fbbitmap.c
new file mode 100644
index 0000000..075d6cc
--- /dev/null
+++ b/src/sna/fb/fbbitmap.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "fb.h"
+
+static inline void add(RegionPtr region,
+		       int16_t x1, int16_t y1, int16_t x2, int16_t y2)
+{
+	BoxPtr r;
+
+	if (region->data->numRects == region->data->size)
+		RegionRectAlloc(region, 1);
+
+	r = RegionBoxptr(region) + region->data->numRects++;
+	r->x1 = x1; r->y1 = y1;
+	r->x2 = x2; r->y2 = y2;
+
+	if (x1 < region->extents.x1)
+		region->extents.x1 = x1;
+	if (x2 > region->extents.x2)
+		region->extents.x2 = x2;
+}
+
+/* Convert bitmap clip mask into clipping region.
+ * First, goes through each line and makes boxes by noting the transitions
+ * from 0 to 1 and 1 to 0.
+ * Then it coalesces the current line with the previous if they have boxes
+ * at the same X coordinates.
+ */
+RegionPtr
+fbBitmapToRegion(PixmapPtr pixmap)
+{
+	const register FbBits mask0 = FB_ALLONES & ~FbScrRight(FB_ALLONES, 1);
+	register RegionPtr region;
+	const FbBits *bits, *line, *end;
+	int width, y1, y2, base, x1;
+	int stride, i;
+
+	DBG(("%s bitmap=%dx%d\n", __FUNCTION__,
+	     pixmap->drawable.width, pixmap->drawable.height));
+
+	region = RegionCreate(NULL, 1);
+	if (!region)
+		return NullRegion;
+
+	line = (FbBits *) pixmap->devPrivate.ptr;
+	stride = pixmap->devKind >> (FB_SHIFT - 3);
+
+	width = pixmap->drawable.width;
+	region->extents.x1 = width;
+	region->extents.x2 = 0;
+	y2 = 0;
+	while (y2 < pixmap->drawable.height) {
+		y1 = y2++;
+		bits = line;
+		line += stride;
+		while (y2 < pixmap->drawable.height &&
+		       memcmp(bits, line, (width+7)>>3) == 0)
+			line += stride, y2++;
+
+		if (READ(bits) & mask0)
+			x1 = 0;
+		else
+			x1 =-1;
+
+		/* Process all words which are fully in the pixmap */
+		end = bits + (width >> FB_SHIFT);
+		for (base = 0; bits < end; base += FB_UNIT) {
+			FbBits w = READ(bits++);
+			if (x1 < 0) {
+				if (!~w)
+					continue;
+			} else {
+				if (!w)
+					continue;
+			}
+			for (i = 0; i < FB_UNIT; i++) {
+				if (w & mask0) {
+					if (x1 < 0)
+						x1 = base + i;
+				} else {
+					if (x1 >= 0) {
+						add(region, x1, y1, base + i, y2);
+						x1 = -1;
+					}
+				}
+				w = FbScrLeft(w, 1);
+			}
+		}
+		if (width & FB_MASK) {
+			FbBits w = READ(bits++);
+			for (i = 0; i < (width & FB_MASK); i++) {
+				if (w & mask0) {
+					if (x1 < 0)
+						x1 = base + i;
+				} else {
+					if (x1 >= 0) {
+						add(region, x1, y1, base + i, y2);
+						x1 = -1;
+					}
+				}
+				w = FbScrLeft(w, 1);
+			}
+		}
+		if (x1 >= 0)
+			add(region, x1, y1, width, y2);
+	}
+
+	if (region->data->numRects) {
+		region->extents.y1 = RegionBoxptr(region)->y1;
+		region->extents.y2 = RegionEnd(region)->y2;
+		if (region->data->numRects == 1) {
+			free(region->data);
+			region->data = NULL;
+		}
+	} else
+		region->extents.x1 = region->extents.x2 = 0;
+
+	return region;
+}
diff --git a/src/sna/fb/fbblt.c b/src/sna/fb/fbblt.c
new file mode 100644
index 0000000..247a331
--- /dev/null
+++ b/src/sna/fb/fbblt.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <string.h>
+#include "fb.h"
+
/* Coefficient set expressing one GX raster op as AND/XOR masks applied
 * to src and dst; consumed by the FbDoMergeRop() family of macros from
 * fb.h via the _ca1/_cx1/_ca2/_cx2 locals declared below.
 */
typedef struct _mergeRopBits {
    FbBits ca1, cx1, ca2, cx2;
} FbMergeRopRec, *FbMergeRopPtr;

/* Shorthand for the table below only. */
#define O 0
#define I FB_ALLONES

/* One coefficient set per GX alu code (0x0..0xf), indexed directly. */
static const FbMergeRopRec FbMergeRopBits[16] = {
	{O, O, O, O},               /* clear         0x0         0 */
	{I, O, O, O},               /* and           0x1         src AND dst */
	{I, O, I, O},               /* andReverse    0x2         src AND NOT dst */
	{O, O, I, O},               /* copy          0x3         src */
	{I, I, O, O},               /* andInverted   0x4         NOT src AND dst */
	{O, I, O, O},               /* noop          0x5         dst */
	{O, I, I, O},               /* xor           0x6         src XOR dst */
	{I, I, I, O},               /* or            0x7         src OR dst */
	{I, I, I, I},               /* nor           0x8         NOT src AND NOT dst */
	{O, I, I, I},               /* equiv         0x9         NOT src XOR dst */
	{O, I, O, I},               /* invert        0xa         NOT dst */
	{I, I, O, I},               /* orReverse     0xb         src OR NOT dst */
	{O, O, I, I},               /* copyInverted  0xc         NOT src */
	{I, O, I, I},               /* orInverted    0xd         NOT src OR dst */
	{I, O, O, I},               /* nand          0xe         NOT src OR NOT dst */
	{O, O, O, I},               /* set           0xf         1 */
};

#undef O
#undef I

/* Declare the merge-rop locals that the rop macros operate on. */
#define FbDeclareMergeRop() FbBits   _ca1, _cx1, _ca2, _cx2;
#define FbDeclarePrebuiltMergeRop()	FbBits	_cca, _ccx;

/* Fold the planemask pm into the coefficients for alu so that bits
 * outside the mask pass the destination through unchanged.
 */
#define FbInitializeMergeRop(alu,pm) {\
    const FbMergeRopRec  *_bits; \
    _bits = &FbMergeRopBits[alu]; \
    _ca1 = _bits->ca1 &  pm; \
    _cx1 = _bits->cx1 | ~pm; \
    _ca2 = _bits->ca2 &  pm; \
    _cx2 = _bits->cx2 &  pm; \
}

/* Compute the left/right bit shifts needed to realign bit offset sx onto
 * dx; ls/rs are left untouched when the offsets are already aligned.
 * NOTE(review): unused within this file as shown (fbBlt__rop computes its
 * shifts inline) -- presumably shared with sibling fb*.c sources; confirm.
 */
#define InitializeShifts(sx,dx,ls,rs) { \
    if (sx != dx) { \
	if (sx > dx) { \
	    ls = sx - dx; \
	    rs = FB_UNIT - ls; \
	} else { \
	    rs = dx - sx; \
	    ls = FB_UNIT - rs; \
	} \
    } \
}
+
/*
 * General blit between two FbBits-addressed surfaces applying the
 * merge-rop for (alu, pm) at arbitrary bit alignment.  reverse walks each
 * scanline right-to-left and upsidedown walks scanlines bottom-to-top, so
 * overlapping copies can pick a safe direction.  Rops whose result is
 * independent of the destination take a read-free fast path.
 */
static void
fbBlt__rop(FbBits *srcLine, FbStride srcStride, int srcX,
	   FbBits *dstLine, FbStride dstStride, int dstX,
	   int width, int height,
	   int alu, FbBits pm, int bpp,
	   Bool reverse, Bool upsidedown)
{
	FbBits *src, *dst;
	int leftShift, rightShift;      /* realign src bits onto dst */
	FbBits startmask, endmask;      /* partial-word masks at span edges */
	FbBits bits, bits1;             /* current word / carried word */
	int n, nmiddle;                 /* whole dst words between the edges */
	Bool destInvarient;             /* rop result independent of dst? */
	int startbyte, endbyte;

	FbDeclareMergeRop();

	FbInitializeMergeRop(alu, pm);
	destInvarient = FbDestInvarientMergeRop();
	if (upsidedown) {
		/* Start on the last scanline and step backwards. */
		srcLine += (height - 1) * (srcStride);
		dstLine += (height - 1) * (dstStride);
		srcStride = -srcStride;
		dstStride = -dstStride;
	}
	FbMaskBitsBytes(dstX, width, destInvarient, startmask, startbyte,
			nmiddle, endmask, endbyte);
	if (reverse) {
		/* Point one word past the right edge of the span. */
		srcLine += ((srcX + width - 1) >> FB_SHIFT) + 1;
		dstLine += ((dstX + width - 1) >> FB_SHIFT) + 1;
		srcX = (srcX + width - 1) & FB_MASK;
		dstX = (dstX + width - 1) & FB_MASK;
	} else {
		srcLine += srcX >> FB_SHIFT;
		dstLine += dstX >> FB_SHIFT;
		srcX &= FB_MASK;
		dstX &= FB_MASK;
	}
	if (srcX == dstX) {
		/* Source and destination share bit alignment: no shifting,
		 * just edge masks around a run of whole words.
		 */
		while (height--) {
			src = srcLine;
			srcLine += srcStride;
			dst = dstLine;
			dstLine += dstStride;
			if (reverse) {
				if (endmask) {
					bits = READ(--src);
					--dst;
					FbDoRightMaskByteMergeRop(dst, bits, endbyte, endmask);
				}
				n = nmiddle;
				if (destInvarient) {
					while (n--)
						WRITE(--dst, FbDoDestInvarientMergeRop(READ(--src)));
				} else {
					while (n--) {
						bits = READ(--src);
						--dst;
						WRITE(dst, FbDoMergeRop(bits, READ(dst)));
					}
				}
				if (startmask) {
					bits = READ(--src);
					--dst;
					FbDoLeftMaskByteMergeRop(dst, bits, startbyte, startmask);
				}
			} else {
				if (startmask) {
					bits = READ(src++);
					FbDoLeftMaskByteMergeRop(dst, bits, startbyte, startmask);
					dst++;
				}
				n = nmiddle;
				if (destInvarient) {
					while (n--)
						WRITE(dst++, FbDoDestInvarientMergeRop(READ(src++)));
				} else {
					while (n--) {
						bits = READ(src++);
						WRITE(dst, FbDoMergeRop(bits, READ(dst)));
						dst++;
					}
				}
				if (endmask) {
					bits = READ(src);
					FbDoRightMaskByteMergeRop(dst, bits, endbyte, endmask);
				}
			}
		}
	} else {
		/* Misaligned: each dst word combines two src words, carried
		 * across iterations in bits1.
		 */
		if (srcX > dstX) {
			leftShift = srcX - dstX;
			rightShift = FB_UNIT - leftShift;
		} else {
			rightShift = dstX - srcX;
			leftShift = FB_UNIT - rightShift;
		}
		while (height--) {
			src = srcLine;
			srcLine += srcStride;
			dst = dstLine;
			dstLine += dstStride;

			bits1 = 0;
			if (reverse) {
				/* Prime the carry only when the first dst word
				 * straddles a src word boundary.
				 */
				if (srcX < dstX)
					bits1 = READ(--src);
				if (endmask) {
					bits = FbScrRight(bits1, rightShift);
					if (FbScrRight(endmask, leftShift)) {
						bits1 = READ(--src);
						bits |= FbScrLeft(bits1, leftShift);
					}
					--dst;
					FbDoRightMaskByteMergeRop(dst, bits, endbyte, endmask);
				}
				n = nmiddle;
				if (destInvarient) {
					while (n--) {
						bits = FbScrRight(bits1, rightShift);
						bits1 = READ(--src);
						bits |= FbScrLeft(bits1, leftShift);
						--dst;
						WRITE(dst, FbDoDestInvarientMergeRop(bits));
					}
				} else {
					while (n--) {
						bits = FbScrRight(bits1, rightShift);
						bits1 = READ(--src);
						bits |= FbScrLeft(bits1, leftShift);
						--dst;
						WRITE(dst, FbDoMergeRop(bits, READ(dst)));
					}
				}
				if (startmask) {
					bits = FbScrRight(bits1, rightShift);
					if (FbScrRight(startmask, leftShift)) {
						bits1 = READ(--src);
						bits |= FbScrLeft(bits1, leftShift);
					}
					--dst;
					FbDoLeftMaskByteMergeRop(dst, bits, startbyte, startmask);
				}
			} else {
				if (srcX > dstX)
					bits1 = READ(src++);
				if (startmask) {
					bits = FbScrLeft(bits1, leftShift);
					/* Only fetch the next src word if the
					 * edge mask actually needs its bits.
					 */
					if (FbScrLeft(startmask, rightShift)) {
						bits1 = READ(src++);
						bits |= FbScrRight(bits1, rightShift);
					}
					FbDoLeftMaskByteMergeRop(dst, bits, startbyte, startmask);
					dst++;
				}
				n = nmiddle;
				if (destInvarient) {
					while (n--) {
						bits = FbScrLeft(bits1, leftShift);
						bits1 = READ(src++);
						bits |= FbScrRight(bits1, rightShift);
						WRITE(dst, FbDoDestInvarientMergeRop(bits));
						dst++;
					}
				} else {
					while (n--) {
						bits = FbScrLeft(bits1, leftShift);
						bits1 = READ(src++);
						bits |= FbScrRight(bits1, rightShift);
						WRITE(dst, FbDoMergeRop(bits, READ(dst)));
						dst++;
					}
				}
				if (endmask) {
					bits = FbScrLeft(bits1, leftShift);
					if (FbScrLeft(endmask, rightShift)) {
						bits1 = READ(src);
						bits |= FbScrRight(bits1, rightShift);
					}
					FbDoRightMaskByteMergeRop(dst, bits, endbyte, endmask);
				}
			}
		}
	}
}
+
+void
+fbBlt(FbBits *srcLine, FbStride srcStride, int srcX,
+      FbBits *dstLine, FbStride dstStride, int dstX,
+      int width, int height,
+      int alu, FbBits pm, int bpp,
+      Bool reverse, Bool upsidedown)
+{
+	DBG(("%s %dx%d, alu=%d, pm=%d, bpp=%d\n",
+	     __FUNCTION__, width, height, alu, pm, bpp));
+
+	if (alu == GXcopy && pm == FB_ALLONES && ((srcX|dstX|width) & 7) == 0) {
+		CARD8 *s = (CARD8 *) srcLine;
+		CARD8 *d = (CARD8 *) dstLine;
+		int i;
+
+		srcStride *= sizeof(FbBits);
+		dstStride *= sizeof(FbBits);
+		width >>= 3;
+		s += srcX >> 3;
+		d += dstX >> 3;
+
+		DBG(("%s fast blt\n", __FUNCTION__));
+
+		if ((srcLine < dstLine && srcLine + width > dstLine) ||
+		    (dstLine < srcLine && dstLine + width > srcLine)) {
+			if (!upsidedown)
+				for (i = 0; i < height; i++)
+					memmove(d + i * dstStride,
+						s + i * srcStride,
+						width);
+			else
+				for (i = height - 1; i >= 0; i--)
+					memmove(d + i * dstStride,
+						s + i * srcStride,
+						width);
+		} else {
+			if (!upsidedown)
+				for (i = 0; i < height; i++)
+					memcpy(d + i * dstStride,
+					       s + i * srcStride,
+					       width);
+			else
+				for (i = height - 1; i >= 0; i--)
+					memcpy(d + i * dstStride,
+					       s + i * srcStride,
+					       width);
+		}
+
+		return;
+	}
+
+	fbBlt__rop(srcLine, srcStride, srcX,
+		   dstLine, dstStride, dstX,
+		   width, height,
+		   alu, pm, bpp,
+		   reverse, upsidedown);
+}
diff --git a/src/sna/fb/fbbltone.c b/src/sna/fb/fbbltone.c
new file mode 100644
index 0000000..697d20b
--- /dev/null
+++ b/src/sna/fb/fbbltone.c
@@ -0,0 +1,413 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+
+#ifdef __clang__
+/* shift overflow is intentional */
+#pragma clang diagnostic ignored "-Wshift-overflow"
+#endif
+
+/*
+ *  Example: srcX = 13 dstX = 8	(FB unit 32 dstBpp 8)
+ *
+ *	**** **** **** **** **** **** **** ****
+ *			^
+ *	********  ********  ********  ********
+ *		  ^
+ *  leftShift = 12
+ *  rightShift = 20
+ *
+ *  Example: srcX = 0 dstX = 8 (FB unit 32 dstBpp 8)
+ *
+ *	**** **** **** **** **** **** **** ****
+ *	^		
+ *	********  ********  ********  ********
+ *		  ^
+ *
+ *  leftShift = 24
+ *  rightShift = 8
+ */
+
/* Fetch the next FB_STIP_UNIT of stipple bits into `bits`, realigned to
 * the destination when leftShift is non-zero (combining the carried word
 * bitsLeft with the freshly read bitsRight).  Reads at or past srcEnd
 * yield 0 so we never touch memory beyond the stipple.
 */
#define LoadBits {\
    if (leftShift) { \
	bitsRight = (src < srcEnd ? READ(src++) : 0); \
	bits = (FbStipLeft (bitsLeft, leftShift) | \
		FbStipRight(bitsRight, rightShift)); \
	bitsLeft = bitsRight; \
    } else \
	bits = (src < srcEnd ? READ(src++) : 0); \
}

/* Expand FbLaneCase (from fb.h) into a dense switch over every possible
 * byte-lane mask value; only the 16-way expansion is used below.
 */
#define LaneCases1(n,a)	    case n: FbLaneCase(n,a); break
#define LaneCases2(n,a)	    LaneCases1(n,a); LaneCases1(n+1,a)
#define LaneCases4(n,a)	    LaneCases2(n,a); LaneCases2(n+2,a)
#define LaneCases8(n,a)	    LaneCases4(n,a); LaneCases4(n+4,a)
#define LaneCases16(n,a)    LaneCases8(n,a); LaneCases8(n+8,a)
#define LaneCases32(n,a)    LaneCases16(n,a); LaneCases16(n+16,a)
#define LaneCases64(n,a)    LaneCases32(n,a); LaneCases32(n+32,a)
#define LaneCases128(n,a)   LaneCases64(n,a); LaneCases64(n+64,a)
#define LaneCases256(n,a)   LaneCases128(n,a); LaneCases128(n+128,a)

#define LaneCases(a)	    LaneCases16(0,a)

/* Map "stipple bits for one FbBits word" to the byte lanes they enable:
 * identity at 8bpp, 2 bytes per pixel at 16bpp, 4 at 32bpp.  Entries
 * beyond the meaningful stipple-bit count are unused.
 */
static const CARD8 fb8Lane[16] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static const CARD8 fb16Lane[16] = {
    0, 3, 12, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

static const CARD8 fb32Lane[16] = {
    0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

/* Lane table selector indexed by destination bpp (8/16/32 only). */
static const CARD8 * const fbLaneTable[33] = {
    0, 0, 0, 0, 0, 0, 0, 0,
    fb8Lane, 0, 0, 0, 0, 0, 0, 0,
    fb16Lane, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    fb32Lane
};
+
/*
 * Expand a 1bpp stipple onto an Nbpp destination.  Set stipple bits take
 * the fgand/fgxor rrop, clear bits take bgand/bgxor.  Two fast paths:
 * "copy" when both rops are dest-invariant (bgand == fgand == 0), and
 * "transparent" when background pixels are left untouched
 * (bgand == FB_ALLONES, bgxor == 0), optionally via per-byte lane writes.
 */
void
fbBltOne(FbStip * src, FbStride srcStride,      /* FbStip units per scanline */
         int srcX,              /* bit position of source */
         FbBits * dst, FbStride dstStride,      /* FbBits units per scanline */
         int dstX,              /* bit position of dest */
         int dstBpp,            /* bits per destination unit */
         int width,             /* width in bits of destination */
         int height,            /* height in scanlines */
         FbBits fgand,          /* rrop values */
         FbBits fgxor, FbBits bgand, FbBits bgxor)
{
	const FbBits *fbBits;
	FbBits *srcEnd;
	int pixelsPerDst;           /* dst pixels per FbBits */
	int unitsPerSrc;            /* src patterns per FbStip */
	int leftShift, rightShift;  /* align source with dest */
	FbBits startmask, endmask;  /* dest scanline masks */
	FbStip bits = 0, bitsLeft, bitsRight;       /* source bits */
	FbStip left;
	FbBits mask;
	int nDst;                   /* dest longwords (w.o. end) */
	int w;
	int n, nmiddle;
	int dstS;                   /* stipple-relative dst X coordinate */
	Bool copy;                  /* accelerate dest-invariant */
	Bool transparent;           /* accelerate 0 nop */
	int srcinc;                 /* source units consumed */
	Bool endNeedsLoad = FALSE;  /* need load for endmask */
	const CARD8 *fbLane;
	int startbyte, endbyte;

	/*
	 * Do not read past the end of the buffer!
	 */
	srcEnd = src + height * srcStride;

	/*
	 * Number of destination units in FbBits == number of stipple pixels
	 * used each time
	 */
	pixelsPerDst = FB_UNIT / dstBpp;

	/*
	 * Number of source stipple patterns in FbStip 
	 */
	unitsPerSrc = FB_STIP_UNIT / pixelsPerDst;

	copy = FALSE;
	transparent = FALSE;
	if (bgand == 0 && fgand == 0)
		copy = TRUE;
	else if (bgand == FB_ALLONES && bgxor == 0)
		transparent = TRUE;

	/*
	 * Adjust source and dest to nearest FbBits boundary
	 */
	src += srcX >> FB_STIP_SHIFT;
	dst += dstX >> FB_SHIFT;
	srcX &= FB_STIP_MASK;
	dstX &= FB_MASK;

	FbMaskBitsBytes(dstX, width, copy,
			startmask, startbyte, nmiddle, endmask, endbyte);

	/*
	 * Compute effective dest alignment requirement for
	 * source -- must align source to dest unit boundary
	 */
	dstS = dstX / dstBpp;
	/*
	 * Compute shift constants for effective alignement
	 */
	if (srcX >= dstS) {
		leftShift = srcX - dstS;
		rightShift = FB_STIP_UNIT - leftShift;
	} else {
		rightShift = dstS - srcX;
		leftShift = FB_STIP_UNIT - rightShift;
	}
	/*
	 * Get pointer to stipple mask array for this depth
	 */
	fbBits = 0;                 /* unused */
	if (pixelsPerDst <= 8)
		fbBits = fbStippleTable[pixelsPerDst];
	fbLane = 0;
	if (transparent && fgand == 0 && dstBpp >= 8)
		fbLane = fbLaneTable[dstBpp];

	/*
	 * Compute total number of destination words written, but 
	 * don't count endmask 
	 */
	nDst = nmiddle;
	if (startmask)
		nDst++;

	dstStride -= nDst;

	/*
	 * Compute total number of source words consumed
	 */

	srcinc = (nDst + unitsPerSrc - 1) / unitsPerSrc;

	if (srcX > dstS)
		srcinc++;
	if (endmask) {
		endNeedsLoad = nDst % unitsPerSrc == 0;
		if (endNeedsLoad)
			srcinc++;
	}

	srcStride -= srcinc;

	/*
	 * Copy rectangle
	 */
	while (height--) {
		w = nDst;               /* total units across scanline */
		n = unitsPerSrc;        /* units avail in single stipple */
		if (n > w)
			n = w;

		/* Prime the realignment carry when the first dst word
		 * straddles a source stipple boundary.
		 */
		bitsLeft = 0;
		if (srcX > dstS)
			bitsLeft = READ(src++);
		if (n) {
			/*
			 * Load first set of stipple bits
			 */
			LoadBits;

			/*
			 * Consume stipple bits for startmask
			 */
			if (startmask) {
				mask = fbBits[FbLeftStipBits(bits, pixelsPerDst)];
				if (fbLane) {
					fbTransparentSpan(dst, mask & startmask, fgxor, 1);
				} else {
					if (mask || !transparent)
						FbDoLeftMaskByteStippleRRop(dst, mask,
									    fgand, fgxor, bgand, bgxor,
									    startbyte, startmask);
				}
				bits = FbStipLeft(bits, pixelsPerDst);
				dst++;
				n--;
				w--;
			}
			/*
			 * Consume stipple bits across scanline
			 */
			for (;;) {
				w -= n;
				if (copy) {
					/* Dest-invariant: expand and write,
					 * never reading the destination.
					 */
					while (n--) {
#if FB_UNIT > 32
						if (pixelsPerDst == 16)
							mask = FbStipple16Bits(FbLeftStipBits(bits, 16));
						else
#endif
							mask = fbBits[FbLeftStipBits(bits, pixelsPerDst)];
						WRITE(dst, FbOpaqueStipple(mask, fgxor, bgxor));
						dst++;
						bits = FbStipLeft(bits, pixelsPerDst);
					}
				}
				else {
					if (fbLane) {
						/* Transparent fill via per-byte
						 * lane writes; all-zero bits
						 * short-circuits the loop.
						 */
						while (bits && n) {
							switch (fbLane[FbLeftStipBits(bits, pixelsPerDst)]) {
								LaneCases((CARD8 *) dst);
							}
							bits = FbStipLeft(bits, pixelsPerDst);
							dst++;
							n--;
						}
						dst += n;
					} else {
						/* General read-modify-write rrop. */
						while (n--) {
							left = FbLeftStipBits(bits, pixelsPerDst);
							if (left || !transparent) {
								mask = fbBits[left];
								WRITE(dst, FbStippleRRop(READ(dst), mask,
											 fgand, fgxor, bgand,
											 bgxor));
							}
							dst++;
							bits = FbStipLeft(bits, pixelsPerDst);
						}
					}
				}
				if (!w)
					break;
				/*
				 * Load another set and reset number of available units
				 */
				LoadBits;
				n = unitsPerSrc;
				if (n > w)
					n = w;
			}
		}
		/*
		 * Consume stipple bits for endmask
		 */
		if (endmask) {
			if (endNeedsLoad) {
				LoadBits;
			}
			mask = fbBits[FbLeftStipBits(bits, pixelsPerDst)];
			if (fbLane) {
				fbTransparentSpan(dst, mask & endmask, fgxor, 1);
			} else {
				if (mask || !transparent)
					FbDoRightMaskByteStippleRRop(dst, mask,
								     fgand, fgxor, bgand, bgxor,
								     endbyte, endmask);
			}
		}
		dst += dstStride;
		src += srcStride;
	}
}
+
+/*
+ * Not very efficient, but simple -- copy a single plane
+ * from an N bit image to a 1 bit image
+ */
+
/*
 * Extract the plane selected by planeMask from an srcBpp-deep image into
 * a 1bpp stipple, applying the fg rrop to destination bits whose source
 * plane bit is set and the bg rrop to the rest.  Works one pixel at a
 * time -- simple rather than fast (see the comment above).
 */
void
fbBltPlane(FbBits * src,
           FbStride srcStride,
           int srcX,
           int srcBpp,
           FbStip * dst,
           FbStride dstStride,
           int dstX,
           int width,
           int height,
           FbStip fgand,
           FbStip fgxor, FbStip bgand, FbStip bgxor, Pixel planeMask)
{
	FbBits *s;
	FbBits pm;                  /* planeMask replicated to every pixel */
	FbBits srcMask;             /* plane bit of the current src pixel */
	FbBits srcMaskFirst;        /* srcMask for the first pixel of a row */
	FbBits srcMask0 = 0;        /* srcMask at the start of a src word */
	FbBits srcBits;             /* current source word */

	FbStip dstBits;             /* accumulated 1bpp result bits */
	FbStip *d;
	FbStip dstMask;             /* dst bit for the current pixel */
	FbStip dstMaskFirst;        /* dstMask for the first pixel of a row */
	FbStip dstUnion;            /* which dst bits were touched */
	int w;
	int wt;

	if (!width)
		return;

	src += srcX >> FB_SHIFT;
	srcX &= FB_MASK;

	dst += dstX >> FB_STIP_SHIFT;
	dstX &= FB_STIP_MASK;

	/* NOTE(review): width appears to be given in source bits here
	 * (w = pixels per row) -- confirm against callers.
	 */
	w = width / srcBpp;

	pm = fbReplicatePixel(planeMask, srcBpp);
	srcMaskFirst = pm & FbBitsMask(srcX, srcBpp);
	srcMask0 = pm & FbBitsMask(0, srcBpp);

	dstMaskFirst = FbStipMask(dstX, 1);
	while (height--) {
		d = dst;
		dst += dstStride;
		s = src;
		src += srcStride;

		srcMask = srcMaskFirst;
		srcBits = READ(s++);

		dstMask = dstMaskFirst;
		dstUnion = 0;
		dstBits = 0;

		wt = w;

		while (wt--) {
			/* Refill source word / flush destination word when
			 * the walking masks run off their ends.
			 */
			if (!srcMask) {
				srcBits = READ(s++);
				srcMask = srcMask0;
			}
			if (!dstMask) {
				WRITE(d, FbStippleRRopMask(READ(d), dstBits,
							   fgand, fgxor, bgand, bgxor,
							   dstUnion));
				d++;
				dstMask = FbStipMask(0, 1);
				dstUnion = 0;
				dstBits = 0;
			}
			if (srcBits & srcMask)
				dstBits |= dstMask;
			dstUnion |= dstMask;
			/* Advance one pixel; a full-word pixel exhausts the
			 * mask immediately (shifting by FB_UNIT would be UB).
			 */
			if (srcBpp == FB_UNIT)
				srcMask = 0;
			else
				srcMask = FbScrRight(srcMask, srcBpp);
			dstMask = FbStipRight(dstMask, 1);
		}
		/* Flush the final, possibly partial, destination word. */
		if (dstUnion)
			WRITE(d, FbStippleRRopMask(READ(d), dstBits,
						   fgand, fgxor, bgand, bgxor, dstUnion));
	}
}
diff --git a/src/sna/fb/fbclip.c b/src/sna/fb/fbclip.c
new file mode 100644
index 0000000..9b33e4a
--- /dev/null
+++ b/src/sna/fb/fbclip.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#include "fb.h"
+#include "fbclip.h"
+
+static const BoxRec *
+find_c0(const BoxRec *begin, const BoxRec *end, int16_t y)
+{
+	const BoxRec *mid;
+
+	if (end == begin)
+		return end;
+
+	if (end - begin == 1) {
+		if (begin->y2 > y)
+			return begin;
+		else
+			return end;
+	}
+
+	mid = begin + (end - begin) / 2;
+	if (mid->y2 > y)
+		return find_c0(begin, mid, y);
+	else
+		return find_c0(mid, end, y);
+}
+
+static const BoxRec *
+find_c1(const BoxRec *begin, const BoxRec *end, int16_t y)
+{
+	const BoxRec *mid;
+
+	if (end == begin)
+		return end;
+
+	if (end - begin == 1) {
+		if (begin->y1 > y)
+			return begin;
+		else
+			return end;
+	}
+
+	mid = begin + (end - begin) / 2;
+	if (mid->y1 > y)
+		return find_c1(begin, mid, y);
+	else
+		return find_c1(mid, end, y);
+}
+
/*
 * Return the run [result, *end) of clip boxes in region that may
 * intersect box.  When box misses the region extents entirely, returns
 * box itself with *end == box so callers iterate zero times.  Candidates
 * are narrowed only by binary searches on y; callers must still test x
 * (and strictly y) overlap per box.
 */
const BoxRec *
fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end)
{
	const BoxRec *c0, *c1;

	DBG(("%s: box=(%d, %d),(%d, %d); region=(%d, %d),(%d, %d) x %ld\n",
	     __FUNCTION__,
	     box->x1, box->y1, box->x2, box->y2,
	     region->extents.x1, region->extents.y1,
	     region->extents.x2, region->extents.y2,
	     region->data ? region->data->numRects : 1));

	/* Trivial rejection against the region extents. */
	if (box->x1 >= region->extents.x2 || box->x2 <= region->extents.x1 ||
	    box->y1 >= region->extents.y2 || box->y2 <= region->extents.y1) {
		DBG(("%s: no intersection\n", __FUNCTION__));
		return *end = box;
	}

	/* A single-box region keeps the box inline in its extents. */
	if (region->data == NULL) {
		*end = &region->extents + 1;
		return &region->extents;
	}

	/* Rectangles follow the region data header.
	 * NOTE(review): this assumes sizeof(RegDataRec) == sizeof(BoxRec);
	 * RegionBoxptr() (as used in fbbitmap.c) would not -- confirm the
	 * layout on 64-bit builds.
	 */
	c0 = (const BoxRec *)region->data + 1;
	c1 = c0 + region->data->numRects;

	c0 = find_c0(c0, c1, box->y1);	/* first box with y2 > box->y1 */
	c1 = find_c1(c0, c1, box->y2);	/* past boxes with y1 <= box->y2 */
	/* NOTE(review): find_c1() keeps boxes whose y1 == box->y2, which do
	 * not actually overlap vertically; fbDrawableRun() filters them via
	 * box_intersect() but fbDrawableRunUnclipped() does not -- confirm.
	 */

	DBG(("%s: c0=(%d, %d),(%d, %d); c1=(%d, %d),(%d, %d)\n",
	     __FUNCTION__,
	     c0->x1, c0->y1, c0->x2, c0->y2,
	     c1[-1].x1, c1[-1].y1, c1[-1].x2, c1[-1].y2));

	*end = c1;
	return c0;
}
diff --git a/src/sna/fb/fbclip.h b/src/sna/fb/fbclip.h
new file mode 100644
index 0000000..0436c40
--- /dev/null
+++ b/src/sna/fb/fbclip.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Chris Wilson <chris at chris-wilson.co.uk>
+ *
+ */
+
+#ifndef FBCLIP_H
+#define FBCLIP_H
+
+extern const BoxRec *
+fbClipBoxes(const RegionRec *region, const BoxRec *box, const BoxRec **end);
+
+inline static bool
+box_intersect(BoxPtr a, const BoxRec *b)
+{
+	if (a->x1 < b->x1)
+		a->x1 = b->x1;
+	if (a->x2 > b->x2)
+		a->x2 = b->x2;
+	if (a->y1 < b->y1)
+		a->y1 = b->y1;
+	if (a->y2 > b->y2)
+		a->y2 = b->y2;
+
+	return a->x1 < a->x2 && a->y1 < a->y2;
+}
+
+static inline void
+fbDrawableRun(DrawablePtr d, GCPtr gc, const BoxRec *box,
+	      void (*func)(DrawablePtr, GCPtr, const BoxRec *b, void *data),
+	      void *data)
+{
+	const BoxRec *c, *end;
+	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
+		BoxRec b;
+
+		if (box->x2 <= c->x1 || box->x1 >= c->x2)
+			continue;
+
+		b = *box;
+		if (box_intersect(&b, c))
+			func(d, gc, &b, data);
+	}
+}
+
/*
 * Invoke func once per candidate clip box, passing the clip box itself
 * rather than the intersection with box.  Only horizontal overlap is
 * verified here.
 * NOTE(review): fbClipBoxes() narrows candidates by y only loosely (a
 * band starting exactly at box->y2 can be included), so func may be
 * handed a box with no vertical overlap -- confirm callers tolerate this.
 */
static inline void
fbDrawableRunUnclipped(DrawablePtr d, GCPtr gc, const BoxRec *box,
		       void (*func)(DrawablePtr, GCPtr, const BoxRec *b, void *data),
		       void *data)
{
	const BoxRec *c, *end;
	for (c = fbClipBoxes(gc->pCompositeClip, box, &end); c != end; c++) {
		/* Skip clip boxes wholly left or right of box. */
		if (box->x2 <= c->x1 || box->x1 >= c->x2)
			continue;
		func(d, gc, c, data);
	}
}
+
+#endif /* FBCLIP_H */
diff --git a/src/sna/fb/fbcopy.c b/src/sna/fb/fbcopy.c
new file mode 100644
index 0000000..a2b1ded
--- /dev/null
+++ b/src/sna/fb/fbcopy.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "fb.h"
+#include <mi.h>
+
+/* Copy a list of boxes between two drawables of equal format.  Boxes
+ * are in destination space; (dx, dy) translates back to source
+ * coordinates.  The GXcopy/full-planemask forward case is handed to
+ * pixman_blt; everything else (rops, planemask, overlap directions
+ * flagged by reverse/upsidedown) goes through fbBlt.
+ */
+void
+fbCopyNtoN(DrawablePtr src_drawable, DrawablePtr dst_drawable, GCPtr gc,
+           BoxPtr box, int nbox,
+           int dx, int dy,
+	   Bool reverse, Bool upsidedown, Pixel bitplane,
+	   void *closure)
+{
+	CARD8 alu = gc ? gc->alu : GXcopy;
+	FbBits pm = gc ? fb_gc(gc)->pm : FB_ALLONES;
+	FbBits *src;
+	FbStride srcStride;
+	int srcBpp;
+	int srcXoff, srcYoff;
+	FbBits *dst;
+	FbStride dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+
+	fbGetDrawable(src_drawable, src, srcStride, srcBpp, srcXoff, srcYoff);
+	fbGetDrawable(dst_drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+
+	while (nbox--) {
+		if (pm == FB_ALLONES && alu == GXcopy && !reverse && !upsidedown) {
+			/* pixman_blt may reject a format it cannot handle;
+			 * fall through to fbBlt in that case */
+			if (!pixman_blt
+			    ((uint32_t *) src, (uint32_t *) dst, srcStride, dstStride,
+			     srcBpp, dstBpp, (box->x1 + dx + srcXoff),
+			     (box->y1 + dy + srcYoff), (box->x1 + dstXoff),
+			     (box->y1 + dstYoff), (box->x2 - box->x1),
+			     (box->y2 - box->y1)))
+				goto fallback;
+			else
+				goto next;
+		}
+fallback:
+		fbBlt(src + (box->y1 + dy + srcYoff) * srcStride,
+		      srcStride,
+		      (box->x1 + dx + srcXoff) * srcBpp,
+		      dst + (box->y1 + dstYoff) * dstStride,
+		      dstStride,
+		      (box->x1 + dstXoff) * dstBpp,
+		      (box->x2 - box->x1) * dstBpp,
+		      (box->y2 - box->y1), alu, pm, dstBpp, reverse, upsidedown);
+next:
+		box++;
+	}
+}
+
+/* Copy from a 1bpp source, expanding set/clear bits to the GC
+ * foreground/background.  A 1bpp destination is handled as a plain
+ * blt with an opaque-stipple rop; deeper destinations use fbBltOne
+ * with the reduced fg/bg rop values.
+ */
+void
+fbCopy1toN(DrawablePtr src_drawable, DrawablePtr dst_drawable, GCPtr gc,
+           BoxPtr box, int nbox,
+           int dx, int dy,
+	   Bool reverse, Bool upsidedown, Pixel bitplane,
+	   void *closure)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	FbBits *src;
+	FbStride srcStride;
+	int srcBpp;
+	int srcXoff, srcYoff;
+	FbBits *dst;
+	FbStride dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+
+	fbGetDrawable(src_drawable, src, srcStride, srcBpp, srcXoff, srcYoff);
+	fbGetDrawable(dst_drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+
+	while (nbox--) {
+		if (dstBpp == 1) {
+			fbBlt(src + (box->y1 + dy + srcYoff) * srcStride,
+			      srcStride,
+			      (box->x1 + dx + srcXoff) * srcBpp,
+			      dst + (box->y1 + dstYoff) * dstStride,
+			      dstStride,
+			      (box->x1 + dstXoff) * dstBpp,
+			      (box->x2 - box->x1) * dstBpp,
+			      (box->y2 - box->y1),
+			      FbOpaqueStipple1Rop(gc->alu,
+						  gc->fgPixel, gc->bgPixel),
+			      pgc->pm, dstBpp, reverse, upsidedown);
+		} else {
+			fbBltOne((FbStip *) (src + (box->y1 + dy + srcYoff) * srcStride),
+				 srcStride * (FB_UNIT / FB_STIP_UNIT),
+				 (box->x1 + dx + srcXoff),
+				 dst + (box->y1 + dstYoff) * dstStride,
+				 dstStride,
+				 (box->x1 + dstXoff) * dstBpp,
+				 dstBpp,
+				 (box->x2 - box->x1) * dstBpp,
+				 (box->y2 - box->y1),
+				 pgc->and, pgc->xor, pgc->bgand, pgc->bgxor);
+		}
+		box++;
+	}
+}
+
+/* Reduce the source to a single bit plane ('bitplane') and copy it to
+ * the destination.  A 1bpp destination is written directly with
+ * fbBltPlane; otherwise the plane is first extracted into a temporary
+ * stipple buffer and then expanded into the destination with fbBltOne.
+ */
+void
+fbCopyNto1(DrawablePtr src_drawable, DrawablePtr dst_drawable, GCPtr gc,
+           BoxPtr box, int nbox,
+           int dx, int dy,
+	   Bool reverse, Bool upsidedown, Pixel bitplane, void *closure)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+
+	while (nbox--) {
+		if (dst_drawable->bitsPerPixel == 1) {
+			FbBits *src;
+			FbStride srcStride;
+			int srcBpp;
+			int srcXoff, srcYoff;
+
+			FbStip *dst;
+			FbStride dstStride;
+			int dstBpp;
+			int dstXoff, dstYoff;
+
+			fbGetDrawable(src_drawable, src,
+				      srcStride, srcBpp, srcXoff, srcYoff);
+			fbGetStipDrawable(dst_drawable,
+					  dst, dstStride, dstBpp, dstXoff, dstYoff);
+			fbBltPlane(src + (box->y1 + dy + srcYoff) * srcStride, srcStride,
+				   (box->x1 + dx + srcXoff) * srcBpp, srcBpp,
+				   dst + (box->y1 + dstYoff) * dstStride, dstStride,
+				   (box->x1 + dstXoff) * dstBpp,
+				   (box->x2 - box->x1) * srcBpp, (box->y2 - box->y1),
+				   (FbStip) pgc->and, (FbStip) pgc->xor,
+				   (FbStip) pgc->bgand, (FbStip) pgc->bgxor, bitplane);
+		} else {
+			FbBits *src;
+			FbStride srcStride;
+			int srcBpp;
+			int srcXoff, srcYoff;
+
+			FbBits *dst;
+			FbStride dstStride;
+			int dstBpp;
+			int dstXoff, dstYoff;
+
+			FbStip *tmp;
+			FbStride tmpStride;
+			int width, height;
+
+			width = box->x2 - box->x1;
+			height = box->y2 - box->y1;
+
+			tmpStride = ((width + FB_STIP_MASK) >> FB_STIP_SHIFT);
+			tmp = malloc(tmpStride * height * sizeof(FbStip));
+			if (!tmp)
+				return; /* XXX out of memory: remaining boxes are silently dropped */
+
+			fbGetDrawable(src_drawable, src,
+				      srcStride, srcBpp, srcXoff, srcYoff);
+			fbGetDrawable(dst_drawable, dst,
+				      dstStride, dstBpp, dstXoff, dstYoff);
+
+			/* extract the plane as a 1bpp image into tmp... */
+			fbBltPlane(src + (box->y1 + dy + srcYoff) * srcStride,
+				   srcStride,
+				   (box->x1 + dx + srcXoff) * srcBpp,
+				   srcBpp,
+				   tmp,
+				   tmpStride,
+				   0,
+				   width * srcBpp,
+				   height,
+				   fbAndStip(GXcopy, FB_ALLONES, FB_ALLONES),
+				   fbXorStip(GXcopy, FB_ALLONES, FB_ALLONES),
+				   fbAndStip(GXcopy, 0, FB_ALLONES),
+				   fbXorStip(GXcopy, 0, FB_ALLONES), bitplane);
+			/* ...then expand it into the destination with the GC rop */
+			fbBltOne(tmp,
+				 tmpStride,
+				 0,
+				 dst + (box->y1 + dstYoff) * dstStride,
+				 dstStride,
+				 (box->x1 + dstXoff) * dstBpp,
+				 dstBpp,
+				 width * dstBpp,
+				 height,
+				 pgc->and, pgc->xor, pgc->bgand, pgc->bgxor);
+			free(tmp);
+		}
+		box++;
+	}
+}
+
+/* GC CopyArea entry point: clipping and exposure bookkeeping are
+ * delegated to miDoCopy, with fbCopyNtoN doing the actual blits.
+ */
+RegionPtr
+fbCopyArea(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+	   int sx, int sy,
+	   int width, int height,
+	   int dx, int dy)
+{
+	return miDoCopy(src, dst, gc, sx, sy, width, height, dx, dy,
+			fbCopyNtoN, 0, 0);
+}
+
+/* GC CopyPlane entry point.  The copy routine is picked from the
+ * source depth: deep sources are reduced to one plane (fbCopyNto1),
+ * 1bpp sources are expanded to the destination depth (fbCopy1toN).
+ */
+RegionPtr
+fbCopyPlane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
+	    int sx, int sy,
+	    int width, int height,
+	    int dx, int dy,
+	    unsigned long bitplane)
+{
+	if (src->bitsPerPixel > 1)
+		return miDoCopy(src, dst, gc, sx, sy, width, height, dx, dy,
+				fbCopyNto1, (Pixel) bitplane, 0);
+
+	if (bitplane & 1)
+		return miDoCopy(src, dst, gc, sx, sy, width, height, dx, dy,
+				fbCopy1toN, (Pixel) bitplane, 0);
+
+	/* 1bpp source but plane 0 not requested: nothing to copy, yet
+	 * exposures must still be generated */
+	return miHandleExposures(src, dst, gc,
+				 sx, sy, width, height, dx, dy,
+				 bitplane);
+}
diff --git a/src/sna/fb/fbfill.c b/src/sna/fb/fbfill.c
new file mode 100644
index 0000000..3df1f9c
--- /dev/null
+++ b/src/sna/fb/fbfill.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include "fbclip.h"
+
+/* Fill 'width' bits by 'height' rows starting at bit offset dstX,
+ * applying the reduced rop (and, xor).  Partial words at the left and
+ * right edges are handled with start/end masks; 'and == 0' selects
+ * the pure-write fast path for the middle words.
+ */
+static void
+fbSolid(FbBits * dst,
+        FbStride dstStride,
+        int dstX, int bpp, int width, int height, FbBits and, FbBits xor)
+{
+	FbBits startmask, endmask;
+	int n, nmiddle;
+	int startbyte, endbyte;
+
+	dst += dstX >> FB_SHIFT;
+	dstX &= FB_MASK;
+	FbMaskBitsBytes(dstX, width, and == 0, startmask, startbyte,
+			nmiddle, endmask, endbyte);
+	/* turn dstStride into the increment left over after the words
+	 * written inside the loop */
+	if (startmask)
+		dstStride--;
+	dstStride -= nmiddle;
+	while (height--) {
+		if (startmask) {
+			FbDoLeftMaskByteRRop(dst, startbyte, startmask, and, xor);
+			dst++;
+		}
+		n = nmiddle;
+		if (!and)
+			while (n--)
+				WRITE(dst++, xor);
+		else
+			while (n--) {
+				WRITE(dst, FbDoRRop(READ(dst), and, xor));
+				dst++;
+			}
+		if (endmask)
+			FbDoRightMaskByteRRop(dst, endbyte, endmask, and, xor);
+		dst += dstStride;
+	}
+}
+
+/* Fill the rectangle (x, y, width, height) of 'drawable' (already
+ * clipped by the caller) according to gc->fillStyle: solid,
+ * (opaque-)stippled or tiled.
+ */
+void
+fbFill(DrawablePtr drawable, GCPtr gc, int x, int y, int width, int height)
+{
+	FbBits *dst;
+	FbStride dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+	FbGCPrivPtr pgc = fb_gc(gc);
+
+	DBG(("%s (%d, %d)x(%d, %d), style=%d\n",
+	     __FUNCTION__, x, y, width, height, gc->fillStyle));
+
+	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+
+	switch (gc->fillStyle) {
+	case FillSolid:
+		/* pixman_fill only handles a pure write (no 'and' term in
+		 * the reduced rop); otherwise, or if it rejects the format,
+		 * fall back to fbSolid */
+		if (pgc->and ||
+		    !pixman_fill((uint32_t *) dst, dstStride, dstBpp,
+				 x + dstXoff, y + dstYoff,
+				 width, height, pgc->xor))
+			fbSolid(dst + (y + dstYoff) * dstStride,
+				dstStride,
+				(x + dstXoff) * dstBpp,
+				dstBpp, width * dstBpp, height, pgc->and, pgc->xor);
+		break;
+
+	case FillStippled:
+	case FillOpaqueStippled:
+		{
+			PixmapPtr pStip = gc->stipple;
+			int stipWidth = pStip->drawable.width;
+			int stipHeight = pStip->drawable.height;
+
+			if (dstBpp == 1) {
+				/* 1bpp destination: stippling reduces to a tile
+				 * operation with a stipple rop */
+				int alu;
+				FbBits *stip;
+				FbStride stipStride;
+				int stipBpp;
+				_X_UNUSED int stipXoff, stipYoff;
+
+				if (gc->fillStyle == FillStippled)
+					alu = FbStipple1Rop(gc->alu, gc->fgPixel);
+				else
+					alu = FbOpaqueStipple1Rop(gc->alu, gc->fgPixel, gc->bgPixel);
+				fbGetDrawable(&pStip->drawable, stip, stipStride, stipBpp, stipXoff,
+					      stipYoff);
+				fbTile(dst + (y + dstYoff) * dstStride, dstStride, x + dstXoff,
+				       width, height, stip, stipStride, stipWidth, stipHeight, alu,
+				       pgc->pm, dstBpp, (gc->patOrg.x + drawable->x + dstXoff),
+				       gc->patOrg.y + drawable->y - y);
+			} else {
+				FbStip *stip;
+				FbStride stipStride;
+				int stipBpp;
+				_X_UNUSED int stipXoff, stipYoff;
+				FbBits fgand, fgxor, bgand, bgxor;
+
+				fgand = pgc->and;
+				fgxor = pgc->xor;
+				/* transparent stipple: background bits become a noop */
+				if (gc->fillStyle == FillStippled) {
+					bgand = fbAnd(GXnoop, (FbBits) 0, FB_ALLONES);
+					bgxor = fbXor(GXnoop, (FbBits) 0, FB_ALLONES);
+				} else {
+					bgand = pgc->bgand;
+					bgxor = pgc->bgxor;
+				}
+
+				fbGetStipDrawable(&pStip->drawable, stip, stipStride, stipBpp,
+						  stipXoff, stipYoff);
+				fbStipple(dst + (y + dstYoff) * dstStride, dstStride,
+					  (x + dstXoff) * dstBpp, dstBpp, width * dstBpp, height,
+					  stip, stipStride, stipWidth, stipHeight,
+					  pgc->evenStipple, fgand, fgxor, bgand, bgxor,
+					  gc->patOrg.x + drawable->x + dstXoff,
+					  gc->patOrg.y + drawable->y - y);
+			}
+			break;
+		}
+
+	case FillTiled:
+		{
+			PixmapPtr pTile = gc->tile.pixmap;
+			FbBits *tile;
+			FbStride tileStride;
+			int tileBpp;
+			int tileWidth;
+			int tileHeight;
+			_X_UNUSED int tileXoff, tileYoff;
+
+			fbGetDrawable(&pTile->drawable, tile,
+				      tileStride, tileBpp, tileXoff, tileYoff);
+			tileWidth = pTile->drawable.width;
+			tileHeight = pTile->drawable.height;
+			fbTile(dst + (y + dstYoff) * dstStride,
+			       dstStride,
+			       (x + dstXoff) * dstBpp,
+			       width * dstBpp, height,
+			       tile,
+			       tileStride,
+			       tileWidth * tileBpp,
+			       tileHeight,
+			       gc->alu, pgc->pm,
+			       dstBpp,
+			       (gc->patOrg.x + drawable->x + dstXoff) * dstBpp,
+			       gc->patOrg.y + drawable->y - y);
+			break;
+		}
+	}
+}
+
+/* fbDrawableRun callback: solid-fill one clipped box with the GC
+ * background colour using GXcopy (pixman_fill fast path when the
+ * reduced rop is a pure write).
+ */
+static void
+_fbSolidBox(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	FbBits *dst;
+	FbStride stride;
+	int dx, dy, bpp;
+	FbBits and = fbAnd(GXcopy, fb_gc(gc)->bg, fb_gc(gc)->pm);
+	FbBits xor = fbXor(GXcopy, fb_gc(gc)->bg, fb_gc(gc)->pm);
+
+	fbGetDrawable(drawable, dst, stride, bpp, dx, dy);
+
+	if (and ||
+	    !pixman_fill((uint32_t *) dst, stride, bpp,
+			 b->x1 + dx, b->y1 + dy,
+			 (b->x2 - b->x1), (b->y2 - b->y1), xor))
+		fbSolid(dst + (b->y1 + dy) * stride, stride,
+			(b->x1 + dx) * bpp, bpp,
+			(b->x2 - b->x1) * bpp, (b->y2 - b->y1),
+			and, xor);
+}
+
+/* Fill the rectangle (x1, y1)-(x2, y2) with the GC background colour,
+ * clipped against the composite clip (via _fbSolidBox per clip box).
+ */
+void
+fbSolidBoxClipped(DrawablePtr drawable, GCPtr gc,
+                  int x1, int y1, int x2, int y2)
+{
+	BoxRec extent;
+
+	extent.x1 = x1;
+	extent.x2 = x2;
+	extent.y1 = y1;
+	extent.y2 = y2;
+
+	fbDrawableRun(drawable, gc, &extent, _fbSolidBox, NULL);
+}
+
+/* fbDrawableRun callback: forward one clipped box to fbFill. */
+inline static void
+fbFillBox(DrawablePtr drawable, GCPtr gc, const BoxRec *box, void *data)
+{
+	DBG(("%s box=(%d, %d), (%d, %d)\n", __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2));
+	fbFill(drawable, gc,
+	       box->x1, box->y1,
+	       box->x2 - box->x1, box->y2 - box->y1);
+}
+
+/* Fill a list of rectangles: translate each into drawable space,
+ * then clip and fill it box-by-box through fbDrawableRun/fbFillBox.
+ */
+void
+fbPolyFillRect(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
+{
+	int i;
+
+	DBG(("%s x %d\n", __FUNCTION__, n));
+	for (i = 0; i < n; i++) {
+		BoxRec b;
+
+		b.x1 = r[i].x + drawable->x;
+		b.y1 = r[i].y + drawable->y;
+		b.x2 = fbBound(b.x1, r[i].width);
+		b.y2 = fbBound(b.y1, r[i].height);
+
+		DBG(("%s: rectangle (%d, %d), (%d, %d)\n",
+		     __FUNCTION__, b.x1, b.y1, b.x2, b.y2));
+		fbDrawableRun(drawable, gc, &b, fbFillBox, NULL);
+	}
+}
diff --git a/src/sna/fb/fbgc.c b/src/sna/fb/fbgc.c
new file mode 100644
index 0000000..0969040
--- /dev/null
+++ b/src/sna/fb/fbgc.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include <gcstruct.h>
+#include <migc.h>
+#include <scrnintstr.h>
+
+/*
+ * Pad pixmap to FB_UNIT bits wide: replicate the first 'width' bits
+ * of each row across the rest of the first FbBits word.
+ * NOTE(review): only the first word of each row is read and written,
+ * so this assumes width * bpp <= FB_UNIT -- confirm with callers.
+ */
+void
+fbPadPixmap(PixmapPtr pPixmap)
+{
+	int width;
+	FbBits *bits;
+	FbBits b;
+	FbBits mask;
+	int height;
+	int w;
+	int stride;
+	int bpp;
+	_X_UNUSED int xOff, yOff;
+
+	fbGetDrawable(&pPixmap->drawable, bits, stride, bpp, xOff, yOff);
+
+	width = pPixmap->drawable.width * pPixmap->drawable.bitsPerPixel;
+	height = pPixmap->drawable.height;
+	mask = FbBitsMask(0, width);
+	while (height--) {
+		b = READ(bits) & mask;
+		w = width;
+		/* double the replicated span until the word is full */
+		while (w < FB_UNIT) {
+			b = b | FbScrRight(b, w);
+			w <<= 1;
+		}
+		WRITE(bits, b);
+		bits += stride;
+	}
+}
+
+/*
+ * Check that the leading 'len' bits of 'bits' repeat across the first
+ * 'width' bits (capped at one FbBits word).
+ */
+static Bool
+fbBitsRepeat(FbBits bits, int len, int width)
+{
+	const FbBits mask = FbBitsMask(0, len);
+	const FbBits first = bits & mask;
+	int n;
+
+	if (width > FB_UNIT)
+		width = FB_UNIT;
+
+	n = width / len;
+	while (n--) {
+		if ((bits & mask) != first)
+			return FALSE;
+		bits = FbScrLeft(bits, len);
+	}
+
+	return TRUE;
+}
+
+/*
+ * Check whether an entire bitmap line is a repetition of
+ * the first 'len' bits
+ */
+static Bool
+fbLineRepeat(FbBits * bits, int len, int width)
+{
+	FbBits first = bits[0];
+
+	/* first word must repeat internally... */
+	if (!fbBitsRepeat(first, len, width))
+		return FALSE;
+	/* ...and every remaining full word must equal the first */
+	width = (width + FB_UNIT - 1) >> FB_SHIFT;
+	bits++;
+	while (--width)
+		if (READ(bits) != first)
+			return FALSE;
+	return TRUE;
+}
+
+/*
+ * The even stipple code wants the first FB_UNIT/bpp bits on
+ * each scanline to represent the entire stipple.
+ * Returns TRUE when the stipple qualifies for that fast path.
+ */
+static Bool
+fbCanEvenStipple(PixmapPtr pStipple, int bpp)
+{
+	int len = FB_UNIT / bpp;
+	FbBits *bits;
+	int stride;
+	int stip_bpp;
+	_X_UNUSED int stipXoff, stipYoff;
+	int h;
+
+	/* make sure the stipple width is a multiple of the even stipple width */
+	if (pStipple->drawable.width % len != 0)
+		return FALSE;
+
+	fbGetDrawable(&pStipple->drawable, bits, stride, stip_bpp, stipXoff,
+		      stipYoff);
+	h = pStipple->drawable.height;
+	/* check to see that the stipple repeats horizontally */
+	while (h--) {
+		if (!fbLineRepeat(bits, len, pStipple->drawable.width))
+			return FALSE;
+
+		bits += stride;
+	}
+	return TRUE;
+}
+
+/* GC ValidateGC hook: refresh the fb-private GC state for whatever
+ * changed -- even-stipple detection, the reduced rop values (fg/bg/pm
+ * replicated to a full FbBits word and folded with the alu), and the
+ * cached total dash length.
+ */
+void
+fbValidateGC(GCPtr gc, unsigned long changes, DrawablePtr drawable)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	FbBits mask;
+
+	DBG(("%s changes=%lx\n", __FUNCTION__, changes));
+
+	if (changes & GCStipple) {
+		pgc->evenStipple = FALSE;
+
+		if (gc->stipple) {
+			/* can we do an even stipple ?? */
+			if (FbEvenStip(gc->stipple->drawable.width,
+				       drawable->bitsPerPixel) &&
+			    (fbCanEvenStipple(gc->stipple, drawable->bitsPerPixel)))
+				pgc->evenStipple = TRUE;
+		}
+	}
+
+	/*
+	 * Recompute reduced rop values
+	 */
+	if (changes & (GCForeground | GCBackground | GCPlaneMask | GCFunction)) {
+		int s;
+		FbBits depthMask;
+
+		mask = FbFullMask(drawable->bitsPerPixel);
+		depthMask = FbFullMask(drawable->depth);
+
+		pgc->fg = gc->fgPixel & mask;
+		pgc->bg = gc->bgPixel & mask;
+
+		if ((gc->planemask & depthMask) == depthMask)
+			pgc->pm = mask;
+		else
+			pgc->pm = gc->planemask & mask;
+
+		/* replicate the pixel values to fill a full FbBits word */
+		s = drawable->bitsPerPixel;
+		while (s < FB_UNIT) {
+			pgc->fg |= pgc->fg << s;
+			pgc->bg |= pgc->bg << s;
+			pgc->pm |= pgc->pm << s;
+			s <<= 1;
+		}
+		pgc->and = fbAnd(gc->alu, pgc->fg, pgc->pm);
+		pgc->xor = fbXor(gc->alu, pgc->fg, pgc->pm);
+		pgc->bgand = fbAnd(gc->alu, pgc->bg, pgc->pm);
+		pgc->bgxor = fbXor(gc->alu, pgc->bg, pgc->pm);
+	}
+
+	if (changes & GCDashList) {
+		unsigned short n = gc->numInDashList;
+		unsigned char *dash = gc->dash;
+		unsigned int dashLength = 0;
+
+		while (n--)
+			dashLength += (unsigned int) *dash++;
+		pgc->dashLength = dashLength;
+	}
+}
diff --git a/src/sna/fb/fbglyph.c b/src/sna/fb/fbglyph.c
new file mode 100644
index 0000000..789e5b8
--- /dev/null
+++ b/src/sna/fb/fbglyph.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include <X11/fonts/fontstruct.h>
+#include <dixfontstr.h>
+
+#define GLYPH	    fbGlyph8
+#define BITS	    BYTE
+#define BITS2	    CARD16
+#define BITS4	    CARD32
+#include "fbglyphbits.h"
+#undef BITS
+#undef BITS2
+#undef BITS4
+#undef GLYPH
+
+#define GLYPH	    fbGlyph16
+#define BITS	    CARD16
+#define BITS2	    CARD32
+#include "fbglyphbits.h"
+#undef BITS
+#undef BITS2
+#undef GLYPH
+
+#define GLYPH	    fbGlyph32
+#define BITS	    CARD32
+#include "fbglyphbits.h"
+#undef BITS
+#undef GLYPH
+
+/* Return true when the glyph rectangle lies entirely inside the
+ * composite clip, so it can be drawn without per-pixel clipping.
+ */
+static bool
+fbGlyphIn(GCPtr gc, int x, int y, int width, int height)
+{
+	BoxRec box;
+	BoxPtr extents = RegionExtents(gc->pCompositeClip);
+
+	/*
+	 * Check extents by hand to avoid 16 bit overflows
+	 */
+	if (x < (int) extents->x1 || (int) extents->x2 < x + width)
+		return FALSE;
+	if (y < (int) extents->y1 || (int) extents->y2 < y + height)
+		return FALSE;
+
+	box.x1 = x;
+	box.x2 = x + width;
+	box.y1 = y;
+	box.y2 = y + height;
+	return RegionContainsRect(gc->pCompositeClip, &box) == rgnIN;
+}
+
+#define WRITE1(d,n,fg)	WRITE((d) + (n), (CARD8) fg)
+#define WRITE2(d,n,fg)	WRITE((CARD16 *) &(d[n]), (CARD16) fg)
+#define WRITE4(d,n,fg)	WRITE((CARD32 *) &(d[n]), (CARD32) fg)
+
+/*
+ * This is a bit tricky, but it's brief.  Write 12 bytes worth
+ * of dest, which is four pixels, at a time.  This gives constant
+ * code for each pattern as they're always aligned the same
+ *
+ *  a b c d  a b c d  a b c d	bytes
+ *  A B C A  B C A B  C A B C	pixels
+ * 
+ *    f0        f1       f2
+ *  A B C A  B C A B  C A B C	pixels LSB
+ *  C A B C  A B C A  B C A B	pixels MSB
+ *
+ *		LSB	MSB
+ *  A		f0	f1
+ *  B		f1	f2
+ *  C		f2	f0
+ *  A B		f0	f2
+ *  B C		f1	f0
+ *  C A		f2	f1
+ *  A B C A	f0	f1
+ *  B C A B	f1    	f2
+ *  C A B C	f2	f0
+ */
+
+#undef _A
+#undef _B
+#undef _C
+#undef _AB
+#undef _BC
+#undef _CA
+#undef _ABCA
+#undef _BCAB
+#undef _CABC
+
+#define _A	f0
+#define _B	f1
+#define _C	f2
+#define _AB	f0
+#define _BC	f1
+#define _CA	f2
+#define _ABCA	f0
+#define _BCAB	f1
+#define _CABC	f2
+#define CASE(a,b,c,d)	(a | (b << 1) | (c << 2) | (d << 3))
+
+/* PolyGlyphBlt: draw foreground-only glyphs.  The unrolled
+ * fbGlyph8/16/32 rasters are used for solid fills whose reduced rop
+ * has no 'and' term, on glyphs no wider than one FbStip word that lie
+ * wholly inside the clip; everything else goes through fbPushImage.
+ */
+void
+fbPolyGlyphBlt(DrawablePtr drawable, GCPtr gc,
+               int x, int y,
+               unsigned int nglyph, CharInfoPtr * ppci, pointer glyphs)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	CharInfoPtr pci;
+	unsigned char *pglyph;      /* pointer bits in glyph */
+	int gx, gy;
+	int gWidth, gHeight;        /* width and height of glyph */
+	FbStride gStride;           /* stride of glyph */
+	void (*raster) (FbBits *, FbStride, int, FbStip *, FbBits, int, int);
+	FbBits *dst = 0;
+	FbStride dstStride = 0;
+	int dstBpp = 0;
+	int dstXoff = 0, dstYoff = 0;
+
+	DBG(("%s x %d\n", __FUNCTION__, nglyph));
+
+	raster = 0;
+	if (gc->fillStyle == FillSolid && pgc->and == 0) {
+		dstBpp = drawable->bitsPerPixel;
+		switch (dstBpp) {
+		case 8:
+			raster = fbGlyph8;
+			break;
+		case 16:
+			raster = fbGlyph16;
+			break;
+		case 32:
+			raster = fbGlyph32;
+			break;
+		}
+	}
+	x += drawable->x;
+	y += drawable->y;
+
+	while (nglyph--) {
+		pci = *ppci++;
+		pglyph = FONTGLYPHBITS(glyphs, pci);
+		gWidth = GLYPHWIDTHPIXELS(pci);
+		gHeight = GLYPHHEIGHTPIXELS(pci);
+		if (gWidth && gHeight) {
+			gx = x + pci->metrics.leftSideBearing;
+			gy = y - pci->metrics.ascent;
+			if (raster && gWidth <= sizeof(FbStip) * 8 &&
+			    fbGlyphIn(gc, gx, gy, gWidth, gHeight)) {
+				fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff,
+					      dstYoff);
+				raster(dst + (gy + dstYoff) * dstStride, dstStride, dstBpp,
+					  (FbStip *) pglyph, pgc->xor, gx + dstXoff, gHeight);
+			} else {
+				gStride = GLYPHWIDTHBYTESPADDED(pci) / sizeof(FbStip);
+				fbPushImage(drawable, gc,
+					    (FbStip *)pglyph,
+					    gStride, 0, gx, gy, gWidth, gHeight);
+			}
+		}
+		x += pci->metrics.characterWidth;
+	}
+}
+
+/* ImageGlyphBlt: draw glyphs with an opaque background.  For a
+ * terminal font with no unrolled raster the glyph image itself is
+ * drawn opaquely; otherwise the background box spanning all glyphs is
+ * filled first (fbSolidBoxClipped) and the glyphs are then drawn
+ * transparently, via the fast rasters where possible.
+ */
+void
+fbImageGlyphBlt(DrawablePtr drawable, GCPtr gc,
+                int x, int y,
+                unsigned int nglyph, CharInfoPtr * ppciInit, pointer glyphs)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	CharInfoPtr *ppci;
+	CharInfoPtr pci;
+	unsigned char *pglyph;      /* pointer bits in glyph */
+	int gWidth, gHeight;        /* width and height of glyph */
+	FbStride gStride;           /* stride of glyph */
+	bool opaque;
+	int n;
+	int gx, gy;
+	void (*raster)(FbBits *, FbStride, int, FbStip *, FbBits, int, int);
+	FbBits *dst = 0;
+	FbStride dstStride = 0;
+	int dstBpp = 0;
+	int dstXoff = 0, dstYoff = 0;
+
+	DBG(("%s x %d\n", __FUNCTION__, nglyph));
+
+	raster = 0;
+	if (pgc->and == 0) {
+		dstBpp = drawable->bitsPerPixel;
+		switch (dstBpp) {
+		case 8:
+			raster = fbGlyph8;
+			break;
+		case 16:
+			raster = fbGlyph16;
+			break;
+		case 32:
+			raster = fbGlyph32;
+			break;
+		}
+	}
+
+	x += drawable->x;
+	y += drawable->y;
+
+	if (TERMINALFONT(gc->font) && !raster) {
+		opaque = TRUE;
+	} else {
+		/* paint the background rectangle covering the whole string */
+		int xBack, widthBack;
+		int yBack, heightBack;
+
+		ppci = ppciInit;
+		n = nglyph;
+		widthBack = 0;
+		while (n--)
+			widthBack += (*ppci++)->metrics.characterWidth;
+
+		xBack = x;
+		if (widthBack < 0) {
+			xBack += widthBack;
+			widthBack = -widthBack;
+		}
+		yBack = y - FONTASCENT(gc->font);
+		heightBack = FONTASCENT(gc->font) + FONTDESCENT(gc->font);
+		fbSolidBoxClipped(drawable, gc,
+				  xBack, yBack,
+				  xBack + widthBack,
+				  yBack + heightBack);
+		opaque = FALSE;
+	}
+
+	ppci = ppciInit;
+	while (nglyph--) {
+		pci = *ppci++;
+		pglyph = FONTGLYPHBITS(glyphs, pci);
+		gWidth = GLYPHWIDTHPIXELS(pci);
+		gHeight = GLYPHHEIGHTPIXELS(pci);
+		if (gWidth && gHeight) {
+			gx = x + pci->metrics.leftSideBearing;
+			gy = y - pci->metrics.ascent;
+			if (raster && gWidth <= sizeof(FbStip) * 8 &&
+			    fbGlyphIn(gc, gx, gy, gWidth, gHeight)) {
+				fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff,
+					      dstYoff);
+				raster(dst + (gy + dstYoff) * dstStride, dstStride, dstBpp,
+				       (FbStip *) pglyph, pgc->fg, gx + dstXoff, gHeight);
+			} else {
+				gStride = GLYPHWIDTHBYTESPADDED(pci) / sizeof(FbStip);
+				fbPutXYImage(drawable, gc,
+					     pgc->fg, pgc->bg, pgc->pm,
+					     GXcopy, opaque,
+					     gx, gy, gWidth, gHeight,
+					     (FbStip *) pglyph, gStride, 0);
+			}
+		}
+		x += pci->metrics.characterWidth;
+	}
+}
diff --git a/src/sna/fb/fbglyphbits.h b/src/sna/fb/fbglyphbits.h
new file mode 100644
index 0000000..af0f00f
--- /dev/null
+++ b/src/sna/fb/fbglyphbits.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define isClipped(c,ul,lr)  (((c) | ((c) - (ul)) | ((lr) - (c))) & 0x80008000)
+#define RROP(b,a,x)	WRITE((b), FbDoRRop (READ(b), (a), (x)))
+
+#define WRITE_ADDR1(n)	    (n)
+#define WRITE_ADDR2(n)	    (n)
+#define WRITE_ADDR4(n)	    (n)
+
+#define WRITE1(d,n,fg)	    WRITE(d + WRITE_ADDR1(n), (BITS) (fg))
+
+#ifdef BITS2
+#define WRITE2(d,n,fg)	    WRITE((BITS2 *) &((d)[WRITE_ADDR2(n)]), (BITS2) (fg))
+#else
+/* no double-width type available: two single-pixel stores */
+#define WRITE2(d,n,fg)	    (WRITE1(d,n,fg), WRITE1(d,(n)+1,fg))
+#endif
+
+#ifdef BITS4
+#define WRITE4(d,n,fg)	    WRITE((BITS4 *) &((d)[WRITE_ADDR4(n)]), (BITS4) (fg))
+#else
+#define WRITE4(d,n,fg)	    (WRITE2(d,n,fg), WRITE2(d,(n)+2,fg))
+#endif
+
+/*
+ * Template instantiated per pixel size (BITS/BITS2/BITS4 set by the
+ * includer).  Draw one solid glyph: each stipple row is consumed four
+ * bits at a time and each of the 16 possible 4-bit patterns is
+ * dispatched to unrolled 1/2/4-pixel foreground stores.  The
+ * destination pointer is aligned down to a 4-pixel boundary (x & ~3)
+ * so the stores can use constant offsets; the first group is shortened
+ * to 'lshift' bits to compensate.
+ */
+static void
+GLYPH(FbBits * dstBits,
+      FbStride dstStride,
+      int dstBpp, FbStip * stipple, FbBits fg, int x, int height)
+{
+	int lshift;
+	FbStip bits;
+	BITS *dstLine;
+	BITS *dst;
+	int n;
+	int shift;
+
+	dstLine = (BITS *) dstBits;
+	dstLine += x & ~3;
+	dstStride *= (sizeof(FbBits) / sizeof(BITS));
+	shift = x & 3;
+	lshift = 4 - shift;
+	while (height--) {
+		bits = *stipple++;
+		dst = (BITS *) dstLine;
+		n = lshift;
+		while (bits) {
+			switch (FbStipMoveLsb(FbLeftStipBits(bits, n), 4, n)) {
+			case 0:
+				break;
+			case 1:
+				WRITE1(dst, 0, fg);
+				break;
+			case 2:
+				WRITE1(dst, 1, fg);
+				break;
+			case 3:
+				WRITE2(dst, 0, fg);
+				break;
+			case 4:
+				WRITE1(dst, 2, fg);
+				break;
+			case 5:
+				WRITE1(dst, 0, fg);
+				WRITE1(dst, 2, fg);
+				break;
+			case 6:
+				WRITE1(dst, 1, fg);
+				WRITE1(dst, 2, fg);
+				break;
+			case 7:
+				WRITE2(dst, 0, fg);
+				WRITE1(dst, 2, fg);
+				break;
+			case 8:
+				WRITE1(dst, 3, fg);
+				break;
+			case 9:
+				WRITE1(dst, 0, fg);
+				WRITE1(dst, 3, fg);
+				break;
+			case 10:
+				WRITE1(dst, 1, fg);
+				WRITE1(dst, 3, fg);
+				break;
+			case 11:
+				WRITE2(dst, 0, fg);
+				WRITE1(dst, 3, fg);
+				break;
+			case 12:
+				WRITE2(dst, 2, fg);
+				break;
+			case 13:
+				WRITE1(dst, 0, fg);
+				WRITE2(dst, 2, fg);
+				break;
+			case 14:
+				WRITE1(dst, 1, fg);
+				WRITE2(dst, 2, fg);
+				break;
+			case 15:
+				WRITE4(dst, 0, fg);
+				break;
+			}
+			bits = FbStipLeft(bits, n);
+			n = 4;
+			dst += 4;
+		}
+		dstLine += dstStride;
+	}
+}
+
+#undef WRITE_ADDR1
+#undef WRITE_ADDR2
+#undef WRITE_ADDR4
+#undef WRITE1
+#undef WRITE2
+#undef WRITE4
+
+#undef RROP
+#undef isClipped
diff --git a/src/sna/fb/fbimage.c b/src/sna/fb/fbimage.c
new file mode 100644
index 0000000..5af2389
--- /dev/null
+++ b/src/sna/fb/fbimage.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "fb.h"
+#include "fbclip.h"
+
+/* Closure passed through fbDrawableRun to the per-clip-box callback. */
+struct fbPutZImage {
+	FbStip *src, *dst;		/* source image / destination pixmap bits */
+	FbStride src_stride, dst_stride;	/* strides in FbStip units */
+
+	int dst_x, dst_y;		/* drawable -> pixmap offset */
+	int x0, y0;			/* image origin in drawable space */
+};
+
+/*
+ * Blit the intersection of one clip box 'b' with the PutImage area.
+ * Horizontal offsets handed to fbBltStip are in bits, hence the * bpp
+ * on the x coordinates; the copy applies the GC's alu and planemask.
+ */
+inline static void
+_fbPutZImage(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbPutZImage *data = _data;
+	int bpp = drawable->bitsPerPixel;
+
+	fbBltStip(data->src + (b->y1 - data->y0) * data->src_stride, data->src_stride,
+		  (b->x1 - data->x0) * bpp,
+		  data->dst + (b->y1 + data->dst_y) * data->dst_stride,
+		  data->dst_stride,
+		  (b->x1 + data->dst_x) * bpp,
+		  (b->x2 - b->x1) * bpp, (b->y2 - b->y1),
+		  gc->alu, fb_gc(gc)->pm, bpp);
+}
+
+/*
+ * PutImage of a ZPixmap-format image: set up the blit closure for the
+ * (x, y, width, height) area and walk the drawable's clip boxes via
+ * fbDrawableRun, copying each intersecting sub-rectangle.
+ */
+static void
+fbPutZImage(DrawablePtr drawable, GCPtr gc,
+            int x, int y, int width, int height,
+	    FbStip *src, FbStride srcStride)
+{
+	PixmapPtr pixmap;
+	struct fbPutZImage data;
+	BoxRec box;
+
+	box.x1 = data.x0 = x;
+	box.y1 = data.y0 = y;
+	box.x2 = x + width;
+	box.y2 = y + height;
+	data.src = src;
+	data.src_stride = srcStride;
+
+	/* Resolve the backing pixmap and the drawable->pixmap offset. */
+	fbGetDrawablePixmap(drawable, pixmap, data.dst_x, data.dst_y);
+	data.dst = pixmap->devPrivate.ptr;
+	data.dst_stride = pixmap->devKind / sizeof(FbStip);
+
+	fbDrawableRun(drawable, gc, &box, _fbPutZImage, &data);
+}
+
+/* Closure for the XY (bitmap/plane) PutImage paths. */
+struct fbPutXYImage {
+	FbStip *src, *dst;
+	FbStride src_stride, dst_stride;
+
+	int dst_x, dst_y, src_x;	/* src_x = leftPad bit offset */
+	int x0, y0;
+
+	int alu, pm;			/* used by the 1bpp path */
+	FbBits fgand, fgxor, bgand, bgxor;	/* used by the Nbpp path */
+};
+
+/*
+ * Per-clip-box callback for a 1bpp destination: the source bit offset
+ * is in bits (no * bpp on the source x), and the GC's fg/bg have been
+ * folded into 'alu' by the caller.
+ */
+inline static void
+_fbPutXYImage1(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbPutXYImage *data = _data;
+	int bpp = drawable->bitsPerPixel;
+
+	fbBltStip(data->src + (b->y1 - data->y0) * data->src_stride, data->src_stride,
+		  (b->x1 - data->x0) + data->src_x,
+		  (FbStip *) (data->dst + (b->y1 + data->dst_y) * data->dst_stride),
+		  data->dst_stride,
+		  (b->x1 + data->dst_x) * bpp,
+		  (b->x2 - b->x1) * bpp, (b->y2 - b->y1),
+		  data->alu, data->pm, bpp);
+}
+
+/*
+ * Per-clip-box callback for deeper destinations: expand one source
+ * plane through fbBltOne, writing fgand/fgxor where the source bit is
+ * set and bgand/bgxor where it is clear.
+ */
+inline static void
+_fbPutXYImageN(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbPutXYImage *data = _data;
+	int bpp = drawable->bitsPerPixel;
+
+	fbBltOne(data->src + (b->y1 - data->y0) * data->src_stride,
+		 data->src_stride,
+		 (b->x1 - data->x0) + data->src_x,
+		 data->dst + (b->y1 + data->dst_y) * data->dst_stride,
+		 data->dst_stride,
+		 (b->x1 + data->dst_x) * bpp, bpp,
+		 (b->x2 - b->x1) * bpp, (b->y2 - b->y1),
+		 data->fgand, data->fgxor,
+		 data->bgand, data->bgxor);
+}
+
+/*
+ * PutImage of XY-format (bitmap / per-plane) data.  'opaque' selects
+ * between opaque-stipple (both fg and bg written) and transparent
+ * stipple (bg left untouched via a GXnoop term).  Dispatches to the
+ * 1bpp or Nbpp clip-walk callback depending on the destination depth.
+ */
+void
+fbPutXYImage(DrawablePtr drawable, GCPtr gc,
+             FbBits fg, FbBits bg, FbBits pm, int alu, Bool opaque,
+             int x, int y, int width, int height,
+	     FbStip *src, FbStride srcStride, int srcX)
+{
+	PixmapPtr pixmap;
+	struct fbPutXYImage data;
+	BoxRec box;
+
+	box.x1 = data.x0 = x;
+	box.y1 = data.y0 = y;
+	box.x2 = x + width;
+	box.y2 = y + height;
+	data.src = src;
+	data.src_stride = srcStride;
+	data.src_x = srcX;
+
+	fbGetDrawablePixmap(drawable, pixmap, data.dst_x, data.dst_y);
+	data.dst = pixmap->devPrivate.ptr;
+	data.dst_stride = pixmap->devKind / sizeof(FbStip);
+
+	if (drawable->bitsPerPixel == 1) {
+		/* 1bpp: fold fg/bg into a single raster-op code. */
+		if (opaque)
+			data.alu = FbOpaqueStipple1Rop(alu, fg, bg);
+		else
+			data.alu = FbStipple1Rop(alu, fg);
+		data.pm = pm;
+
+		fbDrawableRun(drawable, gc, &box, _fbPutXYImage1, &data);
+	} else {
+		/* Nbpp: precompute and/xor terms for set and clear bits. */
+		data.fgand = fbAnd(alu, fg, pm);
+		data.fgxor = fbXor(alu, fg, pm);
+		if (opaque) {
+			data.bgand = fbAnd(alu, bg, pm);
+			data.bgxor = fbXor(alu, bg, pm);
+		} else {
+			/* Transparent: background term is a no-op. */
+			data.bgand = fbAnd(GXnoop, (FbBits) 0, FB_ALLONES);
+			data.bgxor = fbXor(GXnoop, (FbBits) 0, FB_ALLONES);
+		}
+
+		fbDrawableRun(drawable, gc, &box, _fbPutXYImageN, &data);
+	}
+}
+
+/*
+ * GC PutImage entry point: translate to drawable coordinates and
+ * dispatch on the wire image format.  XYPixmap supplies one bitmap
+ * per depth plane, applied only for planes set in the GC planemask.
+ */
+void
+fbPutImage(DrawablePtr drawable, GCPtr gc, int depth,
+           int x, int y, int w, int h,
+	   int leftPad, int format, char *image)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	unsigned long i;
+	FbStride srcStride;
+	FbStip *src = (FbStip *)image;
+
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
+
+	x += drawable->x;
+	y += drawable->y;
+
+	switch (format) {
+	case XYBitmap:
+		/* Single bitmap rendered with the GC fg/bg. */
+		srcStride = BitmapBytePad(w + leftPad) / sizeof(FbStip);
+		fbPutXYImage(drawable, gc,
+			     pgc->fg, pgc->bg, pgc->pm,
+			     gc->alu, TRUE,
+			     x, y, w, h,
+			     src, srcStride, leftPad);
+		break;
+	case XYPixmap:
+		/* One bitmap per plane, most significant plane first;
+		 * planes masked out of the GC planemask are skipped
+		 * (and their source data is not consumed). */
+		srcStride = BitmapBytePad(w + leftPad) / sizeof(FbStip);
+		for (i = (unsigned long) 1 << (drawable->depth - 1); i; i >>= 1) {
+			if (i & gc->planemask) {
+				fbPutXYImage(drawable, gc,
+					     FB_ALLONES,
+					     0,
+					     fbReplicatePixel(i, drawable->bitsPerPixel),
+					     gc->alu,
+					     TRUE, x, y, w, h, src, srcStride, leftPad);
+				src += srcStride * h;
+			}
+		}
+		break;
+	case ZPixmap:
+		/* Last case: no break needed, but note there is none. */
+		srcStride = PixmapBytePad(w, drawable->depth) / sizeof(FbStip);
+		fbPutZImage(drawable, gc,
+			    x, y, w, h, src, srcStride);
+	}
+}
+
+/*
+ * GetImage: copy a rectangle of the drawable into client memory 'd'.
+ * ZPixmap (or a 1bpp source) is a straight masked copy via fbBltStip;
+ * any other format extracts a single plane as a bitmap via fbBltPlane.
+ * When the planemask is partial the destination is zeroed first so
+ * masked-out bits read back as 0.
+ */
+void
+fbGetImage(DrawablePtr drawable,
+           int x, int y, int w, int h,
+	   unsigned int format, unsigned long planeMask, char *d)
+{
+	FbBits *src;
+	FbStride srcStride;
+	int srcBpp;
+	int srcXoff, srcYoff;
+	FbStip *dst;
+	FbStride dstStride;
+
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
+
+	fbGetDrawable(drawable, src, srcStride, srcBpp, srcXoff, srcYoff);
+
+	x += drawable->x;
+	y += drawable->y;
+
+	dst = (FbStip *) d;
+	if (format == ZPixmap || srcBpp == 1) {
+		FbBits pm;
+
+		pm = fbReplicatePixel(planeMask, srcBpp);
+		dstStride = PixmapBytePad(w, drawable->depth);
+		if (pm != FB_ALLONES)
+			memset(d, 0, dstStride * h);
+		dstStride /= sizeof(FbStip);
+		fbBltStip((FbStip *)(src + (y + srcYoff) * srcStride), srcStride,
+			  (x + srcXoff) * srcBpp,
+			  dst, dstStride, 0, w * srcBpp, h, GXcopy, pm, srcBpp);
+	} else {
+		/* XYPixmap request: extract the plane named by planeMask. */
+		dstStride = BitmapBytePad(w) / sizeof(FbStip);
+		fbBltPlane(src + (y + srcYoff) * srcStride,
+			   srcStride,
+			   (x + srcXoff) * srcBpp,
+			   srcBpp,
+			   dst,
+			   dstStride,
+			   0,
+			   w * srcBpp, h,
+			   fbAndStip(GXcopy, FB_STIP_ALLONES, FB_STIP_ALLONES),
+			   fbXorStip(GXcopy, FB_STIP_ALLONES, FB_STIP_ALLONES),
+			   fbAndStip(GXcopy, 0, FB_STIP_ALLONES),
+			   fbXorStip(GXcopy, 0, FB_STIP_ALLONES), planeMask);
+	}
+}
diff --git a/src/sna/fb/fbline.c b/src/sna/fb/fbline.c
new file mode 100644
index 0000000..04d5343
--- /dev/null
+++ b/src/sna/fb/fbline.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include <mi.h>
+#include <micoord.h>
+#include <miline.h>
+#include <scrnintstr.h>
+
+#define POLYLINE    fbPolyline8
+#define POLYSEGMENT fbPolySegment8
+#define BITS	    BYTE
+#define BITS2	    CARD16
+#define BITS4	    CARD32
+#include "fblinebits.h"
+#undef BITS
+#undef BITS2
+#undef BITS4
+#undef POLYSEGMENT
+#undef POLYLINE
+
+#define POLYLINE    fbPolyline16
+#define POLYSEGMENT fbPolySegment16
+#define BITS	    CARD16
+#define BITS2	    CARD32
+#include "fblinebits.h"
+#undef BITS
+#undef BITS2
+#undef POLYSEGMENT
+#undef POLYLINE
+
+#define POLYLINE    fbPolyline32
+#define POLYSEGMENT fbPolySegment32
+#define BITS	    CARD32
+#include "fblinebits.h"
+#undef BITS
+#undef POLYSEGMENT
+#undef POLYLINE
+
+/*
+ * Generic zero-width polyline fallback: draw each of the n-1 spans
+ * with fbSegment.  The dash offset is threaded across segments so a
+ * dash pattern continues through the joined line; the final segment
+ * draws its last pixel unless capStyle is CapNotLast.
+ */
+static void
+fbZeroLine(DrawablePtr drawable, GCPtr gc, int mode, int n, DDXPointPtr pt)
+{
+	int x1, y1, x2, y2;
+	int x, y;
+	int dashOffset;
+
+	x = drawable->x;
+	y = drawable->y;
+	x1 = pt->x;
+	y1 = pt->y;
+	dashOffset = gc->dashOffset;
+	while (--n) {	/* n points -> n-1 segments */
+		++pt;
+		x2 = pt->x;
+		y2 = pt->y;
+		if (mode == CoordModePrevious) {
+			/* Deltas are relative to the previous point. */
+			x2 += x1;
+			y2 += y1;
+		}
+		fbSegment(drawable, gc,
+			  x1 + x, y1 + y,
+			  x2 + x, y2 + y,
+			  n == 1 && gc->capStyle != CapNotLast, &dashOffset);
+		x1 = x2;
+		y1 = y2;
+	}
+}
+
+/*
+ * Generic zero-width PolySegment fallback: each segment is drawn
+ * independently with a fresh dash offset (segments do not continue a
+ * dash pattern, unlike polylines).
+ * NOTE(review): x/y are int16_t — assumes the drawable origin fits in
+ * 16 bits; confirm against DrawableRec's coordinate width.
+ */
+static void
+fbZeroSegment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
+{
+	int dashOffset;
+	int16_t x, y;
+	Bool drawLast = gc->capStyle != CapNotLast;
+
+	x = drawable->x;
+	y = drawable->y;
+	while (n--) {
+		dashOffset = gc->dashOffset;
+		fbSegment(drawable, gc,
+			  seg->x1 + x, seg->y1 + y,
+			  seg->x2 + x, seg->y2 + y,
+			  drawLast, &dashOffset);
+		seg++;
+	}
+}
+
+/*
+ * Convert a CoordModePrevious point list to absolute coordinates in
+ * place: each point after the first is a delta from its predecessor,
+ * so accumulate running sums into the array itself.
+ */
+void
+fbFixCoordModePrevious(int n, DDXPointPtr pt)
+{
+	int16_t x = pt->x;
+	int16_t y = pt->y;
+	while (--n) {
+		pt++;
+		x = (pt->x += x);
+		y = (pt->y += y);
+	}
+}
+
+/*
+ * GC PolyLine entry point: pick a rasterizer.  Zero-width solid lines
+ * use the per-bpp Bresenham templates (fbPolyline8/16/32); any other
+ * zero-width case falls back to fbZeroLine; wide lines go to the mi
+ * wide-line code.
+ */
+void
+fbPolyLine(DrawablePtr drawable, GCPtr gc, int mode, int n, DDXPointPtr pt)
+{
+	void (*raster)(DrawablePtr, GCPtr, int mode, int n, DDXPointPtr pt);
+
+	DBG(("%s x %d, width=%d, fill=%d, line=%d\n",
+	     __FUNCTION__, n, gc->lineWidth, gc->fillStyle, gc->lineStyle));
+
+	if (gc->lineWidth == 0) {
+		raster = fbZeroLine;	/* generic fallback */
+		if (gc->fillStyle == FillSolid && gc->lineStyle == LineSolid) {
+			switch (drawable->bitsPerPixel) {
+			case 8:
+				raster = fbPolyline8;
+				break;
+			case 16:
+				raster = fbPolyline16;
+				break;
+			case 32:
+				raster = fbPolyline32;
+				break;
+			}
+		}
+	} else {
+		if (gc->lineStyle != LineSolid)
+			raster = miWideDash;
+		else
+			raster = miWideLine;
+	}
+	raster(drawable, gc, mode, n, pt);
+}
+
+/*
+ * GC PolySegment entry point: same dispatch scheme as fbPolyLine —
+ * per-bpp templates for zero-width solid segments, fbZeroSegment as
+ * the generic zero-width fallback, miPolySegment for wide lines.
+ */
+void
+fbPolySegment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
+{
+	void (*raster)(DrawablePtr drawable, GCPtr gc, int n, xSegment * seg);
+
+	DBG(("%s x %d, width=%d, fill=%d, line=%d\n",
+	     __FUNCTION__, n, gc->lineWidth, gc->fillStyle, gc->lineStyle));
+
+	if (gc->lineWidth == 0) {
+		raster = fbZeroSegment;	/* generic fallback */
+		if (gc->fillStyle == FillSolid && gc->lineStyle == LineSolid) {
+			switch (drawable->bitsPerPixel) {
+			case 8:
+				raster = fbPolySegment8;
+				break;
+			case 16:
+				raster = fbPolySegment16;
+				break;
+			case 32:
+				raster = fbPolySegment32;
+				break;
+			}
+		}
+	} else
+		raster = miPolySegment;
+
+	raster(drawable, gc, n, seg);
+}
diff --git a/src/sna/fb/fblinebits.h b/src/sna/fb/fblinebits.h
new file mode 100644
index 0000000..db315d8
--- /dev/null
+++ b/src/sna/fb/fblinebits.h
@@ -0,0 +1,284 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define isClipped(c,ul,lr)  (((c) | ((c) - (ul)) | ((lr) - (c))) & 0x80008000)
+#define RROP(b,a,x)	WRITE((b), FbDoRRop (READ(b), (a), (x)))
+
+/*
+ * Zero-width solid polyline template, instantiated per pixel size via
+ * the BITS macro.  Points are reinterpreted as packed INT32s (two
+ * 16-bit coords per word) so isClipped() can test both coordinates
+ * against the clip box in one operation (the 0x80008000 trick flags a
+ * sign bit in either half).  Unclipped spans are drawn inline with
+ * Bresenham; spans touching a clip edge fall back to fbSegment1.
+ * The whole point list is re-walked once per clip box.
+ */
+static void
+POLYLINE(DrawablePtr drawable, GCPtr gc, int mode, int n_0, DDXPointPtr pt_0)
+{
+	int xoff = drawable->x;
+	int yoff = drawable->y;
+	unsigned int bias = miGetZeroLineBias(drawable->pScreen);
+	const BoxRec *clip = RegionRects(gc->pCompositeClip);
+	const BoxRec *const last_clip = clip + RegionNumRects(gc->pCompositeClip);
+
+	FbBits *dst;
+	int dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+
+	BITS *bits, *bitsBase;
+	FbStride bitsStride;
+	BITS xor = fb_gc(gc)->xor;
+	BITS and = fb_gc(gc)->and;
+
+
+	int e, e1, e3, len;
+	int stepmajor, stepminor;
+	int octant;
+
+	if (mode == CoordModePrevious)
+		fbFixCoordModePrevious(n_0, pt_0);	/* make coords absolute, in place */
+
+	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+	bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
+	bitsBase =
+		((BITS *) dst) + (yoff + dstYoff) * bitsStride + (xoff + dstXoff);
+	do {
+		INT32 *pt = (INT32 *)pt_0;	/* packed (y,x) point pairs */
+		int n = n_0;
+		INT32 pt1, pt2;
+
+		/* Clip window in packed form, in drawable-relative coords. */
+		INT32 ul = coordToInt(clip->x1 - xoff, clip->y1 - yoff);
+		INT32 lr = coordToInt(clip->x2 - xoff - 1, clip->y2 - yoff - 1);
+
+		pt1 = *pt++; n--;
+		pt2 = *pt++; n--;
+		for (;;) {
+			if (isClipped(pt1, ul, lr) | isClipped(pt2, ul, lr)) {
+				/* Either endpoint outside this clip box:
+				 * use the clipping segment renderer. */
+				int dashoffset = 0;
+				fbSegment1(drawable, gc, clip,
+					  intToX(pt1) + xoff, intToY(pt1) + yoff,
+					  intToX(pt2) + xoff, intToY(pt2) + yoff,
+					  n == 0 && gc->capStyle != CapNotLast, &dashoffset);
+				if (!n)
+					return;
+
+				pt1 = pt2;
+				pt2 = *pt++;
+				n--;
+			} else {
+				/* Fully inside: run Bresenham directly on
+				 * the framebuffer until a point clips out. */
+				bits = bitsBase + intToY(pt1) * bitsStride + intToX(pt1);
+				for (;;) {
+					CalcLineDeltas(intToX(pt1), intToY(pt1),
+						       intToX(pt2), intToY(pt2),
+						       len, e1, stepmajor, stepminor, 1, bitsStride,
+						       octant);
+					if (len < e1) {
+						/* Y-major: swap major/minor axes. */
+						e3 = len;
+						len = e1;
+						e1 = e3;
+
+						e3 = stepminor;
+						stepminor = stepmajor;
+						stepmajor = e3;
+						SetYMajorOctant(octant);
+					}
+					e = -len;
+					e1 <<= 1;
+					e3 = e << 1;
+					FIXUP_ERROR(e, octant, bias);
+					if (and == 0) {
+						/* GXcopy-style: plain stores. */
+						while (len--) {
+							WRITE(bits, xor);
+							bits += stepmajor;
+							e += e1;
+							if (e >= 0) {
+								bits += stepminor;
+								e += e3;
+							}
+						}
+					} else {
+						/* General rop: read-modify-write. */
+						while (len--) {
+							RROP(bits, and, xor);
+							bits += stepmajor;
+							e += e1;
+							if (e >= 0) {
+								bits += stepminor;
+								e += e3;
+							}
+						}
+					}
+					if (!n) {
+						/* Closed endpoint: draw the final
+						 * pixel unless the polyline closes
+						 * back on its start point. */
+						if (gc->capStyle != CapNotLast &&
+						    pt2 != *((INT32 *)pt_0)) {
+							RROP(bits, and, xor);
+						}
+						return;
+					}
+					pt1 = pt2;
+					pt2 = *pt++;
+					--n;
+					if (isClipped(pt2, ul, lr))
+						break;
+				}
+			}
+		}
+	} while (++clip != last_clip);
+}
+
+/*
+ * Zero-width solid PolySegment template (per-bpp via BITS).  Each
+ * xSegment is read as a uint64_t and split into two packed INT32
+ * endpoints for the same one-shot clip test used by POLYLINE.
+ * Segments fully inside a clip box take one of two inline paths:
+ * a horizontal fast path (e1 == 0, len > 3) that uses masked
+ * full-word stores, or the general Bresenham walk.  Clipped segments
+ * go through fbSegment1.  The segment list is re-walked per clip box.
+ */
+static void
+POLYSEGMENT(DrawablePtr drawable, GCPtr gc, int n_0, xSegment *seg_0)
+{
+	int xoff = drawable->x;
+	int yoff = drawable->y;
+	unsigned int bias = miGetZeroLineBias(drawable->pScreen);
+	const BoxRec *clip = RegionRects(gc->pCompositeClip);
+	const BoxRec *const last_clip = clip + RegionNumRects(gc->pCompositeClip);
+
+	FbBits *dst;
+	int dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+
+	BITS *bits, *bitsBase;
+	FbStride bitsStride;
+	FbBits xor = fb_gc(gc)->xor;
+	FbBits and = fb_gc(gc)->and;
+
+	int e, e1, e3, len;
+	int stepmajor, stepminor;
+	int octant;
+	bool capNotLast = gc->capStyle == CapNotLast;
+
+	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+	bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
+	bitsBase =
+		((BITS *) dst) + (yoff + dstYoff) * bitsStride + (xoff + dstXoff);
+
+	do {
+		INT32 ul = coordToInt(clip->x1 - xoff, clip->y1 - yoff);
+		INT32 lr = coordToInt(clip->x2 - xoff - 1, clip->y2 - yoff - 1);
+		uint64_t *pt = (uint64_t *)seg_0;
+		int n = n_0;
+
+		while (n--) {
+			/* Reinterpret one xSegment as two packed points. */
+			union {
+				int32_t pt32[2];
+				uint64_t pt64;
+			} u;
+
+			u.pt64 = *pt++;
+			if (isClipped(u.pt32[0], ul, lr) | isClipped(u.pt32[1], ul, lr)) {
+				int dashoffset = 0;
+				fbSegment1(drawable, gc, clip,
+					  intToX(u.pt32[0]) + xoff, intToY(u.pt32[0]) + yoff,
+					  intToX(u.pt32[1]) + xoff, intToY(u.pt32[1]) + yoff,
+					  !capNotLast, &dashoffset);
+			} else {
+				CalcLineDeltas(intToX(u.pt32[0]), intToY(u.pt32[0]),
+					       intToX(u.pt32[1]), intToY(u.pt32[1]),
+					       len, e1, stepmajor, stepminor, 1, bitsStride,
+					       octant);
+				if (e1 == 0 && len > 3) {
+					/* Horizontal segment: fill the span
+					 * with start/middle/end word masks. */
+					int x1, x2;
+					FbBits *dstLine;
+					int dstX, width;
+					FbBits startmask, endmask;
+					int nmiddle;
+
+					if (stepmajor < 0) {
+						/* Right-to-left: normalize to x1 < x2. */
+						x1 = intToX(u.pt32[1]);
+						x2 = intToX(u.pt32[0]) + 1;
+						if (capNotLast)
+							x1++;
+					} else {
+						x1 = intToX(u.pt32[0]);
+						x2 = intToX(u.pt32[1]);
+						if (!capNotLast)
+							x2++;
+					}
+					/* Convert pixel extents to bit extents. */
+					dstX = (x1 + xoff + dstXoff) * (sizeof(BITS) * 8);
+					width = (x2 - x1) * (sizeof(BITS) * 8);
+
+					dstLine = dst + (intToY(u.pt32[0]) + yoff + dstYoff) * dstStride;
+					dstLine += dstX >> FB_SHIFT;
+					dstX &= FB_MASK;
+					FbMaskBits(dstX, width, startmask, nmiddle, endmask);
+					if (startmask) {
+						WRITE(dstLine,
+						      FbDoMaskRRop(READ(dstLine), and, xor,
+								   startmask));
+						dstLine++;
+					}
+					if (!and)
+						while (nmiddle--)
+							WRITE(dstLine++, xor);
+					else
+						while (nmiddle--) {
+							WRITE(dstLine,
+							      FbDoRRop(READ(dstLine), and, xor));
+							dstLine++;
+						}
+					if (endmask)
+						WRITE(dstLine,
+						      FbDoMaskRRop(READ(dstLine), and, xor,
+								   endmask));
+				} else {
+					/* General Bresenham walk. */
+					bits = bitsBase + intToY(u.pt32[0]) * bitsStride + intToX(u.pt32[0]);
+					if (len < e1) {
+						/* Y-major: swap major/minor axes. */
+						e3 = len;
+						len = e1;
+						e1 = e3;
+
+						e3 = stepminor;
+						stepminor = stepmajor;
+						stepmajor = e3;
+						SetYMajorOctant(octant);
+					}
+					e = -len;
+					e1 <<= 1;
+					e3 = e << 1;
+					FIXUP_ERROR(e, octant, bias);
+					if (!capNotLast)
+						len++;	/* include the final pixel */
+					if (and == 0) {
+						while (len--) {
+							WRITE(bits, xor);
+							bits += stepmajor;
+							e += e1;
+							if (e >= 0) {
+								bits += stepminor;
+								e += e3;
+							}
+						}
+					} else {
+						while (len--) {
+							RROP(bits, and, xor);
+							bits += stepmajor;
+							e += e1;
+							if (e >= 0) {
+								bits += stepminor;
+								e += e3;
+							}
+						}
+					}
+				}
+			}
+		}
+	} while (++clip != last_clip);
+}
+
+#undef RROP
+#undef isClipped
diff --git a/src/sna/fb/fbpict.c b/src/sna/fb/fbpict.c
new file mode 100644
index 0000000..f6bcb64
--- /dev/null
+++ b/src/sna/fb/fbpict.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright © 2000 SuSE, Inc.
+ * Copyright © 2007 Red Hat, Inc.
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of SuSE not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission.  SuSE makes no representations about the
+ * suitability of this software for any purpose.  It is provided "as is"
+ * without express or implied warranty.
+ *
+ * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
+ * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Author:  Keith Packard, SuSE, Inc.
+ */
+
+#include <string.h>
+
+#include "fb.h"
+
+#include <picturestr.h>
+#include <mipict.h>
+#include "fbpict.h"
+
+/*
+ * Render Composite fallback: wrap the three pictures in pixman images
+ * and hand the operation to pixman_image_composite.  The composite is
+ * skipped if source or destination could not be wrapped, or if a mask
+ * was requested but could not be wrapped; the xoff/yoff returned by
+ * image_from_pict translate picture coordinates into image space.
+ */
+void
+fbComposite(CARD8 op,
+            PicturePtr pSrc,
+            PicturePtr pMask,
+            PicturePtr pDst,
+            INT16 xSrc,
+            INT16 ySrc,
+            INT16 xMask,
+            INT16 yMask, INT16 xDst, INT16 yDst, CARD16 width, CARD16 height)
+{
+	pixman_image_t *src, *mask, *dest;
+	int src_xoff, src_yoff;
+	int msk_xoff, msk_yoff;
+	int dst_xoff, dst_yoff;
+
+	miCompositeSourceValidate(pSrc);
+	if (pMask)
+		miCompositeSourceValidate(pMask);
+
+	src = image_from_pict(pSrc, FALSE, &src_xoff, &src_yoff);
+	mask = image_from_pict(pMask, FALSE, &msk_xoff, &msk_yoff);
+	dest = image_from_pict(pDst, TRUE, &dst_xoff, &dst_yoff);
+
+	if (src && dest && !(pMask && !mask)) {
+		pixman_image_composite(op, src, mask, dest,
+				       xSrc + src_xoff, ySrc + src_yoff,
+				       xMask + msk_xoff, yMask + msk_yoff,
+				       xDst + dst_xoff, yDst + dst_yoff, width, height);
+	}
+
+	free_pixman_pict(pSrc, src);
+	free_pixman_pict(pMask, mask);
+	free_pixman_pict(pDst, dest);
+}
+
+/*
+ * Wrap a SolidFill source picture as a pixman solid-fill image.
+ * The picture colour is 8-bit ARGB; each channel is widened to the
+ * 16-bit pixman_color_t range by replication ((c << 8) | c).
+ */
+static pixman_image_t *
+create_solid_fill_image(PicturePtr pict)
+{
+	PictSolidFill *solid = &pict->pSourcePict->solidFill;
+	pixman_color_t color;
+	CARD32 a, r, g, b;
+
+	a = (solid->color & 0xff000000) >> 24;
+	r = (solid->color & 0x00ff0000) >> 16;
+	g = (solid->color & 0x0000ff00) >> 8;
+	b = (solid->color & 0x000000ff) >> 0;
+
+	color.alpha = (a << 8) | a;
+	color.red = (r << 8) | r;
+	color.green = (g << 8) | g;
+	color.blue = (b << 8) | b;
+
+	return pixman_image_create_solid_fill(&color);
+}
+
+/*
+ * Wrap a LinearGradient source picture as a pixman linear-gradient
+ * image; endpoints and stops are passed through unchanged (Render's
+ * stop layout matches pixman_gradient_stop_t here).
+ */
+static pixman_image_t *
+create_linear_gradient_image(PictGradient * gradient)
+{
+	PictLinearGradient *linear = (PictLinearGradient *) gradient;
+	pixman_point_fixed_t p1;
+	pixman_point_fixed_t p2;
+
+	p1.x = linear->p1.x;
+	p1.y = linear->p1.y;
+	p2.x = linear->p2.x;
+	p2.y = linear->p2.y;
+
+	return pixman_image_create_linear_gradient(&p1, &p2,
+						   (pixman_gradient_stop_t *)
+						   gradient->stops,
+						   gradient->nstops);
+}
+
+/*
+ * Wrap a RadialGradient source picture as a pixman radial-gradient
+ * image (two circles with centers c1/c2 and their radii).
+ */
+static pixman_image_t *
+create_radial_gradient_image(PictGradient * gradient)
+{
+	PictRadialGradient *radial = (PictRadialGradient *) gradient;
+	pixman_point_fixed_t c1;
+	pixman_point_fixed_t c2;
+
+	c1.x = radial->c1.x;
+	c1.y = radial->c1.y;
+	c2.x = radial->c2.x;
+	c2.y = radial->c2.y;
+
+	return pixman_image_create_radial_gradient(&c1, &c2, radial->c1.radius,
+						   radial->c2.radius,
+						   (pixman_gradient_stop_t *)
+						   gradient->stops,
+						   gradient->nstops);
+}
+
+/*
+ * Wrap a ConicalGradient source picture as a pixman conical-gradient
+ * image (center point plus start angle).
+ */
+static pixman_image_t *
+create_conical_gradient_image(PictGradient * gradient)
+{
+	PictConicalGradient *conical = (PictConicalGradient *) gradient;
+	pixman_point_fixed_t center;
+
+	center.x = conical->center.x;
+	center.y = conical->center.y;
+
+	return pixman_image_create_conical_gradient(&center, conical->angle,
+						    (pixman_gradient_stop_t *)
+						    gradient->stops,
+						    gradient->nstops);
+}
+
+/*
+ * Wrap a drawable-backed picture's pixmap bits in a pixman image.
+ * On return *xoff/*yoff hold the drawable's position within the
+ * pixmap (pixmap offset plus drawable origin).  'has_clip' is true
+ * only for destination pictures, whose pCompositeClip is valid; the
+ * clip region is translated into pixmap space around the set call.
+ * NOTE(review): the stride is scaled by sizeof(FbStride) — presumably
+ * equal to sizeof(FbBits) on supported builds; verify.
+ */
+static pixman_image_t *
+create_bits_picture(PicturePtr pict, Bool has_clip, int *xoff, int *yoff)
+{
+	PixmapPtr pixmap;
+	FbBits *bits;
+	FbStride stride;
+	int bpp;
+	pixman_image_t *image;
+
+	fbGetDrawablePixmap(pict->pDrawable, pixmap, *xoff, *yoff);
+	fbGetPixmapBitsData(pixmap, bits, stride, bpp);
+
+	image = pixman_image_create_bits((pixman_format_code_t) pict->format,
+					 pixmap->drawable.width,
+					 pixmap->drawable.height, (uint32_t *) bits,
+					 stride * sizeof(FbStride));
+
+	if (!image)
+		return NULL;
+
+	/* pCompositeClip is undefined for source pictures, so
+	 * only set the clip region for pictures with drawables
+	 */
+	if (has_clip) {
+		if (pict->clientClipType != CT_NONE)
+			pixman_image_set_has_client_clip(image, TRUE);
+
+		if (*xoff || *yoff)
+			pixman_region_translate(pict->pCompositeClip, *xoff, *yoff);
+
+		pixman_image_set_clip_region(image, pict->pCompositeClip);
+
+		if (*xoff || *yoff)
+			pixman_region_translate(pict->pCompositeClip, -*xoff, -*yoff);
+	}
+
+	/* Indexed table */
+	if (pict->pFormat->index.devPrivate)
+		pixman_image_set_indexed(image, pict->pFormat->index.devPrivate);
+
+	/* Add in drawable origin to position within the image */
+	*xoff += pict->pDrawable->x;
+	*yoff += pict->pDrawable->y;
+
+	return image;
+}
+
+static pixman_image_t *image_from_pict_internal(PicturePtr pict, Bool has_clip,
+                                                int *xoff, int *yoff,
+                                                Bool is_alpha_map);
+
+/*
+ * Copy the remaining Render picture attributes onto a pixman image:
+ * transform (with the source-offset folded in for non-destination
+ * pictures), repeat mode, alpha map, component-alpha flag and filter.
+ * 'is_alpha_map' breaks recursion when the picture is itself being
+ * fetched as another picture's alpha map.
+ */
+static void
+set_image_properties(pixman_image_t * image, PicturePtr pict, Bool has_clip,
+                     int *xoff, int *yoff, Bool is_alpha_map)
+{
+	pixman_repeat_t repeat;
+	pixman_filter_t filter;
+
+	if (pict->transform) {
+		/* For source images, adjust the transform to account
+		 * for the drawable offset within the pixman image,
+		 * then set the offset to 0 as it will be used
+		 * to compute positions within the transformed image.
+		 */
+		if (!has_clip) {
+			struct pixman_transform adjusted;
+
+			adjusted = *pict->transform;
+			pixman_transform_translate(&adjusted,
+						   NULL,
+						   pixman_int_to_fixed(*xoff),
+						   pixman_int_to_fixed(*yoff));
+			pixman_image_set_transform(image, &adjusted);
+			*xoff = 0;
+			*yoff = 0;
+		}
+		else
+			pixman_image_set_transform(image, pict->transform);
+	}
+
+	/* Map the Render repeat enum onto pixman's. */
+	switch (pict->repeatType) {
+	default:
+	case RepeatNone:
+		repeat = PIXMAN_REPEAT_NONE;
+		break;
+
+	case RepeatPad:
+		repeat = PIXMAN_REPEAT_PAD;
+		break;
+
+	case RepeatNormal:
+		repeat = PIXMAN_REPEAT_NORMAL;
+		break;
+
+	case RepeatReflect:
+		repeat = PIXMAN_REPEAT_REFLECT;
+		break;
+	}
+
+	pixman_image_set_repeat(image, repeat);
+
+	/* Fetch alpha map unless 'pict' is being used
+	 * as the alpha map for this operation
+	 */
+	if (pict->alphaMap && !is_alpha_map) {
+		int alpha_xoff, alpha_yoff;
+		pixman_image_t *alpha_map =
+			image_from_pict_internal(pict->alphaMap, FALSE, &alpha_xoff,
+						 &alpha_yoff, TRUE);
+
+		pixman_image_set_alpha_map(image, alpha_map, pict->alphaOrigin.x,
+					   pict->alphaOrigin.y);
+
+		/* set_alpha_map takes its own reference. */
+		free_pixman_pict(pict->alphaMap, alpha_map);
+	}
+
+	pixman_image_set_component_alpha(image, pict->componentAlpha);
+
+	/* Map the Render filter onto pixman's. */
+	switch (pict->filter) {
+	default:
+	case PictFilterNearest:
+	case PictFilterFast:
+		filter = PIXMAN_FILTER_NEAREST;
+		break;
+
+	case PictFilterBilinear:
+	case PictFilterGood:
+		filter = PIXMAN_FILTER_BILINEAR;
+		break;
+
+	case PictFilterConvolution:
+		filter = PIXMAN_FILTER_CONVOLUTION;
+		break;
+	}
+
+	pixman_image_set_filter(image, filter,
+				(pixman_fixed_t *) pict->filter_params,
+				pict->filter_nparams);
+	pixman_image_set_source_clipping(image, TRUE);
+}
+
+/*
+ * Build a pixman image for a picture: drawable-backed pictures wrap
+ * their pixmap bits; source-only pictures become solid fills or
+ * gradients (with a zero offset).  Returns NULL for a NULL picture,
+ * an unknown source type, or a failed image creation.
+ */
+static pixman_image_t *
+image_from_pict_internal(PicturePtr pict, Bool has_clip, int *xoff, int *yoff,
+                         Bool is_alpha_map)
+{
+	pixman_image_t *image = NULL;
+
+	if (!pict)
+		return NULL;
+
+	if (pict->pDrawable) {
+		image = create_bits_picture(pict, has_clip, xoff, yoff);
+	}
+	else if (pict->pSourcePict) {
+		SourcePict *sp = pict->pSourcePict;
+
+		if (sp->type == SourcePictTypeSolidFill) {
+			image = create_solid_fill_image(pict);
+		}
+		else {
+			PictGradient *gradient = &pict->pSourcePict->gradient;
+
+			if (sp->type == SourcePictTypeLinear)
+				image = create_linear_gradient_image(gradient);
+			else if (sp->type == SourcePictTypeRadial)
+				image = create_radial_gradient_image(gradient);
+			else if (sp->type == SourcePictTypeConical)
+				image = create_conical_gradient_image(gradient);
+		}
+		*xoff = *yoff = 0;	/* source pictures have no drawable offset */
+	}
+
+	if (image)
+		set_image_properties(image, pict, has_clip, xoff, yoff, is_alpha_map);
+
+	return image;
+}
+
+/* Public wrapper: build a pixman image for 'pict' (not an alpha map). */
+pixman_image_t *
+image_from_pict(PicturePtr pict, Bool has_clip, int *xoff, int *yoff)
+{
+	return image_from_pict_internal(pict, has_clip, xoff, yoff, FALSE);
+}
+
+/*
+ * Drop the reference taken by image_from_pict.  NULL-safe; the
+ * 'pict' argument is unused but kept for a symmetric call signature.
+ */
+void
+free_pixman_pict(PicturePtr pict, pixman_image_t * image)
+{
+	if (image)
+                pixman_image_unref(image);
+}
diff --git a/src/sna/fb/fbpict.h b/src/sna/fb/fbpict.h
new file mode 100644
index 0000000..6bcee34
--- /dev/null
+++ b/src/sna/fb/fbpict.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef FBPICT_H
+#define FBPICT_H
+
+extern void
+fbComposite(CARD8 op,
+            PicturePtr pSrc,
+            PicturePtr pMask,
+            PicturePtr pDst,
+            INT16 xSrc,
+            INT16 ySrc,
+            INT16 xMask,
+            INT16 yMask, INT16 xDst, INT16 yDst, CARD16 width, CARD16 height);
+
+extern pixman_image_t *image_from_pict(PicturePtr pict,
+				       Bool has_clip,
+				       int *xoff, int *yoff);
+
+extern void free_pixman_pict(PicturePtr, pixman_image_t *);
+
+#endif  /* FBPICT_H */
diff --git a/src/sna/fb/fbpoint.c b/src/sna/fb/fbpoint.c
new file mode 100644
index 0000000..d3f796e
--- /dev/null
+++ b/src/sna/fb/fbpoint.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include <micoord.h>
+
+#define DOTS	    fbDots8
+#define BITS	    BYTE
+#define BITS2	    CARD16
+#define BITS4	    CARD32
+#include "fbpointbits.h"
+#undef BITS
+#undef BITS2
+#undef BITS4
+#undef DOTS
+
+#define DOTS	    fbDots16
+#define BITS	    CARD16
+#define BITS2	    CARD32
+#include "fbpointbits.h"
+#undef BITS
+#undef BITS2
+#undef DOTS
+
+#define DOTS	    fbDots32
+#define BITS	    CARD32
+#include "fbpointbits.h"
+#undef ARC
+#undef BITS
+#undef DOTS
+
+static void
+fbDots(FbBits *dstOrig, FbStride dstStride, int dstBpp,
+       RegionPtr clip,
+       xPoint *pts, int n,
+       int xorg, int yorg,
+       int xoff, int yoff,
+       FbBits andOrig, FbBits xorOrig)
+{
+	FbStip *dst = (FbStip *) dstOrig;
+	FbStip and = andOrig;
+	FbStip xor = xorOrig;
+
+	while (n--) {
+		int x = pts->x + xorg;
+		int y = pts->y + yorg;
+		pts++;
+		if (RegionContainsPoint(clip, x, y, NULL)) {
+			FbStip mask;
+			FbStip *d;
+
+			x = (x + xoff) * dstBpp;
+			d = dst + ((y + yoff) * dstStride) + (x >> FB_STIP_SHIFT);
+			x &= FB_STIP_MASK;
+
+			mask = FbStipMask(x, dstBpp);
+			WRITE(d, FbDoMaskRRop(READ(d), and, xor, mask));
+		}
+	}
+}
+
+void
+fbPolyPoint(DrawablePtr drawable, GCPtr gc,
+	    int mode, int n, xPoint *pt)
+{
+	FbBits *dst;
+	FbStride dstStride;
+	int dstBpp;
+	int dstXoff, dstYoff;
+	void (*dots)(FbBits *dst, FbStride dstStride, int dstBpp,
+		     RegionPtr clip,
+		     xPoint *pts, int n,
+		     int xorg, int yorg,
+		     int xoff, int yoff,
+		     FbBits and, FbBits xor);
+	FbBits and, xor;
+
+	DBG(("%s x %d\n", __FUNCTION__, n));
+
+	if (mode == CoordModePrevious)
+		fbFixCoordModePrevious(n, pt);
+
+	fbGetDrawable(drawable, dst, dstStride, dstBpp, dstXoff, dstYoff);
+	and = fb_gc(gc)->and;
+	xor = fb_gc(gc)->xor;
+	dots = fbDots;
+	switch (dstBpp) {
+	case 8:
+		dots = fbDots8;
+		break;
+	case 16:
+		dots = fbDots16;
+		break;
+	case 32:
+		dots = fbDots32;
+		break;
+	}
+	dots(dst, dstStride, dstBpp, gc->pCompositeClip, pt, n,
+	     drawable->x, drawable->y, dstXoff, dstYoff, and, xor);
+}
diff --git a/src/sna/fb/fbpointbits.h b/src/sna/fb/fbpointbits.h
new file mode 100644
index 0000000..40a25c6
--- /dev/null
+++ b/src/sna/fb/fbpointbits.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define RROP(b,a,x) WRITE((b), FbDoRRop (READ(b), (a), (x)))
+#define isClipped(c,ul,lr)  (((c) | ((c) - (ul)) | ((lr) - (c))) & 0x80008000)
+
+static void
+DOTS(FbBits * dst,
+     FbStride dstStride,
+     int dstBpp,
+     RegionPtr region,
+     xPoint * ptsOrig,
+     int npt, int xorg, int yorg, int xoff, int yoff, FbBits and, FbBits xor)
+{
+	uint32_t *pts = (uint32_t *) ptsOrig;
+	BITS *bits = (BITS *) dst;
+	BITS bxor = (BITS) xor;
+	BITS band = (BITS) and;
+	FbStride bitsStride = dstStride * (sizeof(FbBits) / sizeof(BITS));
+
+	bits += bitsStride * (yorg + yoff) + (xorg + xoff);
+
+	if (region->data == NULL) {
+		INT32 ul = coordToInt(region->extents.x1 - xorg,
+				      region->extents.y1 - yorg);
+		INT32 lr = coordToInt(region->extents.x2 - xorg - 1,
+				      region->extents.y2 - yorg - 1);
+
+		if (and == 0) {
+			while (npt >= 2) {
+				union {
+					uint32_t pt32[2];
+					uint64_t pt64;
+				} pt;
+				pt.pt64 = *(uint64_t *)pts;
+				if (!isClipped(pt.pt32[0], ul, lr)) {
+					BITS *point = bits + intToY(pt.pt32[0]) * bitsStride + intToX(pt.pt32[0]);
+					WRITE(point, bxor);
+				}
+				if (!isClipped(pt.pt32[1], ul, lr)) {
+					BITS *point = bits + intToY(pt.pt32[1]) * bitsStride + intToX(pt.pt32[1]);
+					WRITE(point, bxor);
+				}
+
+				pts += 2;
+				npt -= 2;
+			}
+			if (npt) {
+				uint32_t pt = *pts;
+				if (!isClipped(pt, ul, lr)) {
+					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+					WRITE(point, bxor);
+				}
+			}
+		} else {
+			while (npt--) {
+				uint32_t pt = *pts++;
+				if (!isClipped(pt, ul, lr)) {
+					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+					RROP(point, band, bxor);
+				}
+			}
+		}
+	} else {
+		if (and == 0) {
+			while (npt--) {
+				uint32_t pt = *pts++;
+				if (RegionContainsPoint(region,
+							intToX(pt), intToY(pt),
+							NULL)) {
+					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+					WRITE(point, bxor);
+				}
+			}
+		} else {
+			while (npt--) {
+				uint32_t pt = *pts++;
+				if (RegionContainsPoint(region,
+							intToX(pt), intToY(pt),
+							NULL)) {
+					BITS *point = bits + intToY(pt) * bitsStride + intToX(pt);
+					RROP(point, band, bxor);
+				}
+			}
+		}
+	}
+}
+
+#undef RROP
+#undef isClipped
diff --git a/src/sna/fb/fbpush.c b/src/sna/fb/fbpush.c
new file mode 100644
index 0000000..c53f0ad
--- /dev/null
+++ b/src/sna/fb/fbpush.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include "fbclip.h"
+
+static void
+fbPushPattern(DrawablePtr drawable, GCPtr gc,
+              FbStip *src, FbStride srcStride, int srcX,
+	      int x, int y, int width, int height)
+{
+	FbStip *s, bitsMask, bitsMask0, bits;
+	int xspan;
+	int w;
+	int lenspan;
+
+	src += srcX >> FB_STIP_SHIFT;
+	srcX &= FB_STIP_MASK;
+
+	bitsMask0 = FbStipMask(srcX, 1);
+
+	while (height--) {
+		bitsMask = bitsMask0;
+		w = width;
+		s = src;
+		src += srcStride;
+		bits = READ(s++);
+		xspan = x;
+		while (w) {
+			if (bits & bitsMask) {
+				lenspan = 0;
+				do {
+					if (++lenspan == w)
+						break;
+
+					bitsMask = FbStipRight(bitsMask, 1);
+					if (!bitsMask) {
+						bits = READ(s++);
+						bitsMask = FbBitsMask(0, 1);
+					}
+				} while (bits & bitsMask);
+				fbFill(drawable, gc, xspan, y, lenspan, 1);
+				xspan += lenspan;
+				w -= lenspan;
+			} else {
+				do {
+					xspan++;
+					if (!--w)
+						break;
+
+					bitsMask = FbStipRight(bitsMask, 1);
+					if (!bitsMask) {
+						bits = READ(s++);
+						bitsMask = FbBitsMask(0, 1);
+					}
+				} while (!(bits & bitsMask));
+			}
+		}
+		y++;
+	}
+}
+
+static void
+fbPushFill(DrawablePtr drawable, GCPtr gc,
+           FbStip *src, FbStride srcStride, int srcX,
+	   int x, int y, int width, int height)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+
+	if (gc->fillStyle == FillSolid) {
+		FbBits *dst;
+		FbStride dstStride;
+		int dstBpp;
+		int dstXoff, dstYoff;
+		int dstX;
+		int dstWidth;
+
+		fbGetDrawable(drawable, dst,
+			      dstStride, dstBpp, dstXoff, dstYoff);
+		dst = dst + (y + dstYoff) * dstStride;
+		dstX = (x + dstXoff) * dstBpp;
+		dstWidth = width * dstBpp;
+		if (dstBpp == 1) {
+			fbBltStip(src, srcStride, srcX,
+				  (FbStip *)dst, dstStride, dstX,
+				  dstWidth, height,
+				  FbStipple1Rop(gc->alu, gc->fgPixel), pgc->pm, dstBpp);
+		} else {
+			fbBltOne(src, srcStride, srcX,
+				 dst, dstStride, dstX, dstBpp,
+				 dstWidth, height,
+				 pgc->and, pgc->xor,
+				 fbAnd(GXnoop, (FbBits) 0, FB_ALLONES),
+				 fbXor(GXnoop, (FbBits) 0, FB_ALLONES));
+		}
+	} else
+		fbPushPattern(drawable, gc, src, srcStride, srcX,
+			      x, y, width, height);
+}
+
+struct fbPushImage {
+	FbStip *src;
+	FbStride stride;
+	int x0, y0;
+};
+
+inline static void
+_fbPushImage(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbPushImage *data = _data;
+
+	fbPushFill(drawable, gc,
+		   data->src + (b->y1 - data->y0) * data->stride, data->stride,
+		   b->x1 - data->x0,
+		   b->x1, b->y1,
+		   b->x2 - b->x1, b->y2 - b->y1);
+}
+
+void
+fbPushImage(DrawablePtr drawable, GCPtr gc,
+            FbStip *src, FbStride stride, int dx,
+	    int x, int y, int width, int height)
+{
+	struct fbPushImage data;
+	BoxRec box;
+
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, width, height));
+
+	data.src = src;
+	data.stride = stride;
+	data.y0 = y;
+	data.x0 = x - dx;
+
+	box.x1 = x;
+	box.y1 = y;
+	box.x2 = x + width;
+	box.y2 = y + height;
+	fbDrawableRun(drawable, gc, &box, _fbPushImage, &data);
+}
+
+void
+fbPushPixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
+	     int dx, int dy, int xOrg, int yOrg)
+{
+	FbStip *stip;
+	FbStride stipStride;
+	int stipBpp;
+	_X_UNUSED int stipXoff, stipYoff;
+
+	DBG(("%s bitmap=%dx%d\n", __FUNCTION__,
+	     bitmap->drawable.width, bitmap->drawable.height));
+
+	fbGetStipDrawable(&bitmap->drawable, stip,
+			  stipStride, stipBpp, stipXoff, stipYoff);
+
+	fbPushImage(drawable, gc, stip, stipStride, 0, xOrg, yOrg, dx, dy);
+}
diff --git a/src/sna/fb/fbrop.h b/src/sna/fb/fbrop.h
new file mode 100644
index 0000000..9eb1fc3
--- /dev/null
+++ b/src/sna/fb/fbrop.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _FBROP_H_
+#define _FBROP_H_
+
+#define FbDestInvarientRop(alu,pm)  ((pm) == FB_ALLONES && \
+				     (((alu) >> 1 & 5) == ((alu) & 5)))
+
+#define FbDestInvarientMergeRop()   (_ca1 == 0 && _cx1 == 0)
+
+/* AND has higher precedence than XOR */
+
+#define FbDoMergeRop(src, dst) \
+    (((dst) & (((src) & _ca1) ^ _cx1)) ^ (((src) & _ca2) ^ _cx2))
+
+#define FbDoDestInvarientMergeRop(src)	(((src) & _ca2) ^ _cx2)
+
+#define FbDoMaskMergeRop(src, dst, mask) \
+    (((dst) & ((((src) & _ca1) ^ _cx1) | ~(mask))) ^ ((((src) & _ca2) ^ _cx2) & (mask)))
+
+#define FbDoLeftMaskByteMergeRop(dst, src, lb, l) { \
+    FbBits  __xor = ((src) & _ca2) ^ _cx2; \
+    FbDoLeftMaskByteRRop(dst,lb,l,((src) & _ca1) ^ _cx1,__xor); \
+}
+
+#define FbDoRightMaskByteMergeRop(dst, src, rb, r) { \
+    FbBits  __xor = ((src) & _ca2) ^ _cx2; \
+    FbDoRightMaskByteRRop(dst,rb,r,((src) & _ca1) ^ _cx1,__xor); \
+}
+
+#define FbDoRRop(dst, and, xor)	(((dst) & (and)) ^ (xor))
+
+#define FbDoMaskRRop(dst, and, xor, mask) \
+    (((dst) & ((and) | ~(mask))) ^ ((xor) & (mask)))
+
+/*
+ * Take a single bit (0 or 1) and generate a full mask
+ */
+#define fbFillFromBit(b,t)	(~((t) ((b) & 1)-1))
+
+#define fbXorT(rop,fg,pm,t) ((((fg) & fbFillFromBit((rop) >> 1,t)) | \
+			      (~(fg) & fbFillFromBit((rop) >> 3,t))) & (pm))
+
+#define fbAndT(rop,fg,pm,t) ((((fg) & fbFillFromBit (rop ^ (rop>>1),t)) | \
+			      (~(fg) & fbFillFromBit((rop>>2) ^ (rop>>3),t))) | \
+			     ~(pm))
+
+#define fbXor(rop,fg,pm)	fbXorT(rop,fg,pm,FbBits)
+
+#define fbAnd(rop,fg,pm)	fbAndT(rop,fg,pm,FbBits)
+
+#define fbXorStip(rop,fg,pm)    fbXorT(rop,fg,pm,FbStip)
+
+#define fbAndStip(rop,fg,pm)	fbAndT(rop,fg,pm,FbStip)
+
+/*
+ * Stippling operations; 
+ */
+extern const FbBits *const fbStippleTable[];
+
+#define FbStippleRRop(dst, b, fa, fx, ba, bx) \
+    ((FbDoRRop(dst, fa, fx) & (b)) | (FbDoRRop(dst, ba, bx) & ~(b)))
+
+#define FbStippleRRopMask(dst, b, fa, fx, ba, bx, m) \
+    (FbDoMaskRRop(dst, fa, fx, m) & (b)) | (FbDoMaskRRop(dst, ba, bx, m) & ~(b))
+
+#define FbDoLeftMaskByteStippleRRop(dst, b, fa, fx, ba, bx, lb, l) { \
+    FbBits  __xor = ((fx) & (b)) | ((bx) & ~(b)); \
+    FbDoLeftMaskByteRRop(dst, lb, l, ((fa) & (b)) | ((ba) & ~(b)), __xor); \
+}
+
+#define FbDoRightMaskByteStippleRRop(dst, b, fa, fx, ba, bx, rb, r) { \
+    FbBits  __xor = ((fx) & (b)) | ((bx) & ~(b)); \
+    FbDoRightMaskByteRRop(dst, rb, r, ((fa) & (b)) | ((ba) & ~(b)), __xor); \
+}
+
+#define FbOpaqueStipple(b, fg, bg) (((fg) & (b)) | ((bg) & ~(b)))
+
+/*
+ * Compute rop for using tile code for 1-bit dest stipples; modifies
+ * existing rop to flip depending on pixel values
+ */
+#define FbStipple1RopPick(alu,b)    (((alu) >> (2 - (((b) & 1) << 1))) & 3)
+
+#define FbOpaqueStipple1Rop(alu,fg,bg)    (FbStipple1RopPick(alu,fg) | \
+					   (FbStipple1RopPick(alu,bg) << 2))
+
+#define FbStipple1Rop(alu,fg)	    (FbStipple1RopPick(alu,fg) | 4)
+
+#endif
diff --git a/src/sna/fb/fbseg.c b/src/sna/fb/fbseg.c
new file mode 100644
index 0000000..5b8173f
--- /dev/null
+++ b/src/sna/fb/fbseg.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <stdlib.h>
+
+#include "fb.h"
+#include "fbclip.h"
+#include <mi.h>
+#include <miline.h>
+#include <scrnintstr.h>
+
+#define FbDashDeclare	\
+    unsigned char	*__dash, *__firstDash, *__lastDash
+
+#define FbDashInit(gc,pgc,dashOffset,dashlen,even) {	    \
+    (even) = TRUE;					    \
+    __firstDash = (gc)->dash;				    \
+    __lastDash = __firstDash + (gc)->numInDashList;	    \
+    (dashOffset) %= (pgc)->dashLength;		    \
+							    \
+    __dash = __firstDash;				    \
+    while ((dashOffset) >= ((dashlen) = *__dash)) {	    \
+	(dashOffset) -= (dashlen);			    \
+	(even) = 1-(even);				    \
+	if (++__dash == __lastDash)			    \
+	    __dash = __firstDash;			    \
+    }							    \
+    (dashlen) -= (dashOffset);				    \
+}
+
+#define FbDashNext(dashlen) {				    \
+    if (++__dash == __lastDash)				    \
+	__dash = __firstDash;				    \
+    (dashlen) = *__dash;				    \
+}
+
+/* as numInDashList is always even, this case can skip a test */
+
+#define FbDashNextEven(dashlen) {			    \
+    (dashlen) = *++__dash;				    \
+}
+
+#define FbDashNextOdd(dashlen)	FbDashNext(dashlen)
+
+#define FbDashStep(dashlen,even) {			    \
+    if (!--(dashlen)) {					    \
+	FbDashNext(dashlen);				    \
+	(even) = 1-(even);				    \
+    }							    \
+}
+
+#define fbBresShiftMask(mask,dir,bpp) ((bpp == FB_STIP_UNIT) ? 0 : \
+					((dir < 0) ? FbStipLeft(mask,bpp) : \
+					 FbStipRight(mask,bpp)))
+
+typedef void FbBres(DrawablePtr drawable,
+                    GCPtr gc,
+                    int dashOffset,
+                    int sdx,
+                    int sdy,
+                    int axis, int x, int y, int e, int e1, int e3, int len);
+
+#define BRESSOLID   fbBresSolid8
+#define BRESSOLIDR  fbBresSolidR8
+#define BRESDASH    fbBresDash8
+#define BITS	    BYTE
+#define BITS2	    CARD16
+#define BITS4	    CARD32
+#include "fbsegbits.h"
+#undef BRESSOLID
+#undef BRESSOLIDR
+#undef BRESDASH
+#undef BITS
+#undef BITS2
+#undef BITS4
+
+#define BRESSOLID   fbBresSolid16
+#define BRESSOLIDR  fbBresSolidR16
+#define BRESDASH    fbBresDash16
+#define BITS	    CARD16
+#define BITS2	    CARD32
+#include "fbsegbits.h"
+#undef BRESSOLID
+#undef BRESSOLIDR
+#undef BRESDASH
+#undef BITS
+#undef BITS2
+
+#define BRESSOLID   fbBresSolid32
+#define BRESSOLIDR  fbBresSolidR32
+#define BRESDASH    fbBresDash32
+#define BITS	    CARD32
+#include "fbsegbits.h"
+#undef BRESSOLID
+#undef BRESSOLIDR
+#undef BRESDASH
+#undef BITS
+
+static void
+fbBresSolid(DrawablePtr drawable, GCPtr gc, int dashOffset,
+            int sdx, int sdy, int axis,
+	    int x1, int y1,
+	    int e, int e1, int e3, int len)
+{
+	FbStip *dst;
+	FbStride stride;
+	int bpp;
+	int dx, dy;
+	FbGCPrivPtr pgc = fb_gc(gc);
+	FbStip and = (FbStip) pgc->and;
+	FbStip xor = (FbStip) pgc->xor;
+	FbStip mask, mask0;
+	FbStip bits;
+
+	fbGetStipDrawable(drawable, dst, stride, bpp, dx, dy);
+	dst += ((y1 + dy) * stride);
+	x1 = (x1 + dx) * bpp;
+	dst += x1 >> FB_STIP_SHIFT;
+	x1 &= FB_STIP_MASK;
+	mask0 = FbStipMask(0, bpp);
+	mask = FbStipRight(mask0, x1);
+	if (sdx < 0)
+		mask0 = FbStipRight(mask0, FB_STIP_UNIT - bpp);
+	if (sdy < 0)
+		stride = -stride;
+	if (axis == X_AXIS) {
+		bits = 0;
+		while (len--) {
+			bits |= mask;
+			mask = fbBresShiftMask(mask, sdx, bpp);
+			if (!mask) {
+				WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, bits));
+				bits = 0;
+				dst += sdx;
+				mask = mask0;
+			}
+			e += e1;
+			if (e >= 0) {
+				WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, bits));
+				bits = 0;
+				dst += stride;
+				e += e3;
+			}
+		}
+		if (bits)
+			WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, bits));
+	} else {
+		while (len--) {
+			WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, mask));
+			dst += stride;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				mask = fbBresShiftMask(mask, sdx, bpp);
+				if (!mask) {
+					dst += sdx;
+					mask = mask0;
+				}
+			}
+		}
+	}
+}
+
+static void
+fbBresDash(DrawablePtr drawable, GCPtr gc, int dashOffset,
+           int sdx, int sdy, int axis,
+	   int x1, int y1,
+	   int e, int e1, int e3, int len)
+{
+	FbStip *dst;
+	FbStride stride;
+	int bpp;
+	int dx, dy;
+	FbGCPrivPtr pgc = fb_gc(gc);
+	FbStip and = (FbStip) pgc->and;
+	FbStip xor = (FbStip) pgc->xor;
+	FbStip bgand = (FbStip) pgc->bgand;
+	FbStip bgxor = (FbStip) pgc->bgxor;
+	FbStip mask, mask0;
+
+	FbDashDeclare;
+	int dashlen;
+	bool even;
+	bool doOdd;
+
+	fbGetStipDrawable(drawable, dst, stride, bpp, dx, dy);
+	doOdd = gc->lineStyle == LineDoubleDash;
+
+	FbDashInit(gc, pgc, dashOffset, dashlen, even);
+
+	dst += ((y1 + dy) * stride);
+	x1 = (x1 + dx) * bpp;
+	dst += x1 >> FB_STIP_SHIFT;
+	x1 &= FB_STIP_MASK;
+	mask0 = FbStipMask(0, bpp);
+	mask = FbStipRight(mask0, x1);
+	if (sdx < 0)
+		mask0 = FbStipRight(mask0, FB_STIP_UNIT - bpp);
+	if (sdy < 0)
+		stride = -stride;
+	while (len--) {
+		if (even)
+			WRITE(dst, FbDoMaskRRop(READ(dst), and, xor, mask));
+		else if (doOdd)
+			WRITE(dst, FbDoMaskRRop(READ(dst), bgand, bgxor, mask));
+		if (axis == X_AXIS) {
+			mask = fbBresShiftMask(mask, sdx, bpp);
+			if (!mask) {
+				dst += sdx;
+				mask = mask0;
+			}
+			e += e1;
+			if (e >= 0) {
+				dst += stride;
+				e += e3;
+			}
+		} else {
+			dst += stride;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				mask = fbBresShiftMask(mask, sdx, bpp);
+				if (!mask) {
+					dst += sdx;
+					mask = mask0;
+				}
+			}
+		}
+		FbDashStep(dashlen, even);
+	}
+}
+
+static void
+fbBresFill(DrawablePtr drawable, GCPtr gc, int dashOffset,
+           int sdx, int sdy, int axis,
+	   int x1, int y1,
+	   int e, int e1, int e3, int len)
+{
+	while (len--) {
+		fbFill(drawable, gc, x1, y1, 1, 1);
+		if (axis == X_AXIS) {
+			x1 += sdx;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				y1 += sdy;
+			}
+		} else {
+			y1 += sdy;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				x1 += sdx;
+			}
+		}
+	}
+}
+
+static void
+fbSetFg(DrawablePtr drawable, GCPtr gc, Pixel fg)
+{
+	if (fg != gc->fgPixel) {
+		gc->fgPixel = fg;
+		fbValidateGC(gc, GCForeground, drawable);
+	}
+}
+
+static void
+fbBresFillDash(DrawablePtr drawable,
+               GCPtr gc,
+               int dashOffset,
+               int sdx,
+               int sdy,
+               int axis, int x1, int y1, int e, int e1, int e3, int len)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+
+	FbDashDeclare;
+	int dashlen;
+	bool even;
+	bool doOdd;
+	bool doBg;
+	Pixel fg, bg;
+
+	fg = gc->fgPixel;
+	bg = gc->bgPixel;
+
+	/* whether to fill the odd dashes */
+	doOdd = gc->lineStyle == LineDoubleDash;
+	/* whether to switch fg to bg when filling odd dashes */
+	doBg = doOdd && (gc->fillStyle == FillSolid ||
+			 gc->fillStyle == FillStippled);
+
+	/* compute current dash position */
+	FbDashInit(gc, pgc, dashOffset, dashlen, even);
+
+	while (len--) {
+		if (even || doOdd) {
+			if (doBg) {
+				if (even)
+					fbSetFg(drawable, gc, fg);
+				else
+					fbSetFg(drawable, gc, bg);
+			}
+			fbFill(drawable, gc, x1, y1, 1, 1);
+		}
+		if (axis == X_AXIS) {
+			x1 += sdx;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				y1 += sdy;
+			}
+		} else {
+			y1 += sdy;
+			e += e1;
+			if (e >= 0) {
+				e += e3;
+				x1 += sdx;
+			}
+		}
+		FbDashStep(dashlen, even);
+	}
+	if (doBg)
+		fbSetFg(drawable, gc, fg);
+}
+
+static FbBres *
+fbSelectBres(DrawablePtr drawable, GCPtr gc)
+{
+	FbGCPrivPtr pgc = fb_gc(gc);
+	int bpp = drawable->bitsPerPixel;
+	FbBres *bres;
+
+	DBG(("%s: line=%d, fill=%d, and=%lx, bgand=%lx\n",
+	     __FUNCTION__, gc->lineStyle, gc->fillStyle, pgc->and, pgc->bgand));
+	assert(gc->lineWidth == 0);
+
+	if (gc->lineStyle == LineSolid) {
+		bres = fbBresFill;
+		if (gc->fillStyle == FillSolid) {
+			bres = fbBresSolid;
+			if (pgc->and == 0) {
+				switch (bpp) {
+				case 8:
+					bres = fbBresSolid8;
+					break;
+				case 16:
+					bres = fbBresSolid16;
+					break;
+				case 32:
+					bres = fbBresSolid32;
+					break;
+				}
+			} else {
+				switch (bpp) {
+				case 8:
+					bres = fbBresSolidR8;
+					break;
+				case 16:
+					bres = fbBresSolidR16;
+					break;
+				case 32:
+					bres = fbBresSolidR32;
+					break;
+				}
+			}
+		}
+	} else {
+		bres = fbBresFillDash;
+		if (gc->fillStyle == FillSolid) {
+			bres = fbBresDash;
+			if (pgc->and == 0 &&
+			    (gc->lineStyle == LineOnOffDash || pgc->bgand == 0)) {
+				switch (bpp) {
+				case 8:
+					bres = fbBresDash8;
+					break;
+				case 16:
+					bres = fbBresDash16;
+					break;
+				case 32:
+					bres = fbBresDash32;
+					break;
+				}
+			}
+		}
+	}
+	return bres;
+}
+
+struct fbSegment {
+	FbBres *bres;
+	bool drawLast;
+	int *dashOffset;
+	int x1, y1, x2, y2;
+};
+
+static void
+_fbSegment(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbSegment *data = _data;
+	const unsigned int bias = miGetZeroLineBias(drawable->pScreen);
+	int adx, ady;               /* abs values of dx and dy */
+	int sdx, sdy;               /* sign of dx and dy */
+	int e, e1, e2, e3;          /* bresenham error and increments */
+	int len, axis, octant;
+	int dashoff, doff;
+	unsigned int oc1, oc2;
+
+	DBG(("%s box=(%d, %d),(%d, %d)\n",
+	     __FUNCTION__, b->x1, b->y1, b->x2, b->y2));
+
+	CalcLineDeltas(data->x1, data->y1, data->x2, data->y2,
+		       adx, ady, sdx, sdy, 1, 1, octant);
+
+	if (adx > ady) {
+		axis = X_AXIS;
+		e1 = ady << 1;
+		e2 = e1 - (adx << 1);
+		e = e1 - adx;
+		len = adx;
+	} else {
+		axis = Y_AXIS;
+		e1 = adx << 1;
+		e2 = e1 - (ady << 1);
+		e = e1 - ady;
+		SetYMajorOctant(octant);
+		len = ady;
+	}
+
+	FIXUP_ERROR(e, octant, bias);
+
+	/*
+	 * Adjust error terms to compare against zero
+	 */
+	e3 = e2 - e1;
+	e = e - e1;
+
+	if (data->drawLast)
+		len++;
+	dashoff = *data->dashOffset;
+	*data->dashOffset = dashoff + len;
+
+	oc1 = 0;
+	oc2 = 0;
+	OUTCODES(oc1, data->x1, data->y1, b);
+	OUTCODES(oc2, data->x2, data->y2, b);
+	if ((oc1 | oc2) == 0) {
+		data->bres(drawable, gc, dashoff,
+			   sdx, sdy, axis, data->x1, data->y1, e, e1, e3, len);
+	} else if (oc1 & oc2) {
+	} else {
+		int new_x1 = data->x1, new_y1 = data->y1;
+		int new_x2 = data->x2, new_y2 = data->y2;
+		int clip1 = 0, clip2 = 0;
+		int clipdx, clipdy;
+		int err;
+
+		if (miZeroClipLine(b->x1, b->y1, b->x2-1, b->y2-1,
+				   &new_x1, &new_y1, &new_x2, &new_y2,
+				   adx, ady, &clip1, &clip2,
+				   octant, bias, oc1, oc2) == -1)
+			return;
+
+		if (axis == X_AXIS)
+			len = abs(new_x2 - new_x1);
+		else
+			len = abs(new_y2 - new_y1);
+		if (clip2 != 0 || data->drawLast)
+			len++;
+		if (len) {
+			/* unwind bresenham error term to first point */
+			doff = dashoff;
+			err = e;
+			if (clip1) {
+				clipdx = abs(new_x1 - data->x1);
+				clipdy = abs(new_y1 - data->y1);
+				if (axis == X_AXIS) {
+					doff += clipdx;
+					err += e3 * clipdy + e1 * clipdx;
+				} else {
+					doff += clipdy;
+					err += e3 * clipdx + e1 * clipdy;
+				}
+			}
+			data->bres(drawable, gc, doff,
+				   sdx, sdy, axis, new_x1, new_y1,
+				   err, e1, e3, len);
+		}
+	}
+}
+
+void
+fbSegment(DrawablePtr drawable, GCPtr gc,
+          int x1, int y1, int x2, int y2,
+	  bool drawLast, int *dashOffset)
+{
+	struct fbSegment data;
+	BoxRec box;
+
+	DBG(("%s (%d, %d), (%d, %d), drawLast?=%d\n",
+	     __FUNCTION__, x1, y1, x2, y2, drawLast));
+
+	/* simple overestimate of line extents for clipping */
+	box.x1 = (x1 < x2 ? x1 : x2) - 1;
+	box.y1 = (y1 < y2 ? y1 : y2) - 1;
+	box.x2 = (x1 > x2 ? x1 : x2) + 1;
+	box.y2 = (y1 > y2 ? y1 : y2) + 1;
+
+	data.x1 = x1;
+	data.y1 = y1;
+	data.x2 = x2;
+	data.y2 = y2;
+
+	data.dashOffset = dashOffset;
+	data.drawLast = drawLast;
+	data.bres = fbSelectBres(drawable, gc);
+
+	fbDrawableRunUnclipped(drawable, gc, &box, _fbSegment, &data);
+}
+
+void
+fbSegment1(DrawablePtr drawable, GCPtr gc, const BoxRec *b,
+	   int x1, int y1, int x2, int y2,
+	   bool drawLast, int *dashOffset)
+{
+	struct fbSegment data;
+
+	DBG(("%s (%d, %d), (%d, %d), drawLast?=%d\n",
+	     __FUNCTION__, x1, y1, x2, y2, drawLast));
+
+	data.x1 = x1;
+	data.y1 = y1;
+	data.x2 = x2;
+	data.y2 = y2;
+
+	data.dashOffset = dashOffset;
+	data.drawLast = drawLast;
+	data.bres = fbSelectBres(drawable, gc);
+
+	_fbSegment(drawable, gc, b, &data);
+}
diff --git a/src/sna/fb/fbsegbits.h b/src/sna/fb/fbsegbits.h
new file mode 100644
index 0000000..590ad30
--- /dev/null
+++ b/src/sna/fb/fbsegbits.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define isClipped(c,ul,lr)  (((c) | ((c) - (ul)) | ((lr) - (c))) & 0x80008000)
+#define RROP(b,a,x)	WRITE((b), FbDoRRop (READ(b), (a), (x)))
+
+static void
+BRESSOLID(DrawablePtr drawable, GCPtr gc, int dashOffset,
+	  int sdx, int sdy, int axis,
+	  int x1, int y1, int e, int e1, int e3, int len)
+{
+	FbBits *dst;
+	FbStride stride;
+	int bpp, dx, dy;
+	BITS *bits;
+	FbStride major, minor;
+	BITS xor = fb_gc(gc)->xor;
+
+	fbGetDrawable(drawable, dst, stride, bpp, dx, dy);
+	bits = (BITS *)(dst + (y1 + dy) * stride) + (x1 + dx);
+	stride = stride * (sizeof(FbBits) / sizeof(BITS));
+	if (sdy < 0)
+		stride = -stride;
+	if (axis == X_AXIS) {
+		major = sdx;
+		minor = stride;
+	} else {
+		major = stride;
+		minor = sdx;
+	}
+	while (len--) {
+		WRITE(bits, xor);
+		bits += major;
+		e += e1;
+		if (e >= 0) {
+			bits += minor;
+			e += e3;
+		}
+	}
+}
+
+static void
+BRESSOLIDR(DrawablePtr drawable, GCPtr gc, int dashOffset,
+	   int sdx, int sdy, int axis,
+	   int x1, int y1, int e, int e1, int e3, int len)
+{
+	FbBits *dst;
+	FbStride stride;
+	int bpp, dx, dy;
+	BITS *bits;
+	FbStride major, minor;
+	BITS and = fb_gc(gc)->and;
+	BITS xor = fb_gc(gc)->xor;
+
+	fbGetDrawable(drawable, dst, stride, bpp, dx, dy);
+	bits = (BITS *)(dst + (y1 + dy) * stride) + (x1 + dx);
+	stride = stride * (sizeof(FbBits) / sizeof(BITS));
+	if (sdy < 0)
+		stride = -stride;
+	if (axis == X_AXIS) {
+		major = sdx;
+		minor = stride;
+	} else {
+		major = stride;
+		minor = sdx;
+	}
+	while (len--) {
+		RROP(bits, and, xor);
+		bits += major;
+		e += e1;
+		if (e >= 0) {
+			bits += minor;
+			e += e3;
+		}
+	}
+}
+
/*
 * Dashed Bresenham line.  Pixel stepping is identical to BRESSOLID;
 * pixels are emitted according to the GC's dash pattern.  With
 * LineDoubleDash the odd (gap) segments are drawn with the background
 * xor value; otherwise (on/off dashing) they are skipped while the
 * error term still advances along the line.
 */
static void
BRESDASH(DrawablePtr drawable, GCPtr gc, int dashOffset,
	 int sdx, int sdy, int axis,
	 int x1, int y1, int e, int e1, int e3, int len)
{
	FbBits *dst;
	FbStride stride;
	int bpp, dx, dy;
	BITS *bits;
	FbStride major, minor;

	FbDashDeclare;
	int dashlen;
	bool even;
	bool doOdd = gc->lineStyle == LineDoubleDash;
	BITS xorfg = fb_gc(gc)->xor;
	BITS xorbg = fb_gc(gc)->bgxor;

	fbGetDrawable(drawable, dst, stride, bpp, dx, dy);

	/* NOTE(review): FbDashInit is assumed to position the dash walk
	 * at dashOffset, leaving the remaining length of the current dash
	 * segment in dashlen and whether it is an even (pen-down) segment
	 * in 'even' -- the FbDash* macros are defined elsewhere; confirm
	 * against fb.h. */
	FbDashInit(gc, fb_gc(gc), dashOffset, dashlen, even);

	bits = ((BITS *) (dst + ((y1 + dy) * stride))) + (x1 + dx);
	/* convert stride from FbBits units to BITS units */
	stride = stride * (sizeof(FbBits) / sizeof(BITS));
	if (sdy < 0)
		stride = -stride;
	/* major advances every pixel, minor on Bresenham error overflow */
	if (axis == X_AXIS) {
		major = sdx;
		minor = stride;
	} else {
		major = stride;
		minor = sdx;
	}
	/* never let a dash segment run past the end of the line */
	if (dashlen >= len)
		dashlen = len;
	if (doOdd) {
		if (!even)
			goto doubleOdd;
		for (;;) {
			/* even segment: foreground */
			len -= dashlen;
			while (dashlen--) {
				WRITE(bits, xorfg);
				bits += major;
				if ((e += e1) >= 0) {
					e += e3;
					bits += minor;
				}
			}
			if (!len)
				break;

			FbDashNextEven(dashlen);

			if (dashlen >= len)
				dashlen = len;
doubleOdd:
			/* odd segment: background (LineDoubleDash only) */
			len -= dashlen;
			while (dashlen--) {
				WRITE(bits, xorbg);
				bits += major;
				if ((e += e1) >= 0) {
					e += e3;
					bits += minor;
				}
			}
			if (!len)
				break;

			FbDashNextOdd(dashlen);

			if (dashlen >= len)
				dashlen = len;
		}
	} else {
		if (!even)
			goto onOffOdd;
		for (;;) {
			/* even segment: foreground */
			len -= dashlen;
			while (dashlen--) {
				WRITE(bits, xorfg);
				bits += major;
				if ((e += e1) >= 0) {
					e += e3;
					bits += minor;
				}
			}
			if (!len)
				break;

			FbDashNextEven(dashlen);

			if (dashlen >= len)
				dashlen = len;
onOffOdd:
			/* odd segment: pen up, but keep walking the line */
			len -= dashlen;
			while (dashlen--) {
				bits += major;
				if ((e += e1) >= 0) {
					e += e3;
					bits += minor;
				}
			}
			if (!len)
				break;

			FbDashNextOdd(dashlen);

			if (dashlen >= len)
				dashlen = len;
		}
	}
}
+
+#undef RROP
+#undef isClipped
diff --git a/src/sna/fb/fbspan.c b/src/sna/fb/fbspan.c
new file mode 100644
index 0000000..45cb7cc
--- /dev/null
+++ b/src/sna/fb/fbspan.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+#include "fbclip.h"
+
+inline static void
+fbFillSpan(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *data)
+{
+	DBG(("%s (%d,%d)+%d\n", __FUNCTION__, b->x1, b->y1, b->x2-b->x1));
+	fbFill(drawable, gc, b->x1, b->y1, b->x2 - b->x1, 1);
+}
+
+void
+fbFillSpans(DrawablePtr drawable, GCPtr gc,
+	    int n, DDXPointPtr pt, int *width, int fSorted)
+{
+	DBG(("%s x %d\n", __FUNCTION__, n));
+	while (n--) {
+		BoxRec box;
+
+		*(DDXPointPtr)&box = *pt++;
+		box.x2 = box.x1 + *width++;
+		box.y2 = box.y1 + 1;
+
+		/* XXX fSorted */
+		fbDrawableRun(drawable, gc, &box, fbFillSpan, NULL);
+	}
+}
+
+struct fbSetSpan {
+	char *src;
+	DDXPointRec pt;
+	FbStride stride;
+	FbBits *dst;
+	int dx, dy;
+};
+
+inline static void
+fbSetSpan(DrawablePtr drawable, GCPtr gc, const BoxRec *b, void *_data)
+{
+	struct fbSetSpan *data = _data;
+	int xoff, bpp;
+
+	xoff = (int) (((long)data->src) & (FB_MASK >> 3));
+	bpp = drawable->bitsPerPixel;
+
+	fbBlt((FbBits *)(data->src - xoff), 0,
+	      (b->x1 - data->pt.x) * bpp + (xoff << 3),
+	      data->dst + (b->y1 + data->dy) * data->stride, data->stride,
+	      (b->x1 + data->dx) * bpp,
+	      (b->x2 - b->x1) * bpp, 1,
+	      gc->alu, fb_gc(gc)->pm, bpp,
+	      FALSE, FALSE);
+}
+
+void
+fbSetSpans(DrawablePtr drawable, GCPtr gc,
+           char *src, DDXPointPtr pt, int *width, int n, int fSorted)
+{
+	struct fbSetSpan data;
+	PixmapPtr pixmap;
+
+	DBG(("%s x %d\n", __FUNCTION__, n));
+
+	fbGetDrawablePixmap(drawable, pixmap, data.dx, data.dy);
+	data.dst = pixmap->devPrivate.ptr;
+	data.stride = pixmap->devKind / sizeof(FbStip);
+
+	data.src = src;
+	while (n--) {
+		BoxRec box;
+
+		*(DDXPointPtr)&box = data.pt = *pt;
+		box.x2 = box.x1 + *width;
+		box.y2 = box.y1 + 1;
+
+		fbDrawableRun(drawable, gc, &box, fbSetSpan, &data);
+
+		data.src += PixmapBytePad(*width, drawable->depth);
+		width++;
+		pt++;
+	}
+}
+
+void
+fbGetSpans(DrawablePtr drawable, int wMax,
+           DDXPointPtr pt, int *width, int n, char *dst)
+{
+	FbBits *src, *d;
+	FbStride srcStride;
+	int srcBpp;
+	int srcXoff, srcYoff;
+	int xoff;
+
+	fbGetDrawable(drawable, src, srcStride, srcBpp, srcXoff, srcYoff);
+
+	DBG(("%s x %d\n", __FUNCTION__, n));
+	while (n--) {
+		xoff = (int) (((long) dst) & (FB_MASK >> 3));
+		d = (FbBits *) (dst - xoff);
+		fbBlt(src + (pt->y + srcYoff) * srcStride, srcStride,
+		      (pt->x + srcXoff) * srcBpp,
+		      d, 1, xoff << 3, *width * srcBpp,
+		      1, GXcopy, FB_ALLONES, srcBpp,
+		      FALSE, FALSE);
+		dst += PixmapBytePad(*width, drawable->depth);
+		pt++;
+		width++;
+	}
+}
diff --git a/src/sna/fb/fbstipple.c b/src/sna/fb/fbstipple.c
new file mode 100644
index 0000000..d02970a
--- /dev/null
+++ b/src/sna/fb/fbstipple.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+
+/*
+ * This is a slight abuse of the preprocessor to generate repetitive
+ * code, the idea is to generate code for each case of a copy-mode
+ * transparent stipple
+ */
+#define LaneCases1(c,a) \
+	case c: while (n--) { FbLaneCase(c,a); a++; } break
+#define LaneCases2(c,a)	    LaneCases1(c,a); LaneCases1(c+1,a)
+#define LaneCases4(c,a)	    LaneCases2(c,a); LaneCases2(c+2,a)
+#define LaneCases8(c,a)	    LaneCases4(c,a); LaneCases4(c+4,a)
+#define LaneCases16(c,a)    LaneCases8(c,a); LaneCases8(c+8,a)
+
+#define LaneCases(a)	    LaneCases16(0,a)
+
/*
 * Repeat a transparent stipple across a scanline n times
 */

void
fbTransparentSpan(FbBits * dst, FbBits stip, FbBits fgxor, int n)
{
	FbStip s;

	/* Collapse the expanded stipple mask into a 4-bit lane selector
	 * by taking bits 0, 9, 18 and 27 of the word.  The selector
	 * indexes the LaneCases jump table, which writes fgxor into only
	 * the selected lanes of each of the n destination words.
	 * NOTE(review): FbLaneCase references dst/fgxor/n implicitly by
	 * name, so these identifiers must not be renamed. */
	s = ((FbStip) (stip) & 0x01);
	s |= ((FbStip) (stip >> 8) & 0x02);
	s |= ((FbStip) (stip >> 16) & 0x04);
	s |= ((FbStip) (stip >> 24) & 0x08);
	switch (s) {
		LaneCases(dst);
	}
}
+
/*
 * Fast path for stipples whose width evenly packs into an FbBits
 * word: each stipple scanline expands (via fbStippleTable) to a single
 * mask word that is rotated into phase and applied across the whole
 * destination row with reduced raster-ops -- or written directly on
 * the transparent (stencil) path.
 */
static void
fbEvenStipple(FbBits *dst, FbStride dstStride, int dstX, int dstBpp,
              int width, int height,
              FbStip *stip, FbStride stipStride,
              int stipHeight,
              FbBits fgand, FbBits fgxor, FbBits bgand, FbBits bgxor,
	      int xRot, int yRot)
{
	FbBits startmask, endmask;
	FbBits mask, and, xor;
	int nmiddle, n;
	FbStip *s, *stipEnd, bits;
	int rot, stipX, stipY;
	int pixelsPerDst;
	const FbBits *fbBits;
	Bool transparent;
	int startbyte, endbyte;

	/*
	 * Check for a transparent stipple (stencil)
	 */
	transparent = FALSE;
	if (dstBpp >= 8 && fgand == 0 && bgand == FB_ALLONES && bgxor == 0)
		transparent = TRUE;

	pixelsPerDst = FB_UNIT / dstBpp;
	/*
	 * Adjust dest pointers
	 */
	dst += dstX >> FB_SHIFT;
	dstX &= FB_MASK;
	FbMaskBitsBytes(dstX, width, fgand == 0 && bgand == 0,
			startmask, startbyte, nmiddle, endmask, endbyte);

	/* dstStride becomes the number of words to skip after writing
	 * the (optional) start word plus nmiddle middle words of a row */
	if (startmask)
		dstStride--;
	dstStride -= nmiddle;

	xRot *= dstBpp;
	/*
	 * Compute stip start scanline and rotation parameters
	 */
	stipEnd = stip + stipStride * stipHeight;
	modulus(-yRot, stipHeight, stipY);
	s = stip + stipStride * stipY;
	modulus(-xRot, FB_UNIT, stipX);
	rot = stipX;

	/*
	 * Get pointer to stipple mask array for this depth
	 */
	/* fbStippleTable covers all valid bpp (4,8,16,32) */
	fbBits = fbStippleTable[pixelsPerDst];

	while (height--) {
		/*
		 * Extract stipple bits for this scanline;
		 */
		bits = READ(s);
		s += stipStride;
		if (s == stipEnd)
			s = stip;
		mask = fbBits[FbLeftStipBits(bits, pixelsPerDst)];
		/*
		 * Rotate into position and compute reduced rop values
		 */
		mask = FbRotLeft(mask, rot);
		and = (fgand & mask) | (bgand & ~mask);
		xor = (fgxor & mask) | (bgxor & ~mask);

		if (transparent) {
			if (startmask) {
				fbTransparentSpan(dst, mask & startmask, fgxor, 1);
				dst++;
			}
			fbTransparentSpan(dst, mask, fgxor, nmiddle);
			dst += nmiddle;
			if (endmask)
				fbTransparentSpan(dst, mask & endmask, fgxor, 1);
		} else {
			/*
			 * Fill scanline
			 */
			if (startmask) {
				FbDoLeftMaskByteRRop(dst, startbyte, startmask, and, xor);
				dst++;
			}
			n = nmiddle;
			if (!and)
				while (n--)
					WRITE(dst++, xor);
			else {
				while (n--) {
					WRITE(dst, FbDoRRop(READ(dst), and, xor));
					dst++;
				}
			}
			if (endmask)
				FbDoRightMaskByteRRop(dst, endbyte, endmask, and, xor);
		}
		dst += dstStride;
	}
}
+
+static void
+fbOddStipple(FbBits *dst, FbStride dstStride, int dstX, int dstBpp,
+             int width, int height,
+             FbStip *stip, FbStride stipStride,
+             int stipWidth, int stipHeight,
+             FbBits fgand, FbBits fgxor, FbBits bgand, FbBits bgxor,
+	     int xRot, int yRot)
+{
+	int stipX, stipY, sx;
+	int widthTmp;
+	int h, w;
+	int x, y;
+
+	modulus(-yRot, stipHeight, stipY);
+	modulus(dstX / dstBpp - xRot, stipWidth, stipX);
+	y = 0;
+	while (height) {
+		h = stipHeight - stipY;
+		if (h > height)
+			h = height;
+		height -= h;
+		widthTmp = width;
+		x = dstX;
+		sx = stipX;
+		while (widthTmp) {
+			w = (stipWidth - sx) * dstBpp;
+			if (w > widthTmp)
+				w = widthTmp;
+			widthTmp -= w;
+			fbBltOne(stip + stipY * stipStride,
+				 stipStride,
+				 sx,
+				 dst + y * dstStride,
+				 dstStride, x, dstBpp, w, h, fgand, fgxor, bgand, bgxor);
+			x += w;
+			sx = 0;
+		}
+		y += h;
+		stipY = 0;
+	}
+}
+
+void
+fbStipple(FbBits *dst, FbStride dstStride, int dstX, int dstBpp,
+          int width, int height,
+          FbStip *stip, FbStride stipStride,
+          int stipWidth, int stipHeight, Bool even,
+          FbBits fgand, FbBits fgxor, FbBits bgand, FbBits bgxor,
+	  int xRot, int yRot)
+{
+	DBG(("%s stipple=%dx%d, size=%dx%d\n",
+	     __FUNCTION__, stipWidth, stipHeight, width, height));
+
+	if (even)
+		fbEvenStipple(dst, dstStride, dstX, dstBpp, width, height,
+			      stip, stipStride, stipHeight,
+			      fgand, fgxor, bgand, bgxor, xRot, yRot);
+	else
+		fbOddStipple(dst, dstStride, dstX, dstBpp, width, height,
+			     stip, stipStride, stipWidth, stipHeight,
+			     fgand, fgxor, bgand, bgxor, xRot, yRot);
+}
diff --git a/src/sna/fb/fbtile.c b/src/sna/fb/fbtile.c
new file mode 100644
index 0000000..5586553
--- /dev/null
+++ b/src/sna/fb/fbtile.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+
+/*
+ * Accelerated tile fill -- tile width is a power of two not greater
+ * than FB_UNIT
+ */
+
/*
 * Tile fill fast path for tile widths that pack evenly into an FbBits
 * word (see the file header: width is a power of two not greater than
 * FB_UNIT): each tile scanline is read as one word, rotated into
 * phase, and replicated across the destination row with reduced
 * raster-ops.
 */
static void
fbEvenTile(FbBits *dst, FbStride dstStride, int dstX, int width, int height,
           FbBits *tile, FbStride tileStride, int tileHeight,
	   int alu, FbBits pm,
	   int xRot, int yRot)
{
	FbBits *t, *tileEnd, bits;
	FbBits startmask, endmask;
	FbBits and, xor;
	int n, nmiddle;
	int tileX, tileY;
	int rot;
	int startbyte, endbyte;

	dst += dstX >> FB_SHIFT;
	dstX &= FB_MASK;
	FbMaskBitsBytes(dstX, width, FbDestInvarientRop(alu, pm),
			startmask, startbyte, nmiddle, endmask, endbyte);
	/* dstStride becomes the words to skip after each row is written */
	if (startmask)
		dstStride--;
	dstStride -= nmiddle;

	/*
	 * Compute tile start scanline and rotation parameters
	 */
	tileEnd = tile + tileHeight * tileStride;
	modulus(-yRot, tileHeight, tileY);
	t = tile + tileY * tileStride;
	modulus(-xRot, FB_UNIT, tileX);
	rot = tileX;

	while (height--) {
		/*
		 * Pick up bits for this scanline
		 */
		bits = READ(t);
		t += tileStride;
		if (t >= tileEnd)
			t = tile;
		bits = FbRotLeft(bits, rot);
		and = fbAnd(alu, bits, pm);
		xor = fbXor(alu, bits, pm);

		if (startmask) {
			FbDoLeftMaskByteRRop(dst, startbyte, startmask, and, xor);
			dst++;
		}
		n = nmiddle;
		if (!and)
			while (n--)
				WRITE(dst++, xor);
		else
			while (n--) {
				WRITE(dst, FbDoRRop(READ(dst), and, xor));
				dst++;
			}
		if (endmask)
			FbDoRightMaskByteRRop(dst, endbyte, endmask, and, xor);
		dst += dstStride;
	}
}
+
+static void
+fbOddTile(FbBits *dst, FbStride dstStride, int dstX,
+          int width, int height,
+          FbBits *tile, FbStride tileStride,
+          int tileWidth, int tileHeight,
+	  int alu, FbBits pm, int bpp,
+	  int xRot, int yRot)
+{
+	int tileX, tileY;
+	int widthTmp;
+	int h, w;
+	int x, y;
+
+	modulus(-yRot, tileHeight, tileY);
+	y = 0;
+	while (height) {
+		h = tileHeight - tileY;
+		if (h > height)
+			h = height;
+		height -= h;
+		widthTmp = width;
+		x = dstX;
+		modulus(dstX - xRot, tileWidth, tileX);
+		while (widthTmp) {
+			w = tileWidth - tileX;
+			if (w > widthTmp)
+				w = widthTmp;
+			widthTmp -= w;
+			fbBlt(tile + tileY * tileStride,
+			      tileStride,
+			      tileX,
+			      dst + y * dstStride,
+			      dstStride, x, w, h, alu, pm, bpp, FALSE, FALSE);
+			x += w;
+			tileX = 0;
+		}
+		y += h;
+		tileY = 0;
+	}
+}
+
+void
+fbTile(FbBits *dst, FbStride dstStride, int dstX,
+       int width, int height,
+       FbBits *tile, FbStride tileStride,
+       int tileWidth, int tileHeight,
+       int alu, FbBits pm, int bpp,
+       int xRot, int yRot)
+{
+	DBG(("%s tile=%dx%d, size=%dx%d\n", __FUNCTION__,
+	     tileWidth, tileHeight, width, height));
+
+	if (FbEvenTile(tileWidth))
+		fbEvenTile(dst, dstStride, dstX, width, height,
+			   tile, tileStride, tileHeight, alu, pm, xRot, yRot);
+	else
+		fbOddTile(dst, dstStride, dstX, width, height,
+			  tile, tileStride, tileWidth, tileHeight,
+			  alu, pm, bpp, xRot, yRot);
+}
diff --git a/src/sna/fb/fbutil.c b/src/sna/fb/fbutil.c
new file mode 100644
index 0000000..61b63ad
--- /dev/null
+++ b/src/sna/fb/fbutil.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright © 1998 Keith Packard
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that
+ * copyright notice and this permission notice appear in supporting
+ * documentation, and that the name of Keith Packard not be used in
+ * advertising or publicity pertaining to distribution of the software without
+ * specific, written prior permission.  Keith Packard makes no
+ * representations about the suitability of this software for any purpose.  It
+ * is provided "as is" without express or implied warranty.
+ *
+ * KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "fb.h"
+
+FbBits
+fbReplicatePixel(Pixel p, int bpp)
+{
+	FbBits b = p;
+
+	b &= FbFullMask(bpp);
+	while (bpp < FB_UNIT) {
+		b |= b << bpp;
+		bpp <<= 1;
+	}
+	return b;
+}
+
/*
 * Stipple masks are independent of bit/byte order as long
 * as bitorder == byteorder.  FB doesn't handle the case
 * where these differ
 */
#define __mask(x,w)	((FB_ALLONES << ((x) & FB_MASK)) & \
			 (FB_ALLONES >> ((FB_UNIT - ((x) + (w))) & FB_MASK)))
#define _mask(x,w)	__mask((x)*(w),(w))
#define mask(b,n,w)	((((b) >> (n)) & 1) * _mask(n,w))

/* C<N>(b,w): expand the low N bits of b into N adjacent masks of w
 * bits each -- bit n of b decides whether pixel n's mask is set. */
#define _C1(b,n,w) mask(b,n,w)
#define _C2(b,n,w) (_C1(b,n,w) | _C1(b,n+1,w))
#define _C4(b,n,w) (_C2(b,n,w) | _C2(b,n+2,w))
#define C8(b,w) (_C4(b,0,w) | _C4(b,4,w))
#define C4(b,w) _C4(b,0,w)
#define C2(b,w) _C2(b,0,w)
#define C1(b,w) _C1(b,0,w)

/* 8 stipple bits -> 8 pixels of 4 bits each per FbBits word */
static const FbBits fbStipple8Bits[256] = {
	C8(0, 4), C8(1, 4), C8(2, 4), C8(3, 4), C8(4, 4), C8(5, 4),
	C8(6, 4), C8(7, 4), C8(8, 4), C8(9, 4), C8(10, 4), C8(11, 4),
	C8(12, 4), C8(13, 4), C8(14, 4), C8(15, 4), C8(16, 4), C8(17, 4),
	C8(18, 4), C8(19, 4), C8(20, 4), C8(21, 4), C8(22, 4), C8(23, 4),
	C8(24, 4), C8(25, 4), C8(26, 4), C8(27, 4), C8(28, 4), C8(29, 4),
	C8(30, 4), C8(31, 4), C8(32, 4), C8(33, 4), C8(34, 4), C8(35, 4),
	C8(36, 4), C8(37, 4), C8(38, 4), C8(39, 4), C8(40, 4), C8(41, 4),
	C8(42, 4), C8(43, 4), C8(44, 4), C8(45, 4), C8(46, 4), C8(47, 4),
	C8(48, 4), C8(49, 4), C8(50, 4), C8(51, 4), C8(52, 4), C8(53, 4),
	C8(54, 4), C8(55, 4), C8(56, 4), C8(57, 4), C8(58, 4), C8(59, 4),
	C8(60, 4), C8(61, 4), C8(62, 4), C8(63, 4), C8(64, 4), C8(65, 4),
	C8(66, 4), C8(67, 4), C8(68, 4), C8(69, 4), C8(70, 4), C8(71, 4),
	C8(72, 4), C8(73, 4), C8(74, 4), C8(75, 4), C8(76, 4), C8(77, 4),
	C8(78, 4), C8(79, 4), C8(80, 4), C8(81, 4), C8(82, 4), C8(83, 4),
	C8(84, 4), C8(85, 4), C8(86, 4), C8(87, 4), C8(88, 4), C8(89, 4),
	C8(90, 4), C8(91, 4), C8(92, 4), C8(93, 4), C8(94, 4), C8(95, 4),
	C8(96, 4), C8(97, 4), C8(98, 4), C8(99, 4), C8(100, 4), C8(101, 4),
	C8(102, 4), C8(103, 4), C8(104, 4), C8(105, 4), C8(106, 4), C8(107, 4),
	C8(108, 4), C8(109, 4), C8(110, 4), C8(111, 4), C8(112, 4), C8(113, 4),
	C8(114, 4), C8(115, 4), C8(116, 4), C8(117, 4), C8(118, 4), C8(119, 4),
	C8(120, 4), C8(121, 4), C8(122, 4), C8(123, 4), C8(124, 4), C8(125, 4),
	C8(126, 4), C8(127, 4), C8(128, 4), C8(129, 4), C8(130, 4), C8(131, 4),
	C8(132, 4), C8(133, 4), C8(134, 4), C8(135, 4), C8(136, 4), C8(137, 4),
	C8(138, 4), C8(139, 4), C8(140, 4), C8(141, 4), C8(142, 4), C8(143, 4),
	C8(144, 4), C8(145, 4), C8(146, 4), C8(147, 4), C8(148, 4), C8(149, 4),
	C8(150, 4), C8(151, 4), C8(152, 4), C8(153, 4), C8(154, 4), C8(155, 4),
	C8(156, 4), C8(157, 4), C8(158, 4), C8(159, 4), C8(160, 4), C8(161, 4),
	C8(162, 4), C8(163, 4), C8(164, 4), C8(165, 4), C8(166, 4), C8(167, 4),
	C8(168, 4), C8(169, 4), C8(170, 4), C8(171, 4), C8(172, 4), C8(173, 4),
	C8(174, 4), C8(175, 4), C8(176, 4), C8(177, 4), C8(178, 4), C8(179, 4),
	C8(180, 4), C8(181, 4), C8(182, 4), C8(183, 4), C8(184, 4), C8(185, 4),
	C8(186, 4), C8(187, 4), C8(188, 4), C8(189, 4), C8(190, 4), C8(191, 4),
	C8(192, 4), C8(193, 4), C8(194, 4), C8(195, 4), C8(196, 4), C8(197, 4),
	C8(198, 4), C8(199, 4), C8(200, 4), C8(201, 4), C8(202, 4), C8(203, 4),
	C8(204, 4), C8(205, 4), C8(206, 4), C8(207, 4), C8(208, 4), C8(209, 4),
	C8(210, 4), C8(211, 4), C8(212, 4), C8(213, 4), C8(214, 4), C8(215, 4),
	C8(216, 4), C8(217, 4), C8(218, 4), C8(219, 4), C8(220, 4), C8(221, 4),
	C8(222, 4), C8(223, 4), C8(224, 4), C8(225, 4), C8(226, 4), C8(227, 4),
	C8(228, 4), C8(229, 4), C8(230, 4), C8(231, 4), C8(232, 4), C8(233, 4),
	C8(234, 4), C8(235, 4), C8(236, 4), C8(237, 4), C8(238, 4), C8(239, 4),
	C8(240, 4), C8(241, 4), C8(242, 4), C8(243, 4), C8(244, 4), C8(245, 4),
	C8(246, 4), C8(247, 4), C8(248, 4), C8(249, 4), C8(250, 4), C8(251, 4),
	C8(252, 4), C8(253, 4), C8(254, 4), C8(255, 4),
};

/* 4 stipple bits -> 4 pixels of 8 bits each */
static const FbBits fbStipple4Bits[16] = {
	C4(0, 8), C4(1, 8), C4(2, 8), C4(3, 8), C4(4, 8), C4(5, 8),
	C4(6, 8), C4(7, 8), C4(8, 8), C4(9, 8), C4(10, 8), C4(11, 8),
	C4(12, 8), C4(13, 8), C4(14, 8), C4(15, 8),
};

/* 2 stipple bits -> 2 pixels of 16 bits each */
static const FbBits fbStipple2Bits[4] = {
	C2(0, 16), C2(1, 16), C2(2, 16), C2(3, 16),
};

/* 1 stipple bit -> 1 pixel of 32 bits */
static const FbBits fbStipple1Bits[2] = {
	C1(0, 32), C1(1, 32),
};
/* Indexed by pixels-per-FbBits-word; zero entries are pixel counts
 * with no expansion table. */
const FbBits *const fbStippleTable[] = {
	0,
	fbStipple1Bits,
	fbStipple2Bits,
	0,
	fbStipple4Bits,
	0,
	0,
	0,
	fbStipple8Bits,
};
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3219e15..7f4c0bf 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -42,9 +42,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #endif
 
 #include <stdint.h>
-
 #include "compiler.h"
 
+#include <xorg-server.h>
+
 #include <xf86Crtc.h>
 #include <xf86str.h>
 #include <windowstr.h>
@@ -52,7 +53,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <picturestr.h>
 #include <gcstruct.h>
 
-#include <xorg-server.h>
 #include <pciaccess.h>
 
 #include <xf86drmMode.h>
@@ -67,8 +67,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <libudev.h>
 #endif
 
-#include "compiler.h"
-
 #define DBG(x)
 
 #define DEBUG_ALL (HAS_DEBUG_FULL || 0)
@@ -114,6 +112,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "kgem.h"
 #include "sna_damage.h"
 #include "sna_render.h"
+#include "fb/fb.h"
 
 #define SNA_CURSOR_X			64
 #define SNA_CURSOR_Y			SNA_CURSOR_X
@@ -150,18 +149,9 @@ struct sna_glyph {
 	uint16_t size, pos;
 };
 
-extern DevPrivateKeyRec sna_private_index;
-extern DevPrivateKeyRec sna_pixmap_index;
-extern DevPrivateKeyRec sna_gc_index;
-extern DevPrivateKeyRec sna_glyph_key;
-
 static inline PixmapPtr get_window_pixmap(WindowPtr window)
 {
-#if 0
-	return window->drawable.pScreen->GetWindowPixmap(window)
-#else
-	return *(void **)window->devPrivates;
-#endif
+	return fbGetWindowPixmap(window);
 }
 
 static inline PixmapPtr get_drawable_pixmap(DrawablePtr drawable)
@@ -259,7 +249,6 @@ struct sna {
 
 	ScreenBlockHandlerProcPtr BlockHandler;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
-	CloseScreenProcPtr CloseScreen;
 
 	PicturePtr clear;
 	struct {
@@ -565,8 +554,7 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 		pixmap->drawable.width * pixmap->drawable.bitsPerPixel/8;
 }
 
-Bool sna_accel_pre_init(struct sna *sna);
-Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
+bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
 void sna_accel_wakeup_handler(struct sna *sna);
 void sna_accel_watch_flush(struct sna *sna, int enable);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e553baf..099075b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -36,14 +36,15 @@
 #include <X11/fonts/font.h>
 #include <X11/fonts/fontstruct.h>
 
-#include <fb.h>
 #include <dixfontstr.h>
 
+#include <mi.h>
+#include <migc.h>
+#include <miline.h>
+#include <micmap.h>
 #ifdef RENDER
 #include <mipict.h>
-#include <fbpict.h>
 #endif
-#include <miline.h>
 #include <shmint.h>
 
 #include <sys/time.h>
@@ -113,11 +114,11 @@ static void __sna_fallback_flush(DrawablePtr d)
 	box.x2 = pixmap->drawable.width;
 	box.y2 = pixmap->drawable.height;
 
-	tmp = fbCreatePixmap(pixmap->drawable.pScreen,
-			     pixmap->drawable.width,
-			     pixmap->drawable.height,
-			     pixmap->drawable.depth,
-			     0);
+	tmp = sna_pixmap_create_unattached(pixmap->drawable.pScreen,
+					   pixmap->drawable.width,
+					   pixmap->drawable.height,
+					   pixmap->drawable.depth,
+					   0);
 
 	DBG(("%s: comparing with direct read...\n", __FUNCTION__));
 	sna_read_boxes(sna,
@@ -138,7 +139,7 @@ static void __sna_fallback_flush(DrawablePtr d)
 		src += pixmap->devKind;
 		dst += tmp->devKind;
 	}
-	fbDestroyPixmap(tmp);
+	tmp->drawable.pScreen->DestroyPixmap(tmp);
 }
 #define FALLBACK_FLUSH(d) __sna_fallback_flush(d)
 #else
@@ -187,6 +188,8 @@ static const uint8_t fill_ROP[] = {
 static const GCOps sna_gc_ops;
 static const GCOps sna_gc_ops__cpu;
 static GCOps sna_gc_ops__tmp;
+static const GCFuncs sna_gc_funcs;
+static const GCFuncs sna_gc_funcs__cpu;
 
 static inline void region_set(RegionRec *r, const BoxRec *b)
 {
@@ -471,9 +474,13 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		priv->pixmap->devPrivate.ptr = NULL;
 }
 
-static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
+static bool sna_destroy_private(PixmapPtr pixmap)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+
+	if (priv == NULL)
+		return true;
 
 	list_del(&priv->list);
 	list_del(&priv->inactive);
@@ -612,7 +619,11 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 
 static inline void sna_set_pixmap(PixmapPtr pixmap, struct sna_pixmap *sna)
 {
-	dixSetPrivate(&pixmap->devPrivates, &sna_pixmap_index, sna);
+#if 0
+	dixSetPrivate(&pixmap->devPrivates, &sna_private_index, sna);
+#else
+	((void **)pixmap->devPrivates)[1] = sna;
+#endif
 	assert(sna_pixmap(pixmap) == sna);
 }
 
@@ -672,17 +683,74 @@ bool sna_pixmap_attach_to_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	return true;
 }
 
-static inline PixmapPtr
/* Map a drawable depth onto its storage bits-per-pixel; 0 for depths
 * the driver does not support. */
static int bits_per_pixel(int depth)
{
	if (depth == 1)
		return 1;
	if (depth == 4 || depth == 8)
		return 8;
	if (depth == 15 || depth == 16)
		return 16;
	if (depth == 24 || depth == 30 || depth == 32)
		return 32;
	return 0;
}
+static PixmapPtr
 create_pixmap(struct sna *sna, ScreenPtr screen,
 	      int width, int height, int depth,
-	      unsigned usage)
+	      unsigned usage_hint)
 {
 	PixmapPtr pixmap;
+	size_t datasize;
+	size_t stride;
+	int base, bpp;
+
+	bpp = bits_per_pixel(depth);
+	if (bpp == 0)
+		return NullPixmap;
+
+	stride = ((width * bpp + FB_MASK) >> FB_SHIFT) * sizeof(FbBits);
+	if (stride / 4 > 32767 || height > 32767)
+		return NullPixmap;
+
+	datasize = height * stride;
+	base = screen->totalPixmapSize;
+	if (base & 15) {
+		int adjust = 16 - (base & 15);
+		base += adjust;
+		datasize += adjust;
+	}
 
-	pixmap = fbCreatePixmap(screen, width, height, depth, usage);
-	if (pixmap == NullPixmap)
+	pixmap = AllocatePixmap(screen, datasize);
+	if (!pixmap)
 		return NullPixmap;
 
+	((void **)pixmap->devPrivates)[0] = sna;
+
+	pixmap->drawable.type = DRAWABLE_PIXMAP;
+	pixmap->drawable.class = 0;
+	pixmap->drawable.pScreen = screen;
+	pixmap->drawable.depth = depth;
+	pixmap->drawable.bitsPerPixel = bpp;
+	pixmap->drawable.id = 0;
+	pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
+	pixmap->drawable.x = 0;
+	pixmap->drawable.y = 0;
+	pixmap->drawable.width = width;
+	pixmap->drawable.height = height;
+	pixmap->devKind = stride;
+	pixmap->refcnt = 1;
+	pixmap->devPrivate.ptr =  (char *)pixmap + base;
+
+#ifdef COMPOSITE
+	pixmap->screen_x = 0;
+	pixmap->screen_y = 0;
+#endif
+
+	pixmap->usage_hint = usage_hint;
+
 	DBG(("%s: serial=%ld, usage=%d, %dx%d\n",
 	     __FUNCTION__,
 	     pixmap->drawable.serialNumber,
@@ -690,8 +758,6 @@ create_pixmap(struct sna *sna, ScreenPtr screen,
 	     pixmap->drawable.width,
 	     pixmap->drawable.height));
 
-	assert(sna_private_index.offset == 0);
-	dixSetPrivate(&pixmap->devPrivates, &sna_private_index, sna);
 	return pixmap;
 }
 
@@ -701,7 +767,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 		      char *addr)
 {
 	struct sna *sna = to_sna_from_screen(screen);
-	int bpp = BitsPerPixel(depth);
+	int bpp = bits_per_pixel(depth);
 	int pitch = PixmapBytePad(width, depth);
 	struct sna_pixmap *priv;
 	PixmapPtr pixmap;
@@ -741,7 +807,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 
 		priv = sna_pixmap_attach(pixmap);
 		if (!priv) {
-			fbDestroyPixmap(pixmap);
+			FreePixmap(pixmap);
 			return NullPixmap;
 		}
 	}
@@ -749,7 +815,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 	priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false);
 	if (priv->cpu_bo == NULL) {
 		free(priv);
-		fbDestroyPixmap(pixmap);
+		FreePixmap(pixmap);
 		return GetScratchPixmapHeader(screen, width, height, depth,
 					      bpp, pitch, addr);
 	}
@@ -787,7 +853,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, tiling=%d)\n", __FUNCTION__,
 	     width, height, depth, tiling));
 
-	bpp = BitsPerPixel(depth);
+	bpp = bits_per_pixel(depth);
 	if (tiling == I915_TILING_Y && !sna->have_render)
 		tiling = I915_TILING_X;
 
@@ -833,7 +899,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 
 		priv = sna_pixmap_attach(pixmap);
 		if (!priv) {
-			fbDestroyPixmap(pixmap);
+			FreePixmap(pixmap);
 			return NullPixmap;
 		}
 	}
@@ -846,7 +912,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 				      CREATE_TEMPORARY);
 	if (priv->gpu_bo == NULL) {
 		free(priv);
-		fbDestroyPixmap(pixmap);
+		FreePixmap(pixmap);
 		return NullPixmap;
 	}
 
@@ -956,15 +1022,14 @@ fallback:
 
 static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 {
-	if (pixmap->refcnt == 1) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-		if (priv) {
-			if (!sna_destroy_private(pixmap, priv))
-				return TRUE;
-		}
-	}
+	if (--pixmap->refcnt)
+		return TRUE;
 
-	return fbDestroyPixmap(pixmap);
+	if (!sna_destroy_private(pixmap))
+		return TRUE;
+
+	FreePixmap(pixmap);
+	return TRUE;
 }
 
 static inline bool pixmap_inplace(struct sna *sna,
@@ -1991,9 +2056,9 @@ inline static unsigned drawable_gc_flags(DrawablePtr draw,
 		return MOVE_READ | MOVE_WRITE;
 	}
 
-	if (fbGetGCPrivate(gc)->and) {
+	if (fb_gc(gc)->and) {
 		DBG(("%s: read due to rop %d:%x\n",
-		     __FUNCTION__, gc->alu, (unsigned)fbGetGCPrivate(gc)->and));
+		     __FUNCTION__, gc->alu, (unsigned)fb_gc(gc)->and));
 		return MOVE_READ | MOVE_WRITE;
 	}
 
@@ -2425,7 +2490,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	struct sna *sna = to_sna_from_screen(screen);
 	PixmapPtr pixmap;
 	struct sna_pixmap *priv;
-	int bpp = BitsPerPixel(depth);
+	int bpp = bits_per_pixel(depth);
 	void *ptr;
 
 	DBG(("%s(%d, %d, %d, flags=%x)\n", __FUNCTION__,
@@ -2446,7 +2511,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 
 		priv = malloc(sizeof(*priv));
 		if (!priv) {
-			fbDestroyPixmap(pixmap);
+			FreePixmap(pixmap);
 			return NullPixmap;
 		}
 
@@ -2461,7 +2526,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 					     flags, &ptr);
 	if (!priv->gpu_bo) {
 		free(priv);
-		fbDestroyPixmap(pixmap);
+		FreePixmap(pixmap);
 		return NullPixmap;
 	}
 
@@ -2699,25 +2764,35 @@ active:
 
 static bool must_check sna_validate_pixmap(DrawablePtr draw, PixmapPtr pixmap)
 {
-	bool ret = true;
-
 	if (draw->bitsPerPixel == pixmap->drawable.bitsPerPixel &&
 	    FbEvenTile(pixmap->drawable.width *
 		       pixmap->drawable.bitsPerPixel)) {
 		DBG(("%s: flushing pixmap\n", __FUNCTION__));
-		ret = sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE);
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
+			return false;
+
+		fbPadPixmap(pixmap);
 	}
 
-	return ret;
+	return true;
 }
 
-static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
+static bool must_check sna_gc_move_to_cpu(GCPtr gc,
+					  DrawablePtr drawable,
+					  RegionPtr region)
 {
 	struct sna_gc *sgc = sna_gc(gc);
 	long changes = sgc->changes;
 
 	DBG(("%s, changes=%lx\n", __FUNCTION__, changes));
 
+	assert(gc->ops == (GCOps *)&sna_gc_ops);
+	assert(gc->funcs == (GCFuncs *)&sna_gc_funcs);
+
+	sgc->priv = region;
+	gc->ops = (GCOps *)&sna_gc_ops__cpu;
+	gc->funcs = (GCFuncs *)&sna_gc_funcs__cpu;
+
 	if (gc->clientClipType == CT_PIXMAP) {
 		PixmapPtr clip = gc->clientClip;
 		gc->clientClip = BitmapToRegion(gc->pScreen, clip);
@@ -2730,6 +2805,11 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
 	if (changes || drawable->serialNumber != sgc->serial) {
 		gc->serialNumber = sgc->serial;
 
+		if (fb_gc(gc)->bpp != drawable->bitsPerPixel) {
+			changes |= GCStipple | GCForeground | GCBackground | GCPlaneMask;
+			fb_gc(gc)->bpp = drawable->bitsPerPixel;
+		}
+
 		if (changes & GCTile && !gc->tileIsPixel) {
 			DBG(("%s: flushing tile pixmap\n", __FUNCTION__));
 			if (!sna_validate_pixmap(drawable, gc->tile.pixmap))
@@ -2738,7 +2818,7 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
 
 		if (changes & GCStipple && gc->stipple) {
 			DBG(("%s: flushing stipple pixmap\n", __FUNCTION__));
-			if (!sna_pixmap_move_to_cpu(gc->stipple, MOVE_READ))
+			if (!sna_validate_pixmap(drawable, gc->stipple))
 				return false;
 		}
 
@@ -2760,6 +2840,15 @@ static bool must_check sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
 	}
 }
 
+static void sna_gc_move_to_gpu(GCPtr gc)
+{
+	assert(gc->ops == (GCOps *)&sna_gc_ops__cpu);
+	assert(gc->funcs == (GCFuncs *)&sna_gc_funcs__cpu);
+
+	gc->ops = (GCOps *)&sna_gc_ops;
+	gc->funcs = (GCFuncs *)&sna_gc_funcs;
+}
+
 static inline bool clip_box(BoxPtr box, GCPtr gc)
 {
 	const BoxRec *clip;
@@ -3491,10 +3580,12 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (priv == NULL) {
 		DBG(("%s: fbPutImage, unattached(%d, %d, %d, %d)\n",
 		     __FUNCTION__, x, y, w, h));
-		if (sna_gc_move_to_cpu(gc, drawable))
+		if (sna_gc_move_to_cpu(gc, drawable, NULL)) {
 			fbPutImage(drawable, gc, depth,
 				   x, y, w, h, left,
 				   format, bits);
+			sna_gc_move_to_gpu(gc);
+		}
 		return;
 	}
 
@@ -3563,17 +3654,19 @@ fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
 	RegionTranslate(&region, -dx, -dy);
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable, gc,
 							       true)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
 	     __FUNCTION__, x, y, w, h));
 	fbPutImage(drawable, gc, depth, x, y, w, h, left, format, bits);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -4284,23 +4377,26 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			return NULL;
 
 		ret = NULL;
-		if (!sna_gc_move_to_cpu(gc, dst))
+		if (!sna_gc_move_to_cpu(gc, dst, &region))
 			goto out;
 
 		if (!sna_drawable_move_region_to_cpu(dst, &region, MOVE_READ | MOVE_WRITE))
-			goto out;
+			goto out_gc;
 
 		RegionTranslate(&region,
 				src_x - dst_x - dst->x + src->x,
 				src_y - dst_y - dst->y + src->y);
 		if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
-			goto out;
+			goto out_gc;
 
-		ret = fbCopyArea(src, dst, gc,
-				  src_x, src_y,
-				  width, height,
-				  dst_x, dst_y);
+		ret = miDoCopy(src, dst, gc,
+			       src_x, src_y,
+			       width, height,
+			       dst_x, dst_y,
+			       fbCopyNtoN, 0, 0);
 		FALLBACK_FLUSH(dst);
+out_gc:
+		sna_gc_move_to_gpu(gc);
 out:
 		RegionUninit(&region);
 		return ret;
@@ -4355,79 +4451,6 @@ find_clip_box_for_y(const BoxRec *begin, const BoxRec *end, int16_t y)
 	return find_clip_box_for_y(mid, end, y);
 }
 
-static void
-sna_fill_spans__cpu(DrawablePtr drawable,
-		    GCPtr gc, int n,
-		    DDXPointPtr pt, int *width, int sorted)
-{
-	RegionRec *clip = sna_gc(gc)->priv;
-
-	DBG(("%s x %d\n", __FUNCTION__, n));
-
-	while (n--) {
-		BoxRec b;
-
-		DBG(("%s: (%d, %d) + %d\n",
-		     __FUNCTION__, pt->x, pt->y, *width));
-
-		*(DDXPointRec *)&b = *pt++;
-		b.x2 = b.x1 + *width++;
-		b.y2 = b.y1 + 1;
-
-		if (!box_intersect(&b, &clip->extents))
-			continue;
-
-		if (region_is_singular(clip)) {
-			DBG(("%s: singular fill: (%d, %d) x %d\n",
-			     __FUNCTION__, b.x1, b.y1, b.x2 - b.x1));
-			fbFill(drawable, gc, b.x1, b.y1, b.x2 - b.x1, 1);
-		} else {
-			const BoxRec * const clip_start = RegionBoxptr(clip);
-			const BoxRec * const clip_end = clip_start + clip->data->numRects;
-			const BoxRec *c;
-
-			DBG(("%s: multiple fills: (%d, %d) x %d, clip start((%d, %d), (%d,%d)), end((%d, %d), (%d, %d))\n",
-			     __FUNCTION__, b.x1, b.y1, b.x2 - b.x1,
-			     clip_start->x1, clip_start->y1,
-			     clip_start->x2, clip_start->y2,
-			     clip_end[-1].x1, clip_end[-1].y1,
-			     clip_end[-1].x2, clip_end[-1].y2));
-
-			c = find_clip_box_for_y(clip_start, clip_end, b.y1);
-			while (c != clip_end) {
-				int16_t x1, x2;
-
-				DBG(("%s: clip box? (%d, %d), (%d, %d)\n",
-				     __FUNCTION__,
-				     c->x1, c->y1, c->x2, c->y2));
-
-				if (b.y2 <= c->y1 || b.x2 <= c->x1)
-					break;
-
-				if (b.x1 > c->x2) {
-					c++;
-					continue;
-				}
-
-				x1 = c->x1;
-				x2 = c->x2;
-				c++;
-
-				if (x1 < b.x1)
-					x1 = b.x1;
-				if (x2 > b.x2)
-					x2 = b.x2;
-				if (x2 > x1) {
-					DBG(("%s: fbFill(%d, %d) x %d\n",
-					     __FUNCTION__, x1, b.y1, x2 - x1));
-					fbFill(drawable, gc,
-					       x1, b.y1, x2 - x1, 1);
-				}
-			}
-		}
-	}
-}
-
 struct sna_fill_spans {
 	struct sna *sna;
 	PixmapPtr pixmap;
@@ -5300,16 +5323,18 @@ fallback:
 	if (!RegionNotEmpty(&region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, n > 1)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fbFillSpans\n", __FUNCTION__));
 	fbFillSpans(drawable, gc, n, pt, width, sorted);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -5339,16 +5364,18 @@ fallback:
 	if (!RegionNotEmpty(&region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, true)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fbSetSpans\n", __FUNCTION__));
 	fbSetSpans(drawable, gc, src, pt, width, n, sorted);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -5822,16 +5849,21 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
-	if (!sna_gc_move_to_cpu(gc, dst))
+	if (!sna_gc_move_to_cpu(gc, dst, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(dst, &region,
 					     MOVE_READ | MOVE_WRITE))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fbCopyPlane(%d, %d, %d, %d, %d,%d) %x\n",
 	     __FUNCTION__, src_x, src_y, w, h, dst_x, dst_y, (unsigned)bit));
-	ret = fbCopyPlane(src, dst, gc, src_x, src_y, w, h, dst_x, dst_y, bit);
+	ret = miDoCopy(src, dst, gc,
+		       src_x, src_y, w, h, dst_x, dst_y,
+		       src->bitsPerPixel > 1 ? fbCopyNto1 : fbCopy1toN,
+		       bit, 0);
 	FALLBACK_FLUSH(dst);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 	return ret;
@@ -5894,7 +5926,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 			b = box;
 		} while (n);
 	} else {
-		RegionPtr clip = fbGetCompositeClip(gc);
+		RegionPtr clip = gc->pCompositeClip;
 
 		while (n--) {
 			int x, y;
@@ -5976,13 +6008,6 @@ sna_poly_point_extents(DrawablePtr drawable, GCPtr gc,
 }
 
 static void
-sna_poly_point__cpu(DrawablePtr drawable, GCPtr gc,
-	       int mode, int n, DDXPointPtr pt)
-{
-	fbPolyPoint(drawable, gc, mode, n, pt);
-}
-
-static void
 sna_poly_point(DrawablePtr drawable, GCPtr gc,
 	       int mode, int n, DDXPointPtr pt)
 {
@@ -6035,16 +6060,18 @@ fallback:
 	if (!RegionNotEmpty(&region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable, gc,
 							       n > 1)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
 	fbPolyPoint(drawable, gc, mode, n, pt);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -7058,34 +7085,23 @@ fallback:
 	if (!RegionNotEmpty(&data.region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable, gc,
 							       !(data.flags & 4 && n == 2))))
-		goto out;
-
-	/* Install FillSpans in case we hit a fallback path in fbPolyLine */
-	sna_gc(gc)->priv = &data.region;
-	assert(gc->ops == (GCOps *)&sna_gc_ops);
-	gc->ops = (GCOps *)&sna_gc_ops__cpu;
+		goto out_gc;
 
 	DBG(("%s: fbPolyLine\n", __FUNCTION__));
 	fbPolyLine(drawable, gc, mode, n, pt);
 	FALLBACK_FLUSH(drawable);
 
-	gc->ops = (GCOps *)&sna_gc_ops;
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&data.region);
 }
 
-static void
-sna_poly_line__cpu(DrawablePtr drawable, GCPtr gc,
-		   int mode, int n, DDXPointPtr pt)
-{
-	fbPolyLine(drawable, gc, mode, n, pt);
-}
-
 static inline void box_from_seg(BoxPtr b, xSegment *seg, GCPtr gc)
 {
 	if (seg->x1 == seg->x2) {
@@ -7913,23 +7929,19 @@ fallback:
 	if (!RegionNotEmpty(&data.region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable, gc,
 							       !(data.flags & 4 && n == 1))))
-		goto out;
-
-	/* Install FillSpans in case we hit a fallback path in fbPolySegment */
-	sna_gc(gc)->priv = &data.region;
-	assert(gc->ops == (GCOps *)&sna_gc_ops);
-	gc->ops = (GCOps *)&sna_gc_ops__cpu;
+		goto out_gc;
 
 	DBG(("%s: fbPolySegment\n", __FUNCTION__));
 	fbPolySegment(drawable, gc, n, seg);
 	FALLBACK_FLUSH(drawable);
 
-	gc->ops = (GCOps *)&sna_gc_ops;
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&data.region);
 }
@@ -8519,16 +8531,18 @@ fallback:
 	if (!RegionNotEmpty(&region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, true)))
-		goto out;
+		goto out_gc;
 
-	DBG(("%s: fbPolyRectangle\n", __FUNCTION__));
-	fbPolyRectangle(drawable, gc, n, r);
+	DBG(("%s: miPolyRectangle\n", __FUNCTION__));
+	miPolyRectangle(drawable, gc, n, r);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -8698,23 +8712,19 @@ fallback:
 	if (!RegionNotEmpty(&data.region))
 		return;
 
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable,
 							       gc, true)))
-		goto out;
-
-	/* Install FillSpans in case we hit a fallback path in fbPolyArc */
-	sna_gc(gc)->priv = &data.region;
-	assert(gc->ops == (GCOps *)&sna_gc_ops);
-	gc->ops = (GCOps *)&sna_gc_ops__cpu;
+		goto out_gc;
 
 	DBG(("%s -- fbPolyArc\n", __FUNCTION__));
 	fbPolyArc(drawable, gc, n, arc);
 	FALLBACK_FLUSH(drawable);
 
-	gc->ops = (GCOps *)&sna_gc_ops;
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&data.region);
 }
@@ -9050,21 +9060,18 @@ fallback:
 		return;
 	}
 
-	if (!sna_gc_move_to_cpu(gc, draw))
+	if (!sna_gc_move_to_cpu(gc, draw, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &data.region,
 					     drawable_gc_flags(draw, gc,
 							       true)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback -- miFillPolygon -> sna_fill_spans__cpu\n",
 	     __FUNCTION__));
-	sna_gc(gc)->priv = &data.region;
-	assert(gc->ops == (GCOps *)&sna_gc_ops);
-	gc->ops = (GCOps *)&sna_gc_ops__cpu;
-
 	miFillPolygon(draw, gc, shape, mode, n, pt);
-	gc->ops = (GCOps *)&sna_gc_ops;
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&data.region);
 }
@@ -10465,72 +10472,18 @@ fallback:
 		return;
 	}
 
-	if (!sna_gc_move_to_cpu(gc, draw))
+	if (!sna_gc_move_to_cpu(gc, draw, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &region,
 					     drawable_gc_flags(draw, gc,
 							       n > 1)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback - fbPolyFillRect\n", __FUNCTION__));
-	if (region.data == NULL) {
-		do {
-			BoxRec box;
-
-			box.x1 = rect->x + draw->x;
-			box.y1 = rect->y + draw->y;
-			box.x2 = bound(box.x1, rect->width);
-			box.y2 = bound(box.y1, rect->height);
-			rect++;
-
-			if (box_intersect(&box, &region.extents)) {
-				DBG(("%s: fallback - fbFill((%d, %d), (%d, %d))\n",
-				     __FUNCTION__,
-				     box.x1, box.y1,
-				     box.x2-box.x1, box.y2-box.y1));
-				fbFill(draw, gc,
-				       box.x1, box.y1,
-				       box.x2-box.x1, box.y2-box.y1);
-			}
-		} while (--n);
-	} else {
-		const BoxRec * const clip_start = RegionBoxptr(&region);
-		const BoxRec * const clip_end = clip_start + region.data->numRects;
-		const BoxRec *c;
-
-		do {
-			BoxRec box;
-
-			box.x1 = rect->x + draw->x;
-			box.y1 = rect->y + draw->y;
-			box.x2 = bound(box.x1, rect->width);
-			box.y2 = bound(box.y1, rect->height);
-			rect++;
-
-			c = find_clip_box_for_y(clip_start,
-						clip_end,
-						box.y1);
-
-			while (c != clip_end) {
-				BoxRec b;
-
-				if (box.y2 <= c->y1)
-					break;
-
-				b = box;
-				if (box_intersect(&b, c++)) {
-					DBG(("%s: fallback - fbFill((%d, %d), (%d, %d))\n",
-					     __FUNCTION__,
-					       b.x1, b.y1,
-					       b.x2-b.x1, b.y2-b.y1));
-					fbFill(draw, gc,
-					       b.x1, b.y1,
-					       b.x2-b.x1, b.y2-b.y1);
-				}
-			}
-		} while (--n);
-	}
+	fbPolyFillRect(draw, gc, n, rect);
 	FALLBACK_FLUSH(draw);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -10650,21 +10603,19 @@ fallback:
 		return;
 	}
 
-	if (!sna_gc_move_to_cpu(gc, draw))
+	if (!sna_gc_move_to_cpu(gc, draw, &data.region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(draw, &data.region,
 					     drawable_gc_flags(draw, gc,
 							       true)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback -- miPolyFillArc -> sna_fill_spans__cpu\n",
 	     __FUNCTION__));
-	sna_gc(gc)->priv = &data.region;
-	assert(gc->ops == (GCOps *)&sna_gc_ops);
-	gc->ops = (GCOps *)&sna_gc_ops__cpu;
 
 	miPolyFillArc(draw, gc, n, arc);
-	gc->ops = (GCOps *)&sna_gc_ops;
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&data.region);
 }
@@ -10682,6 +10633,8 @@ sna_realize_font(ScreenPtr screen, FontPtr font)
 {
 	struct sna_font *priv;
 
+	DBG(("%s (key=%d)\n", __FUNCTION__, sna_font_key));
+
 	priv = calloc(1, sizeof(struct sna_font));
 	if (priv == NULL)
 		return FALSE;
@@ -10700,6 +10653,8 @@ sna_unrealize_font(ScreenPtr screen, FontPtr font)
 	struct sna_font *priv = FontGetPrivate(font, sna_font_key);
 	int i, j;
 
+	DBG(("%s (key=%d)\n", __FUNCTION__, sna_font_key));
+
 	if (priv == NULL)
 		return TRUE;
 
@@ -11110,17 +11065,19 @@ force_fallback:
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
 
-		if (!sna_gc_move_to_cpu(gc, drawable))
+		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
 						     drawable_gc_flags(drawable,
 								       gc, true)))
-			goto out;
+			goto out_gc;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 		fbPolyGlyphBlt(drawable, gc, x, y, n,
 			       info, FONTGLYPHS(gc->font));
 		FALLBACK_FLUSH(drawable);
+out_gc:
+		sna_gc_move_to_gpu(gc);
 	}
 out:
 	RegionUninit(&region);
@@ -11203,16 +11160,18 @@ force_fallback:
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
 				     &n, info);
 
-		if (!sna_gc_move_to_cpu(gc, drawable))
+		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
 						     drawable_gc_flags(drawable, gc, true)))
-			goto out;
+			goto out_gc;
 
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 		fbPolyGlyphBlt(drawable, gc, x, y, n,
 			       info, FONTGLYPHS(gc->font));
 		FALLBACK_FLUSH(drawable);
+out_gc:
+		sna_gc_move_to_gpu(gc);
 	}
 out:
 	RegionUninit(&region);
@@ -11304,17 +11263,19 @@ force_fallback:
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
 				     Linear8Bit, &n, info);
 
-		if (!sna_gc_move_to_cpu(gc, drawable))
+		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
 						     drawable_gc_flags(drawable,
 								       gc, n > 1)))
-			goto out;
+			goto out_gc;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 		fbImageGlyphBlt(drawable, gc, x, y, n,
 				info, FONTGLYPHS(gc->font));
 		FALLBACK_FLUSH(drawable);
+out_gc:
+		sna_gc_move_to_gpu(gc);
 	}
 out:
 	RegionUninit(&region);
@@ -11399,17 +11360,19 @@ force_fallback:
 				     FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
 				     &n, info);
 
-		if (!sna_gc_move_to_cpu(gc, drawable))
+		if (!sna_gc_move_to_cpu(gc, drawable, &region))
 			goto out;
 		if (!sna_drawable_move_region_to_cpu(drawable, &region,
 						     drawable_gc_flags(drawable,
 								       gc, n > 1)))
-			goto out;
+			goto out_gc;
 
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 		fbImageGlyphBlt(drawable, gc, x, y, n,
 				info, FONTGLYPHS(gc->font));
 		FALLBACK_FLUSH(drawable);
+out_gc:
+		sna_gc_move_to_gpu(gc);
 	}
 out:
 	RegionUninit(&region);
@@ -11695,17 +11658,19 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, n > 1)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 	fbImageGlyphBlt(drawable, gc, x, y, n, info, base);
 	FALLBACK_FLUSH(drawable);
 
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -11774,17 +11739,19 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, true)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 	fbPolyGlyphBlt(drawable, gc, x, y, n, info, base);
 	FALLBACK_FLUSH(drawable);
 
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -11956,19 +11923,21 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
 	}
 
 	DBG(("%s: fallback\n", __FUNCTION__));
-	if (!sna_gc_move_to_cpu(gc, drawable))
+	if (!sna_gc_move_to_cpu(gc, drawable, &region))
 		goto out;
 	if (!sna_pixmap_move_to_cpu(bitmap, MOVE_READ))
-		goto out;
+		goto out_gc;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region,
 					     drawable_gc_flags(drawable,
 							       gc, false)))
-		goto out;
+		goto out_gc;
 
 	DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",
 	     __FUNCTION__, w, h, x, y));
 	fbPushPixels(gc, bitmap, drawable, w, h, x, y);
 	FALLBACK_FLUSH(drawable);
+out_gc:
+	sna_gc_move_to_gpu(gc);
 out:
 	RegionUninit(&region);
 }
@@ -11997,26 +11966,26 @@ static const GCOps sna_gc_ops = {
 };
 
 static const GCOps sna_gc_ops__cpu = {
-	sna_fill_spans__cpu,
-	sna_set_spans,
-	sna_put_image,
-	sna_copy_area,
-	sna_copy_plane,
-	sna_poly_point__cpu,
-	sna_poly_line__cpu,
-	sna_poly_segment,
-	sna_poly_rectangle,
-	sna_poly_arc,
-	sna_poly_fill_polygon,
-	sna_poly_fill_rect,
-	sna_poly_fill_arc,
-	sna_poly_text8,
-	sna_poly_text16,
-	sna_image_text8,
-	sna_image_text16,
-	sna_image_glyph,
-	sna_poly_glyph,
-	sna_push_pixels,
+	fbFillSpans,
+	fbSetSpans,
+	fbPutImage,
+	fbCopyArea,
+	fbCopyPlane,
+	fbPolyPoint,
+	fbPolyLine,
+	fbPolySegment,
+	miPolyRectangle,
+	fbPolyArc,
+	miFillPolygon,
+	fbPolyFillRect,
+	miPolyFillArc,
+	miPolyText8,
+	miPolyText16,
+	miImageText8,
+	miImageText16,
+	fbImageGlyphBlt,
+	fbPolyGlyphBlt,
+	fbPushPixels
 };
 
 static GCOps sna_gc_ops__tmp = {
@@ -12065,10 +12034,22 @@ static const GCFuncs sna_gc_funcs = {
 	miCopyClip
 };
 
+static const GCFuncs sna_gc_funcs__cpu = {
+	fbValidateGC,
+	miChangeGC,
+	miCopyGC,
+	miDestroyGC,
+	miChangeClip,
+	miDestroyClip,
+	miCopyClip
+};
+
 static int sna_create_gc(GCPtr gc)
 {
-	if (!fbCreateGC(gc))
-		return FALSE;
+	gc->miTranslate = 1;
+	gc->fExpose = 1;
+
+	fb_gc(gc)->bpp = bits_per_pixel(gc->depth);
 
 	gc->funcs = (GCFuncs *)&sna_gc_funcs;
 	gc->ops = (GCOps *)&sna_gc_ops;
@@ -12084,6 +12065,9 @@ sna_get_image(DrawablePtr drawable,
 	RegionRec region;
 	unsigned int flags;
 
+	if (!fbDrawableEnabled(drawable))
+		return;
+
 	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	region.extents.x1 = x + drawable->x;
@@ -12126,6 +12110,9 @@ sna_get_spans(DrawablePtr drawable, int wMax,
 {
 	RegionRec region;
 
+	if (!fbDrawableEnabled(drawable))
+		return;
+
 	if (sna_spans_extents(drawable, NULL, n, pt, width, &region.extents) == 0)
 		return;
 
@@ -12145,13 +12132,8 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 	int dx, dy;
 
 	DBG(("%s origin=(%d, %d)\n", __FUNCTION__, origin.x, origin.y));
-
-	if (wedged(sna)) {
-		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
-		if (sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
-			fbCopyWindow(win, origin, src);
+	if (!fbWindowEnabled(win))
 		return;
-	}
 
 	dx = origin.x - win->drawable.x;
 	dy = origin.y - win->drawable.y;
@@ -12164,8 +12146,17 @@ sna_copy_window(WindowPtr win, DDXPointRec origin, RegionPtr src)
 		RegionTranslate(&dst, -pixmap->screen_x, -pixmap->screen_y);
 #endif
 
-	miCopyRegion(&pixmap->drawable, &pixmap->drawable,
-		     NULL, &dst, dx, dy, sna_self_copy_boxes, 0, NULL);
+	if (wedged(sna)) {
+		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE))
+			return;
+
+		miCopyRegion(&pixmap->drawable, &pixmap->drawable,
+			     0, &dst, dx, dy, fbCopyNtoN, 0, NULL);
+	} else {
+		miCopyRegion(&pixmap->drawable, &pixmap->drawable,
+			     NULL, &dst, dx, dy, sna_self_copy_boxes, 0, NULL);
+	}
 
 	RegionUninit(&dst);
 }
@@ -12189,7 +12180,7 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 		ret &= sna_validate_pixmap(&win->drawable, win->border.pixmap);
 	}
 
-	return ret && fbChangeWindowAttributes(win, mask);
+	return ret;
 }
 
 static void
@@ -12585,20 +12576,112 @@ static void sna_accel_debug_memory(struct sna *sna)
 static void sna_accel_debug_memory(struct sna *sna) { }
 #endif
 
-Bool sna_accel_pre_init(struct sna *sna)
+static ShmFuncs shm_funcs = { sna_pixmap_create_shm, NULL };
+
+static PixmapPtr
+sna_get_window_pixmap(WindowPtr window)
+{
+	return get_window_pixmap(window);
+}
+
+static void
+sna_set_window_pixmap(WindowPtr window, PixmapPtr pixmap)
+{
+	*(PixmapPtr *)window->devPrivates = pixmap;
+}
+
+static Bool
+sna_create_window(WindowPtr win)
 {
+	sna_set_window_pixmap(win, win->drawable.pScreen->devPrivate);
 	return TRUE;
 }
 
-static ShmFuncs shm_funcs = { sna_pixmap_create_shm, NULL };
+static Bool
+sna_map_window(WindowPtr win)
+{
+	return TRUE;
+}
+
+static Bool
+sna_position_window(WindowPtr win, int x, int y)
+{
+	return TRUE;
+}
 
-Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
+static Bool
+sna_unmap_window(WindowPtr win)
+{
+	return TRUE;
+}
+
+static Bool
+sna_destroy_window(WindowPtr win)
+{
+	return TRUE;
+}
+
+static void
+sna_query_best_size(int class,
+		    unsigned short *width, unsigned short *height,
+		    ScreenPtr screen)
+{
+	unsigned short w;
+
+	switch (class) {
+	case CursorShape:
+		if (*width > screen->width)
+			*width = screen->width;
+		if (*height > screen->height)
+			*height = screen->height;
+		break;
+
+	case TileShape:
+	case StippleShape:
+		w = *width;
+		if ((w & (w - 1)) && w < FB_UNIT) {
+			for (w = 1; w < *width; w <<= 1)
+				;
+			*width = w;
+		}
+		break;
+	}
+}
+
+static void sna_store_colors(ColormapPtr cmap, int n, xColorItem *def)
+{
+}
+
+static bool sna_picture_init(ScreenPtr screen)
+{
+	PictureScreenPtr ps;
+
+	if (!miPictureInit(screen, NULL, 0))
+		return false;
+
+	ps = GetPictureScreen(screen);
+	assert(ps != NULL);
+
+	ps->Composite = sna_composite;
+	ps->CompositeRects = sna_composite_rectangles;
+	ps->Glyphs = sna_glyphs;
+	ps->UnrealizeGlyph = sna_glyph_unrealize;
+	ps->AddTraps = sna_add_traps;
+	ps->Trapezoids = sna_composite_trapezoids;
+	ps->Triangles = sna_composite_triangles;
+#if PICTURE_SCREEN_VERSION >= 2
+	ps->TriStrip = sna_composite_tristrip;
+	ps->TriFan = sna_composite_trifan;
+#endif
+
+	return true;
+}
+
+bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 {
 	const char *backend;
 
 	sna_font_key = AllocateFontPrivateIndex();
-	screen->RealizeFont = sna_realize_font;
-	screen->UnrealizeFont = sna_unrealize_font;
 
 	list_init(&sna->dirty_pixmaps);
 	list_init(&sna->active_pixmaps);
@@ -12611,35 +12694,53 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	sna->timer_expire[DEBUG_MEMORY_TIMER] = GetTimeInMillis()+ 10 * 1000;
 #endif
 
-	screen->CreateGC = sna_create_gc;
+	screen->defColormap = FakeClientID(0);
+	/* let CreateDefColormap do whatever it wants for pixels */
+	screen->blackPixel = screen->whitePixel = (Pixel) 0;
+	screen->QueryBestSize = sna_query_best_size;
+	assert(screen->GetImage == NULL);
 	screen->GetImage = sna_get_image;
+	assert(screen->GetSpans == NULL);
 	screen->GetSpans = sna_get_spans;
-	screen->CopyWindow = sna_copy_window;
+	assert(screen->CreateWindow == NULL);
+	screen->CreateWindow = sna_create_window;
+	assert(screen->DestroyWindow == NULL);
+	screen->DestroyWindow = sna_destroy_window;
+	screen->PositionWindow = sna_position_window;
 	screen->ChangeWindowAttributes = sna_change_window_attributes;
+	screen->RealizeWindow = sna_map_window;
+	screen->UnrealizeWindow = sna_unmap_window;
+	screen->CopyWindow = sna_copy_window;
+	assert(screen->CreatePixmap == NULL);
 	screen->CreatePixmap = sna_create_pixmap;
+	assert(screen->DestroyPixmap == NULL);
 	screen->DestroyPixmap = sna_destroy_pixmap;
-
-#ifdef RENDER
-	{
-		PictureScreenPtr ps = GetPictureScreenIfSet(screen);
-		if (ps) {
-			ps->Composite = sna_composite;
-			ps->CompositeRects = sna_composite_rectangles;
-			ps->Glyphs = sna_glyphs;
-			ps->UnrealizeGlyph = sna_glyph_unrealize;
-			ps->AddTraps = sna_add_traps;
-			ps->Trapezoids = sna_composite_trapezoids;
-			ps->Triangles = sna_composite_triangles;
-#if PICTURE_SCREEN_VERSION >= 2
-			ps->TriStrip = sna_composite_tristrip;
-			ps->TriFan = sna_composite_trifan;
-#endif
-		}
-	}
-#endif
+	screen->RealizeFont = sna_realize_font;
+	screen->UnrealizeFont = sna_unrealize_font;
+	assert(screen->CreateGC == NULL);
+	screen->CreateGC = sna_create_gc;
+	screen->CreateColormap = miInitializeColormap;
+	screen->DestroyColormap = (void (*)(ColormapPtr)) NoopDDA;
+	screen->InstallColormap = miInstallColormap;
+	screen->UninstallColormap = miUninstallColormap;
+	screen->ListInstalledColormaps = miListInstalledColormaps;
+	screen->ResolveColor = miResolveColor;
+	/* StoreColors is wrapped by PictureInit later, in sna_picture_init() */
+	screen->StoreColors = sna_store_colors;
+	screen->BitmapToRegion = fbBitmapToRegion;
+
+	assert(screen->GetWindowPixmap == NULL);
+	screen->GetWindowPixmap = sna_get_window_pixmap;
+	assert(screen->SetWindowPixmap == NULL);
+	screen->SetWindowPixmap = sna_set_window_pixmap;
 
 	if (USE_SHM_VMAP && sna->kgem.has_vmap)
 		ShmRegisterFuncs(screen, &shm_funcs);
+	else
+		ShmRegisterFbFuncs(screen);
+
+	if (!sna_picture_init(screen))
+		return false;
 
 	backend = "no";
 	sna->have_render = false;
@@ -12718,9 +12819,9 @@ void sna_accel_close(struct sna *sna)
 	sna_glyphs_close(sna);
 
 	if (sna->freed_pixmap) {
-		assert(sna->freed_pixmap->refcnt == 1);
+		assert(sna->freed_pixmap->refcnt == 0);
 		free(sna_pixmap(sna->freed_pixmap));
-		fbDestroyPixmap(sna->freed_pixmap);
+		FreePixmap(sna->freed_pixmap);
 		sna->freed_pixmap = NULL;
 	}
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 7690afe..0b0471e 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -38,9 +38,6 @@
 #include "sna_reg.h"
 #include "rop.h"
 
-#include <mipict.h>
-#include <fbpict.h>
-
 #if DEBUG_BLT
 #undef DBG
 #define DBG(x) ErrorF x
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 4941477..606acb6 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -32,9 +32,9 @@
 #include "sna.h"
 #include "sna_render.h"
 #include "sna_render_inline.h"
+#include "fb/fbpict.h"
 
 #include <mipict.h>
-#include <fbpict.h>
 
 #if DEBUG_COMPOSITE
 #undef DBG
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0b1a494..7c1d3bc 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -45,11 +45,9 @@
 #include <xf86drm.h>
 #include <xf86DDC.h> /* for xf86InterpretEDID */
 
-#include <fb.h>
-#include <fbpict.h>
-
 #include "sna.h"
 #include "sna_reg.h"
+#include "fb/fbpict.h"
 
 #include "intel_options.h"
 
@@ -2651,7 +2649,8 @@ sna_crtc_redisplay__fallback(xf86CrtcPtr crtc, RegionPtr region)
 	if (ptr == NULL)
 		return;
 
-	pixmap = fbCreatePixmap(screen, 0, 0, sna->front->drawable.depth, 0);
+	pixmap = sna_pixmap_create_unattached(screen,
+					      0, 0, sna->front->drawable.depth);
 	if (pixmap == NullPixmap)
 		return;
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index eb4776b..d12c2b0 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -48,8 +48,10 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xf86cmap.h>
 #include <xf86drm.h>
 #include <xf86RandR12.h>
+#include <mi.h>
 #include <micmap.h>
-#include <fb.h>
+#include <mipict.h>
+#include <mibstore.h>
 
 #include "compiler.h"
 #include "sna.h"
@@ -78,11 +80,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define DBG(x) ErrorF x
 #endif
 
-DevPrivateKeyRec sna_private_index;
-DevPrivateKeyRec sna_pixmap_index;
-DevPrivateKeyRec sna_gc_index;
-DevPrivateKeyRec sna_glyph_key;
-DevPrivateKeyRec sna_glyph_image_key;
+static DevPrivateKeyRec sna_private_index;
+static DevPrivateKeyRec sna_pixmap_index;
+static DevPrivateKeyRec sna_gc_index;
+static DevPrivateKeyRec sna_glyph_key;
+static DevPrivateKeyRec sna_window_key;
 
 static Bool sna_enter_vt(VT_FUNC_ARGS_DECL);
 
@@ -556,16 +558,10 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	/* Set display resolution */
 	xf86SetDpi(scrn, 0, 0);
 
-	/* Load the required sub modules */
-	if (!xf86LoadSubModule(scrn, "fb")) {
-		PreInitCleanup(scrn);
-		return FALSE;
-	}
-
 	/* Load the dri2 module if requested. */
 	xf86LoadSubModule(scrn, "dri2");
 
-	return sna_accel_pre_init(sna);
+	return TRUE;
 }
 
 static void
@@ -753,6 +749,8 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
+	DepthPtr depths;
+	int d;
 
 	DBG(("%s\n", __FUNCTION__));
 
@@ -771,11 +769,12 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 
 	xf86_cursors_fini(screen);
 
-	/* XXX unhook devPrivate otherwise fbCloseScreen frees it! */
-	screen->devPrivate = NULL;
+	depths = screen->allowedDepths;
+	for (d = 0; d < screen->numDepths; d++)
+		free(depths[d].vids);
+	free(depths);
 
-	screen->CloseScreen = sna->CloseScreen;
-	(*screen->CloseScreen) (CLOSE_SCREEN_ARGS);
+	free(screen->visuals);
 
 	if (sna->directRenderingOpen) {
 		sna_dri_close(sna, screen);
@@ -812,7 +811,7 @@ sna_register_all_privates(void)
 	assert(sna_pixmap_index.offset == sizeof(void*));
 
 	if (!dixRegisterPrivateKey(&sna_gc_index, PRIVATE_GC,
-				   sizeof(struct sna_gc)))
+				   sizeof(FbGCPrivate)))
 		return FALSE;
 	assert(sna_gc_index.offset == 0);
 
@@ -821,6 +820,11 @@ sna_register_all_privates(void)
 		return FALSE;
 	assert(sna_glyph_key.offset == 0);
 
+	if (!dixRegisterPrivateKey(&sna_window_key,
+				   PRIVATE_WINDOW, 0))
+		return FALSE;
+	assert(sna_window_key.offset == 0);
+
 	return TRUE;
 }
 
@@ -835,7 +839,12 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
-	VisualPtr visual;
+	VisualPtr visuals;
+	DepthPtr depths;
+	int nvisuals;
+	int ndepths;
+	int rootdepth;
+	VisualID defaultVisual;
 
 	DBG(("%s\n", __FUNCTION__));
 
@@ -852,16 +861,23 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	if (!miSetPixmapDepths())
 		return FALSE;
 
-	if (!fbScreenInit(screen, NULL,
+	rootdepth = 0;
+	if (!miInitVisuals(&visuals, &depths, &nvisuals, &ndepths, &rootdepth,
+			   &defaultVisual,
+			   ((unsigned long)1 << (scrn->bitsPerPixel - 1)),
+			   8, -1))
+		return FALSE;
+
+	if (!miScreenInit(screen, NULL,
 			  scrn->virtualX, scrn->virtualY,
-			  scrn->xDpi, scrn->yDpi,
-			  scrn->displayWidth, scrn->bitsPerPixel))
+			  scrn->xDpi, scrn->yDpi, 0,
+			  rootdepth, ndepths, depths,
+			  defaultVisual, nvisuals, visuals))
 		return FALSE;
-	assert(fbGetWinPrivateKey()->offset == 0);
 
 	if (scrn->bitsPerPixel > 8) {
 		/* Fixup RGB ordering */
-		visual = screen->visuals + screen->numVisuals;
+		VisualPtr visual = screen->visuals + screen->numVisuals;
 		while (--visual >= screen->visuals) {
 			if ((visual->class | DynamicClass) == DirectColor) {
 				visual->offsetRed = scrn->offset.red;
@@ -874,16 +890,16 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 		}
 	}
 
-	fbPictureInit(screen, NULL, 0);
-
-	xf86SetBlackWhitePixels(screen);
-
+	assert(screen->CloseScreen == NULL);
+	screen->CloseScreen = sna_close_screen;
 	if (!sna_accel_init(screen, sna)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "Hardware acceleration initialization failed\n");
 		return FALSE;
 	}
 
+	xf86SetBlackWhitePixels(screen);
+
 	miInitializeBackingStore(screen);
 	xf86SetBackingStore(screen);
 	xf86SetSilkenMouse(screen);
@@ -914,8 +930,6 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	screen->WakeupHandler = sna_wakeup_handler;
 
 	screen->SaveScreen = xf86SaveScreen;
-	sna->CloseScreen = screen->CloseScreen;
-	screen->CloseScreen = sna_close_screen;
 	screen->CreateScreenResources = sna_create_screen_resources;
 
 	if (!xf86CrtcScreenInit(screen))
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index d9a3dcf..0179520 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -65,10 +65,9 @@
 #include "sna.h"
 #include "sna_render.h"
 #include "sna_render_inline.h"
+#include "fb/fbpict.h"
 
 #include <mipict.h>
-#include <fbpict.h>
-#include <fb.h>
 
 #if DEBUG_GLYPHS
 #undef DBG
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index c22965c..f6a562b 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -28,8 +28,7 @@
 #include "sna.h"
 #include "sna_render.h"
 #include "sna_render_inline.h"
-
-#include <fb.h>
+#include "fb/fbpict.h"
 
 #if DEBUG_RENDER
 #undef DBG
@@ -108,7 +107,7 @@ static Bool
 no_render_copy_boxes(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		     PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		     const BoxRec *box, int n)
+		     const BoxRec *box, int n, unsigned flags)
 {
 	DBG(("%s (n=%d)\n", __FUNCTION__, n));
 
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index b20eceb..ae14d79 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -31,8 +31,7 @@
 
 #include "sna.h"
 #include "sna_render.h"
-
-#include <fbpict.h>
+#include "fb/fbpict.h"
 
 #if DEBUG_RENDER
 #undef DBG
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index d83c083..56c6a3e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -34,10 +34,9 @@
 #include "sna.h"
 #include "sna_render.h"
 #include "sna_render_inline.h"
+#include "fb/fbpict.h"
 
-#include <fb.h>
 #include <mipict.h>
-#include <fbpict.h>
 
 #if DEBUG_TRAPEZOIDS
 #undef DBG
@@ -5213,9 +5212,8 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	DBG(("%s: mask (%dx%d), dx=(%d, %d)\n",
 	     __FUNCTION__, extents.x2, extents.y2, dx, dy));
-	scratch = fbCreatePixmap(screen,
-				 extents.x2, extents.y2, 8,
-				 CREATE_PIXMAP_USAGE_SCRATCH);
+	scratch = sna_pixmap_create_unattached(screen,
+					       extents.x2, extents.y2, 8);
 	if (!scratch)
 		return true;
 
@@ -5882,8 +5880,17 @@ sna_add_traps(PicturePtr picture, INT16 x, INT16 y, int n, xTrap *t)
 
 	DBG(("%s -- fallback\n", __FUNCTION__));
 	if (sna_drawable_move_to_cpu(picture->pDrawable,
-				     MOVE_READ | MOVE_WRITE))
-		fbAddTraps(picture, x, y, n, t);
+				     MOVE_READ | MOVE_WRITE)) {
+		pixman_image_t *image;
+		int dx, dy;
+
+		if (!(image = image_from_pict(picture, FALSE, &dx, &dy)))
+			return;
+
+		pixman_add_traps(image, x + dx, y + dy, n, (pixman_trap_t *)t);
+
+		free_pixman_pict(picture, image);
+	}
 }
 
 static inline void
commit 5d2f88fd9972c62c87098ddc7fee7b6f0cea0fdb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 10:07:51 2012 +0100

    sna: Add a couple of DBG flags to control upload buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2ace6ca..2578ff9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -60,6 +60,8 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG_NO_LLC 0
 #define DBG_NO_SEMAPHORES 0
 #define DBG_NO_MADV 0
+#define DBG_NO_UPLOAD_CACHE 0
+#define DBG_NO_UPLOAD_ACTIVE 0
 #define DBG_NO_MAP_UPLOAD 0
 #define DBG_NO_RELAXED_FENCING 0
 #define DBG_DUMP 0
@@ -1682,7 +1684,8 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 		if (bo->mmapped) {
 			assert(!bo->need_io);
-			if (bo->used + PAGE_SIZE <= bytes(&bo->base) &&
+			if (!DBG_NO_UPLOAD_ACTIVE &&
+			    bo->used + PAGE_SIZE <= bytes(&bo->base) &&
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
@@ -3793,6 +3796,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (kgem->has_llc)
 		flags &= ~KGEM_BUFFER_INPLACE;
 
+#if !DBG_NO_UPLOAD_CACHE
 	list_for_each_entry(bo, &kgem->batch_partials, base.list) {
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
@@ -3863,6 +3867,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 	}
+#endif
 
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
commit 18b3090381ac0fb61df96a7c6b2f12aacf180ee8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jul 7 00:06:36 2012 +0100

    sna: Fixup USE_INPLACE debug hint
    
    After we enabled inplace readback, we can just apply the INPLACE hint to
    all fallbacks, but instead apply it to the automatic testing for a
    potential inplace operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 794086c..e553baf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1065,7 +1065,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 	     priv->gpu_bo ? priv->gpu_bo->handle : 0,
 	     priv->gpu_damage, priv->cpu_damage, priv->clear));
 
-	if ((flags & MOVE_READ) == 0) {
+	if (USE_INPLACE && (flags & MOVE_READ) == 0) {
 		assert(flags & MOVE_WRITE);
 		DBG(("%s: no readbck, discarding gpu damage [%d], pending clear[%d]\n",
 		     __FUNCTION__, priv->gpu_damage != NULL, priv->clear));
@@ -1485,7 +1485,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 	}
 
-	if ((flags & MOVE_READ) == 0) {
+	if (USE_INPLACE && (flags & MOVE_READ) == 0) {
 		DBG(("%s: no read, checking to see if we can stream the write into the GPU bo\n",
 		     __FUNCTION__));
 		assert(flags & MOVE_WRITE);
@@ -2001,8 +2001,6 @@ inline static unsigned drawable_gc_flags(DrawablePtr draw,
 	     __FUNCTION__, drawable_gc_inplace_hint(draw, gc)));
 
 	flags = MOVE_WRITE;
-	if (USE_INPLACE)
-		flags |= MOVE_INPLACE_HINT;
 	if (read) {
 		DBG(("%s: partial write\n", __FUNCTION__));
 		flags |= MOVE_READ;
commit 24b59a8955eb15522ad334a541530aca937fcf07
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 6 14:16:36 2012 +0100

    sna: Refactor a common function: is_clipped()
    
    Had I done this earlier, I would not have the bug in the open-coded
    version. Le sigh.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index bda3cb8..3219e15 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -703,4 +703,12 @@ inline static bool is_power_of_two(unsigned x)
 	return (x & (x-1)) == 0;
 }
 
+inline static bool is_clipped(const RegionRec *r,
+			      const DrawableRec *d)
+{
+	return (r->data ||
+		r->extents.x2 - r->extents.x1 != d->width ||
+		r->extents.y2 - r->extents.y1 != d->height);
+}
+
 #endif /* _SNA_H */
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 36b40a7..e1b5de6 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -522,9 +522,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	if (draw->type != DRAWABLE_PIXMAP) {
 		WindowPtr win = (WindowPtr)draw;
 
-		if (win->clipList.data ||
-		    win->clipList.extents.x2 - win->clipList.extents.x1 != draw->width ||
-		    win->clipList.extents.y2 - win->clipList.extents.y1 != draw->height) {
+		if (is_clipped(&win->clipList, draw)) {
 			DBG(("%s: draw=(%d, %d), delta=(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
 			     __FUNCTION__, draw->x, draw->y,
 			     get_drawable_dx(draw), get_drawable_dy(draw),
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index d4ace9d..d9a3dcf 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -471,9 +471,7 @@ glyphs_to_dst(struct sna *sna,
 	     __FUNCTION__, op, src_x, src_y, nlist,
 	     list->xOff, list->yOff, dst->pDrawable->x, dst->pDrawable->y));
 
-	if (dst->pCompositeClip->extents.x2 - dst->pCompositeClip->extents.x1 < dst->pDrawable->width ||
-	    dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1 < dst->pDrawable->height ||
-	    dst->pCompositeClip->data) {
+	if (is_clipped(dst->pCompositeClip, dst->pDrawable)) {
 		rects = REGION_RECTS(dst->pCompositeClip);
 		nrect = REGION_NUM_RECTS(dst->pCompositeClip);
 	} else
commit 68b357454af705f1ed3a9599435a402f2611a180
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 6 10:58:06 2012 +0100

    sna: Clear garbage from the new front buffer when resizing
    
    Avoid displaying a buffer filled with random junk when resizing (and
    thereby creating a new) framebuffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index fb38c25..0b1a494 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -525,6 +525,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	int i, ret = FALSE;
 
 	DBG(("%s\n", __FUNCTION__));
+	kgem_bo_submit(&sna->kgem, sna_crtc->bo);
 
 	assert(xf86_config->num_output < ARRAY_SIZE(output_ids));
 
@@ -788,15 +789,15 @@ void sna_copy_fbcon(struct sna *sna)
 
 	sx = dx = 0;
 	if (box.x2 < (uint16_t)fbcon.width)
-		sx = (fbcon.width - box.x2) / 2.;
+		sx = (fbcon.width - box.x2) / 2;
 	if (box.x2 < sna->front->drawable.width)
-		dx = (sna->front->drawable.width - box.x2) / 2.;
+		dx = (sna->front->drawable.width - box.x2) / 2;
 
 	sy = dy = 0;
 	if (box.y2 < (uint16_t)fbcon.height)
-		sy = (fbcon.height - box.y2) / 2.;
+		sy = (fbcon.height - box.y2) / 2;
 	if (box.y2 < sna->front->drawable.height)
-		dy = (sna->front->drawable.height - box.y2) / 2.;
+		dy = (sna->front->drawable.height - box.y2) / 2;
 
 	ok = sna->render.copy_boxes(sna, GXcopy,
 				    scratch, bo, sx, sy,
@@ -2122,6 +2123,62 @@ sna_redirect_screen_pixmap(ScrnInfoPtr scrn, PixmapPtr old, PixmapPtr new)
 	screen->SetScreenPixmap(new);
 }
 
+static void copy_front(struct sna *sna, PixmapPtr old, PixmapPtr new)
+{
+	struct sna_pixmap *old_priv, *new_priv;
+	int16_t sx, sy, dx, dy;
+	BoxRec box;
+
+	DBG(("%s\n", __FUNCTION__));
+
+	if (wedged(sna))
+		return;
+
+	old_priv = sna_pixmap_force_to_gpu(old, MOVE_READ);
+	if (!old_priv)
+		return;
+
+	new_priv = sna_pixmap_force_to_gpu(new, MOVE_WRITE);
+	if (!new_priv)
+		return;
+
+	box.x1 = box.y1 = 0;
+	box.x2 = min(old->drawable.width, new->drawable.width);
+	box.y2 = min(old->drawable.height, new->drawable.height);
+
+	sx = dx = 0;
+	if (box.x2 < old->drawable.width)
+		sx = (old->drawable.width - box.x2) / 2;
+	if (box.x2 < new->drawable.width)
+		dx = (new->drawable.width - box.x2) / 2;
+
+	sy = dy = 0;
+	if (box.y2 < old->drawable.height)
+		sy = (old->drawable.height - box.y2) / 2;
+	if (box.y2 < new->drawable.height)
+		dy = (new->drawable.height - box.y2) / 2;
+
+	DBG(("%s: copying box (%dx%d) from (%d, %d) to (%d, %d)\n",
+	     __FUNCTION__, box.x2, box.y2, sx, sy, dx, dy));
+
+	if (box.x2 != new->drawable.width || box.y2 != new->drawable.height) {
+		(void)sna->render.fill_one(sna, new, new_priv->gpu_bo, 0,
+					   0, 0,
+					   new->drawable.width,
+					   new->drawable.height,
+					   GXclear);
+	}
+	(void)sna->render.copy_boxes(sna, GXcopy,
+				     old, old_priv->gpu_bo, sx, sy,
+				     new, new_priv->gpu_bo, dx, dy,
+				     &box, 1, 0);
+
+	if (!DAMAGE_IS_ALL(new_priv->gpu_damage))
+		sna_damage_all(&new_priv->gpu_damage,
+			       new->drawable.width,
+			       new->drawable.height);
+}
+
 static Bool
 sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 {
@@ -2157,6 +2214,8 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	assert(sna->mode.shadow_damage == NULL);
 	assert(sna->mode.shadow == NULL);
 
+	copy_front(sna, sna->front, new_front);
+
 	sna->front = new_front;
 	scrn->virtualX = width;
 	scrn->virtualY = height;
commit 5784e0f21dc91f33c99a507105a0695cc53d6574
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 6 09:42:37 2012 +0100

    Allow matching against any device supported by drm/i915
    
    However we cannot enable acceleration if we do not recognise its
    hardware layout or instruction set.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 77611f5..f2770d6 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -417,6 +417,9 @@ static Bool has_relaxed_fencing(struct intel_screen_private *intel)
 
 static Bool can_accelerate_blt(struct intel_screen_private *intel)
 {
+	if (INTEL_INFO(intel)->gen == 0)
+		return FALSE;
+
 	if (0 && (IS_I830(intel) || IS_845G(intel))) {
 		/* These pair of i8xx chipsets have a crippling erratum
 		 * that prevents the use of a PTE entry by the BLT
diff --git a/src/intel_module.c b/src/intel_module.c
index af82cff..f8ba149 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -48,6 +48,10 @@
 
 static struct intel_device_info *chipset_info;
 
+static const struct intel_device_info intel_generic_info = {
+	.gen = 0,
+};
+
 static const struct intel_device_info intel_i81x_info = {
 	.gen = 10,
 };
@@ -215,6 +219,7 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),
 
+	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
 	{ 0, 0, 0 },
 };
 
@@ -225,6 +230,7 @@ void intel_detect_chipset(ScrnInfoPtr scrn,
 	int i;
 
 	chipset->info = chipset_info;
+	chipset->name = NULL;
 
 	for (i = 0; intel_chipsets[i].name != NULL; i++) {
 		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
@@ -232,12 +238,14 @@ void intel_detect_chipset(ScrnInfoPtr scrn,
 			break;
 		}
 	}
-	if (intel_chipsets[i].name == NULL) {
-		chipset->name = "unknown chipset";
+	if (chipset->name == NULL) {
+		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
+		chipset->name = "unknown";
+	} else {
+		xf86DrvMsg(scrn->scrnIndex, X_INFO,
+			   "Integrated Graphics Chipset: Intel(R) %s\n",
+			   chipset->name);
 	}
-
-	xf86DrvMsg(scrn->scrnIndex, X_INFO,
-		   "Integrated Graphics Chipset: Intel(R) %s\n", chipset->name);
 }
 
 /*
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index abae21a..2ace6ca 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -543,7 +543,10 @@ static struct list *active(struct kgem *kgem, int num_pages, int tiling)
 static size_t
 agp_aperture_size(struct pci_device *dev, int gen)
 {
-	return dev->regions[gen < 30 ? 0 : 2].size;
+	/* XXX assume that only future chipsets are unknown and follow
+	 * the post gen2 PCI layout.
+	 */
+	return dev->regions[(gen && gen < 30) ? 0 : 2].size;
 }
 
 static size_t
commit 1ee10cc3b2aa0888753eeb25c7fde7296a3c92eb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jul 6 00:01:58 2012 +0100

    Make the detection of broken pre-production hardware verbose
    
    These SDVs should have been returned to the manufacturer long ago and
    replaced with real systems. So if they are still in use, add a gentle
    reminder.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index f1bb10d..77611f5 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -432,6 +432,21 @@ static Bool can_accelerate_blt(struct intel_screen_private *intel)
 		return FALSE;
 	}
 
+	if (INTEL_INFO(intel)->gen == 60) {
+		struct pci_device *const device = intel->PciInfo;
+
+		/* Sandybridge rev07 locks up easily, even with the
+		 * BLT ring workaround in place.
+		 * Thus use shadowfb by default.
+		 */
+		if (device->revision < 8) {
+			xf86DrvMsg(intel->scrn->scrnIndex, X_WARNING,
+				   "Disabling hardware acceleration on this pre-production hardware.\n");
+
+			return FALSE;
+		}
+	}
+
 	if (INTEL_INFO(intel)->gen >= 60) {
 		drm_i915_getparam_t gp;
 		int value;
@@ -445,17 +460,6 @@ static Bool can_accelerate_blt(struct intel_screen_private *intel)
 			return FALSE;
 	}
 
-	if (INTEL_INFO(intel)->gen == 60) {
-		struct pci_device *const device = intel->PciInfo;
-
-		/* Sandybridge rev07 locks up easily, even with the
-		 * BLT ring workaround in place.
-		 * Thus use shadowfb by default.
-		 */
-		if (device->revision < 8)
-		    return FALSE;
-	}
-
 	return TRUE;
 }
 
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ed1d700..abae21a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -625,17 +625,23 @@ static bool __kgem_throttle(struct kgem *kgem)
 	return errno == EIO;
 }
 
-static bool is_hw_supported(struct kgem *kgem)
+static bool is_hw_supported(struct kgem *kgem,
+			    struct pci_device *dev)
 {
 	if (DBG_NO_HW)
 		return false;
 
-	if (kgem->gen >= 60) /* Only if the kernel supports the BLT ring */
-		return gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
-
 	if (kgem->gen <= 20) /* dynamic GTT is fubar */
 		return false;
 
+	if (kgem->gen == 60 && dev->revision < 8) {
+		/* pre-production SNB with dysfunctional BLT */
+		return false;
+	}
+
+	if (kgem->gen >= 60) /* Only if the kernel supports the BLT ring */
+		return gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+
 	return true;
 }
 
@@ -663,6 +669,12 @@ static bool test_has_cache_level(struct kgem *kgem)
 #endif
 }
 
+static int kgem_get_screen_index(struct kgem *kgem)
+{
+	struct sna *sna = container_of(kgem, struct sna, kgem);
+	return sna->scrn->scrnIndex;
+}
+
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -674,8 +686,15 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->fd = fd;
 	kgem->gen = gen;
-	kgem->wedged = __kgem_throttle(kgem);
-	kgem->wedged |= !is_hw_supported(kgem);
+	if (!is_hw_supported(kgem, dev)) {
+		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
+			   "Detected unsupported/dysfunctional hardware, disabling acceleration.\n");
+		kgem->wedged = 1;
+	} else if (__kgem_throttle(kgem)) {
+		xf86DrvMsg(kgem_get_screen_index(kgem), X_WARNING,
+			   "Detected a hung GPU, disabling acceleration.\n");
+		kgem->wedged = 1;
+	}
 
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
@@ -2072,10 +2091,9 @@ void kgem_throttle(struct kgem *kgem)
 	kgem->wedged |= __kgem_throttle(kgem);
 	DBG(("%s: wedged=%d\n", __FUNCTION__, kgem->wedged));
 	if (kgem->wedged && !warned) {
-		struct sna *sna = container_of(kgem, struct sna, kgem);
-		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+		xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
 			   "Detected a hung GPU, disabling acceleration.\n");
-		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+		xf86DrvMsg(kgem_get_screen_index(kgem), X_ERROR,
 			   "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
 		warned = 1;
 	}
commit cd2dd3016e0834d1636aa96511608022a4cdbcd1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 19:58:54 2012 +0100

    sna: Fix clipping of glyphs-to-dst for partially obscurred windows
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 3d24ea4..d4ace9d 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -472,7 +472,8 @@ glyphs_to_dst(struct sna *sna,
 	     list->xOff, list->yOff, dst->pDrawable->x, dst->pDrawable->y));
 
 	if (dst->pCompositeClip->extents.x2 - dst->pCompositeClip->extents.x1 < dst->pDrawable->width ||
-	    dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1 < dst->pDrawable->height) {
+	    dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1 < dst->pDrawable->height ||
+	    dst->pCompositeClip->data) {
 		rects = REGION_RECTS(dst->pCompositeClip);
 		nrect = REGION_NUM_RECTS(dst->pCompositeClip);
 	} else
commit 0f086acb259d7732560c5d0d642308de028a4445
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 12:26:18 2012 +0100

    sna: Fallback to attaching a shadow fb if we fail to setup the crtc
    
    As we've chosen to fix the kernel to handle CRTC offsets > 4096, drop
    the automatic workaround. However, allow the user to force creation of
    PerCrtcPixmaps for the purpose of debugging (and to workaround the bug
    in older kernels) and to fallback to trying a shadow fb if the setcrtc
    fails with the composite fb.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index d8455f9..2e112f9 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -20,9 +20,10 @@ const OptionInfoRec intel_options[] = {
 #endif
 #ifdef USE_SNA
 	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	1},
-	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	0},
+	{OPTION_ZAPHOD,		"ZaphodHeads",	OPTV_STRING,	{0},	0},
 	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	1},
-	{OPTION_TEAR_FREE,	"TearFree",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_TEAR_FREE,	"TearFree",	OPTV_BOOLEAN,	{0},	0},
+	{OPTION_CRTC_PIXMAPS,	"PerCrtcPixmaps", OPTV_BOOLEAN,	{0},	0},
 #endif
 #ifdef USE_UXA
 	{OPTION_FALLBACKDEBUG,	"FallbackDebug",OPTV_BOOLEAN,	{0},	0},
diff --git a/src/intel_options.h b/src/intel_options.h
index c3e4999..8d0312c 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -29,6 +29,7 @@ enum intel_options {
 	OPTION_ZAPHOD,
 	OPTION_DELAYED_FLUSH,
 	OPTION_TEAR_FREE,
+	OPTION_CRTC_PIXMAPS,
 #endif
 #ifdef USE_UXA
 	OPTION_FALLBACKDEBUG,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 218a882..bda3cb8 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -213,6 +213,7 @@ struct sna {
 #define SNA_NO_WAIT		0x4
 #define SNA_NO_FLIP		0x8
 #define SNA_TEAR_FREE		0x10
+#define SNA_FORCE_SHADOW	0x20
 
 	unsigned watch_flush;
 	unsigned flush;
@@ -232,7 +233,6 @@ struct sna {
 
 	struct sna_mode {
 		drmModeResPtr kmode;
-		int max_tile_offset;
 
 		int shadow_active;
 		DamagePtr shadow_damage;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f59eed5..fb38c25 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -69,6 +69,7 @@ struct sna_crtc {
 	struct kgem_bo *bo;
 	uint32_t cursor;
 	bool shadow;
+	bool fallback_shadow;
 	uint8_t id;
 	uint8_t pipe;
 	uint8_t plane;
@@ -540,9 +541,6 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		output_count++;
 	}
 
-	crtc->funcs->gamma_set(crtc, crtc->gamma_red, crtc->gamma_green,
-			       crtc->gamma_blue, crtc->gamma_size);
-
 	VG_CLEAR(arg);
 	arg.crtc_id = sna_crtc->id;
 	arg.fb_id = fb_id(sna_crtc->bo);
@@ -558,12 +556,6 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	arg.mode = sna_crtc->kmode;
 	arg.mode_valid = 1;
 
-	xf86DrvMsg(crtc->scrn->scrnIndex, X_INFO,
-		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
-		   sna_crtc->kmode.hdisplay,
-		   sna_crtc->kmode.vdisplay,
-		   sna_crtc->id, sna_crtc->pipe);
-
 	DBG(("%s: applying crtc [%d] mode=%dx%d+%d+%d@%d, fb=%d%s update to %d outputs\n",
 	     __FUNCTION__, sna_crtc->id,
 	     arg.mode.hdisplay,
@@ -575,14 +567,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     output_count));
 
 	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
-	if (ret) {
-		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
-			   "failed to set mode: %s\n", strerror(errno));
+	if (ret)
 		return FALSE;
-	}
-
-	if (crtc->scrn->pScreen)
-		xf86_reload_cursors(crtc->scrn->pScreen);
 
 	sna_crtc_force_outputs_on(crtc);
 	return TRUE;
@@ -649,6 +635,7 @@ static bool sna_crtc_enable_shadow(struct sna *sna, struct sna_crtc *crtc)
 
 static void sna_crtc_disable_shadow(struct sna *sna, struct sna_crtc *crtc)
 {
+	crtc->fallback_shadow = false;
 	if (!crtc->shadow)
 		return;
 
@@ -856,6 +843,16 @@ static bool use_shadow(struct sna *sna, xf86CrtcPtr crtc)
 
 	assert(sna->scrn->virtualX && sna->scrn->virtualY);
 
+	if (sna->flags & SNA_FORCE_SHADOW) {
+		DBG(("%s: forcing shadow\n", __FUNCTION__));
+		return true;
+	}
+
+	if (to_sna_crtc(crtc)->fallback_shadow) {
+		DBG(("%s: fallback shadow\n", __FUNCTION__));
+		return true;
+	}
+
 	if (sna->scrn->virtualX > sna->mode.kmode->max_width ||
 	    sna->scrn->virtualY > sna->mode.kmode->max_height) {
 		DBG(("%s: framebuffer too large (%dx%d) > (%dx%d)\n",
@@ -866,14 +863,6 @@ static bool use_shadow(struct sna *sna, xf86CrtcPtr crtc)
 		return true;
 	}
 
-	if (crtc->x >= sna->mode.max_tile_offset ||
-	    crtc->y >= sna->mode.max_tile_offset) {
-		DBG(("%s: offset too large (%d, %d) >= %d\n",
-		    __FUNCTION__,
-		    crtc->x, crtc->y, sna->mode.max_tile_offset));
-		return true;
-	}
-
 	transform = NULL;
 	if (crtc->transformPresent)
 		transform = &crtc->transform;
@@ -1084,6 +1073,11 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	struct kgem_bo *saved_bo, *bo;
 	struct drm_mode_modeinfo saved_kmode;
 
+	xf86DrvMsg(crtc->scrn->scrnIndex, X_INFO,
+		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
+		   mode->HDisplay, mode->VDisplay,
+		   sna_crtc->id, sna_crtc->pipe);
+
 	DBG(("%s(crtc=%d [pipe=%d] rotation=%d, x=%d, y=%d, mode=%dx%d@%d)\n",
 	     __FUNCTION__, sna_crtc->id, sna_crtc->pipe, rotation, x, y,
 	     mode->HDisplay, mode->VDisplay, mode->Clock));
@@ -1091,20 +1085,36 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	assert(mode->HDisplay <= sna->mode.kmode->max_width &&
 	       mode->VDisplay <= sna->mode.kmode->max_height);
 
-	/* Attach per-crtc pixmap or direct */
+	crtc->funcs->gamma_set(crtc,
+			       crtc->gamma_red,
+			       crtc->gamma_green,
+			       crtc->gamma_blue,
+			       crtc->gamma_size);
+
+	saved_kmode = sna_crtc->kmode;
+	saved_bo = sna_crtc->bo;
+
+	sna_crtc->fallback_shadow = false;
+retry: /* Attach per-crtc pixmap or direct */
 	bo = sna_crtc_attach(crtc);
 	if (bo == NULL)
 		return FALSE;
 
-	saved_kmode = sna_crtc->kmode;
-	saved_bo = sna_crtc->bo;
 	sna_crtc->bo = bo;
 	mode_to_kmode(&sna_crtc->kmode, mode);
-
 	if (!sna_crtc_apply(crtc)) {
+		kgem_bo_destroy(&sna->kgem, bo);
+
+		if (!sna_crtc->shadow) {
+			sna_crtc->fallback_shadow = true;
+			goto retry;
+		}
+
+		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
+			   "failed to set mode: %s\n", strerror(errno));
+
 		sna_crtc->bo = saved_bo;
 		sna_crtc->kmode = saved_kmode;
-		kgem_bo_destroy(&sna->kgem, bo);
 		return FALSE;
 	}
 	if (saved_bo)
@@ -1116,6 +1126,9 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	if (sna_crtc->shadow)
 		sna_crtc_damage(crtc);
 
+	if (scrn->pScreen)
+		xf86_reload_cursors(scrn->pScreen);
+
 	return TRUE;
 }
 
@@ -2287,7 +2300,6 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 			   "failed to get resources: %s\n", strerror(errno));
 		return FALSE;
 	}
-	mode->max_tile_offset = 4096;
 
 	set_size_range(sna);
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 6476d1a..eb4776b 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -518,6 +518,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 			sna->flags |= SNA_TEAR_FREE;
 	} else
 		sna->flags |= SNA_NO_FLIP;
+	if (xf86ReturnOptValBool(sna->Options, OPTION_CRTC_PIXMAPS, FALSE))
+		sna->flags |= SNA_FORCE_SHADOW;
 
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Framebuffer %s\n",
 		   sna->tiling & SNA_TILING_FB ? "tiled" : "linear");
@@ -531,6 +533,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		   sna->flags & SNA_NO_DELAYED_FLUSH ? "dis" : "en");
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "\"Tear free\" %sabled\n",
 		   sna->flags & SNA_TEAR_FREE ? "en" : "dis");
+	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Forcing per-crtc-pixmaps? %s\n",
+		   sna->flags & SNA_FORCE_SHADOW ? "yes" : "no");
 
 	if (!sna_mode_pre_init(scrn, sna)) {
 		PreInitCleanup(scrn);
commit 7e8060f837475c85cc061ba4a5388140cd227613
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 03:31:12 2012 +0100

    sna: Do not force GPU allocation if CPU bo is already busy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 13bd978..794086c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2260,8 +2260,8 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
-		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
-			DBG(("%s: damaged with a small operation, will not force allocation\n",
+		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+			DBG(("%s: already using CPU bo, will not force allocation\n",
 			     __FUNCTION__));
 			goto use_cpu_bo;
 		}
@@ -2271,6 +2271,12 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
+		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
+			DBG(("%s: damaged with a small operation, will not force allocation\n",
+			     __FUNCTION__));
+			goto use_cpu_bo;
+		}
+
 		flags = MOVE_WRITE | MOVE_READ;
 		if (prefer_gpu & FORCE_GPU)
 			flags |= __MOVE_FORCE;
commit c32bb286dc9a489232030f6abe9076411fbcecfd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 03:18:12 2012 +0100

    sna: Make sure damage is flushed to the CPU bo before use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 46a9180..13bd978 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1267,7 +1267,7 @@ done:
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
-	priv->cpu = true;
+	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -1898,7 +1898,7 @@ out:
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
-	priv->cpu = true;
+	priv->cpu = (flags & MOVE_ASYNC_HINT) == 0;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -2211,7 +2211,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	BoxRec extents;
+	RegionRec region;
 	int16_t dx, dy;
 	int ret;
 
@@ -2267,8 +2267,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		}
 
 		if (priv->cpu_damage && prefer_gpu == 0) {
-			DBG(("%s: prefer cpu",
-			     __FUNCTION__));
+			DBG(("%s: prefer cpu", __FUNCTION__));
 			goto use_cpu_bo;
 		}
 
@@ -2284,19 +2283,20 @@ sna_drawable_use_bo(DrawablePtr drawable,
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
-	extents = *box;
-	extents.x1 += dx;
-	extents.x2 += dx;
-	extents.y1 += dy;
-	extents.y2 += dy;
+	region.extents = *box;
+	region.extents.x1 += dx;
+	region.extents.x2 += dx;
+	region.extents.y1 += dy;
+	region.extents.y2 += dy;
 
 	DBG(("%s extents (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     extents.x1, extents.y1, extents.x2, extents.y2));
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
 	if (priv->gpu_damage) {
 		if (!priv->cpu_damage) {
 			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
-							       &extents)) {
+							       &region.extents)) {
 				DBG(("%s: region wholly contained within GPU damage\n",
 				     __FUNCTION__));
 				goto use_gpu_bo;
@@ -2307,7 +2307,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			}
 		}
 
-		ret = sna_damage_contains_box(priv->gpu_damage, &extents);
+		ret = sna_damage_contains_box(priv->gpu_damage, &region.extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within GPU damage\n",
 			     __FUNCTION__));
@@ -2322,7 +2322,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->cpu_damage) {
-		ret = sna_damage_contains_box(priv->cpu_damage, &extents);
+		ret = sna_damage_contains_box(priv->cpu_damage, &region.extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within CPU damage\n",
 			     __FUNCTION__));
@@ -2342,7 +2342,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 move_to_gpu:
-	if (!sna_pixmap_move_area_to_gpu(pixmap, &extents,
+	if (!sna_pixmap_move_area_to_gpu(pixmap, &region.extents,
 					 MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: failed to move-to-gpu, fallback\n", __FUNCTION__));
 		assert(priv->gpu_bo == NULL);
@@ -2379,29 +2379,26 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
-	if (priv->cpu_bo->sync && !kgem_bo_is_busy(priv->cpu_bo))
+	if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 		return NULL;
 
-	/* Continue to use the shadow pixmap once mapped */
-	if (pixmap->devPrivate.ptr) {
-		/* But only if we do not need to sync the CPU bo */
-		if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
-			return NULL;
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 
-		/* Both CPU and GPU are busy, prefer to use the GPU */
-		if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)) {
-			get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	region.extents = *box;
+	region.extents.x1 += dx;
+	region.extents.x2 += dx;
+	region.extents.y1 += dy;
+	region.extents.y2 += dy;
+	region.data = NULL;
 
-			extents = *box;
-			extents.x1 += dx;
-			extents.x2 += dx;
-			extents.y1 += dy;
-			extents.y2 += dy;
-			goto move_to_gpu;
-		}
+	/* Both CPU and GPU are busy, prefer to use the GPU */
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		goto move_to_gpu;
 
-		priv->mapped = false;
-		pixmap->devPrivate.ptr = NULL;
+	if (!sna_drawable_move_region_to_cpu(&pixmap->drawable, &region,
+					     MOVE_READ | MOVE_ASYNC_HINT)) {
+		DBG(("%s: failed to move-to-cpu, fallback\n", __FUNCTION__));
+		return NULL;
 	}
 
 	if (sna_damage_is_all(&priv->cpu_damage,
commit d46cc00b3cd903bfaf37ad7d4a60676c4b346983
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jul 5 02:19:26 2012 +0100

    sna/dri: Assert that our pixmaps sizes are invariant
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index a10efc7..36b40a7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -223,11 +223,11 @@ sna_dri_create_buffer(DrawablePtr drawable,
 					dri_drawable_type, NULL, DixWriteAccess);
 		if (buffer) {
 			private = get_private(buffer);
-			if (private->pixmap == pixmap &&
-			    private->width  == pixmap->drawable.width &&
-			    private->height == pixmap->drawable.height)  {
+			if (private->pixmap == pixmap) {
 				DBG(("%s: reusing front buffer attachment\n",
 				     __FUNCTION__));
+				assert(private->width  == pixmap->drawable.width);
+				assert(private->height == pixmap->drawable.height);
 				private->refcnt++;
 				return buffer;
 			}
commit 3c1f58fe45e8f4f18c45733f862ca1f290dde84d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 23:03:44 2012 +0100

    sna: drop an unused ret var
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a54a80a..ed1d700 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3531,12 +3531,10 @@ retry:
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_gem_flink flink;
-	int ret;
 
 	VG_CLEAR(flink);
 	flink.handle = bo->handle;
-	ret = drmIoctl(kgem->fd, DRM_IOCTL_GEM_FLINK, &flink);
-	if (ret)
+	if (drmIoctl(kgem->fd, DRM_IOCTL_GEM_FLINK, &flink))
 		return 0;
 
 	DBG(("%s: flinked handle=%d to name=%d, marking non-reusable\n",
commit a69a47f94037698a360f8b8611f6f3e58a20ca87
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 22:15:45 2012 +0100

    sna: Fix detection of EIO through throttle
    
    When using drmIoctl, one needs to check the errno for the actual error
    code.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 7f8c89e..a54a80a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -619,7 +619,10 @@ static bool semaphores_enabled(void)
 
 static bool __kgem_throttle(struct kgem *kgem)
 {
-	return drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == -EIO;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == 0)
+		return false;
+
+	return errno == EIO;
 }
 
 static bool is_hw_supported(struct kgem *kgem)
@@ -2067,6 +2070,7 @@ void kgem_throttle(struct kgem *kgem)
 	static int warned;
 
 	kgem->wedged |= __kgem_throttle(kgem);
+	DBG(("%s: wedged=%d\n", __FUNCTION__, kgem->wedged));
 	if (kgem->wedged && !warned) {
 		struct sna *sna = container_of(kgem, struct sna, kgem);
 		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
commit f2fdb0da83d6208c884ffde2bb30d0a67fc75217
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 21:43:22 2012 +0100

    sna: Use throttle() to confirm a GPU hang
    
    This has two effects. The first is that we confirm that the EIO was
    indeed a GPU hang and not some other form of I/O failure (for example
    swapin). And the second is that it means we also print the warning to
    look for an i915_error_state when we detect a GPU hang.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 944b8f2..7f8c89e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1604,7 +1604,7 @@ static void kgem_commit(struct kgem *kgem)
 		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
 		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain)) {
 			DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-			kgem->wedged = 1;
+			kgem_throttle(kgem);
 		}
 
 		kgem_retire(kgem);
@@ -1983,7 +1983,7 @@ void _kgem_submit(struct kgem *kgem)
 			}
 			if (ret == -1 && (errno == EIO || errno == EBUSY)) {
 				DBG(("%s: GPU hang detected\n", __FUNCTION__));
-				kgem->wedged = 1;
+				kgem_throttle(kgem);
 				ret = 0;
 			}
 #if !NDEBUG
@@ -2047,7 +2047,7 @@ void _kgem_submit(struct kgem *kgem)
 				ret = drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 				if (ret == -1) {
 					DBG(("%s: sync: GPU hang detected\n", __FUNCTION__));
-					kgem->wedged = 1;
+					kgem_throttle(kgem);
 				}
 			}
 		}
commit b8c2b34e39c7f2ee2597f8727b9b98c6aac93fb1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 19:53:27 2012 +0100

    sna: Simplify timer execution
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 8564cfc..218a882 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -217,11 +217,9 @@ struct sna {
 	unsigned watch_flush;
 	unsigned flush;
 
-	OsTimerPtr timer;
+	struct timeval timer_tv;
 	uint32_t timer_expire[NUM_TIMERS];
 	uint16_t timer_active;
-	uint16_t timer_ready;
-	struct timeval timer_tv;
 
 	int vblank_interval;
 
@@ -570,7 +568,7 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 Bool sna_accel_pre_init(struct sna *sna);
 Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
-void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready);
+void sna_accel_wakeup_handler(struct sna *sna);
 void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1f17c00..46a9180 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3719,8 +3719,6 @@ fallback:
 static int
 source_prefer_gpu(struct sna_pixmap *priv)
 {
-	unsigned flags;
-
 	if (priv == NULL) {
 		DBG(("%s: source unattached, use cpu\n", __FUNCTION__));
 		return 0;
@@ -12246,7 +12244,6 @@ static void sna_accel_disarm_timer(struct sna *sna, int id)
 {
 	DBG(("%s[%d] (time=%ld)\n", __FUNCTION__, id, (long)TIME));
 	sna->timer_active &= ~(1<<id);
-	sna->timer_ready &= ~(1<<id);
 }
 
 static bool has_shadow(struct sna *sna)
@@ -12302,8 +12299,10 @@ static bool sna_accel_do_flush(struct sna *sna)
 		return true;
 
 	if (sna->timer_active & (1<<(FLUSH_TIMER))) {
-		DBG(("%s: flush timer active\n", __FUNCTION__));
-		if (sna->timer_ready & (1<<(FLUSH_TIMER))) {
+		int32_t delta = sna->timer_expire[FLUSH_TIMER] - TIME;
+		DBG(("%s: flush timer active: delta=%d\n",
+		     __FUNCTION__, delta));
+		if (delta <= 3) {
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			sna->timer_expire[FLUSH_TIMER] =
 				TIME + sna->vblank_interval;
@@ -12314,7 +12313,6 @@ static bool sna_accel_do_flush(struct sna *sna)
 			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
-			sna->timer_ready |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
 				TIME + sna->vblank_interval / 2;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
@@ -12330,7 +12328,8 @@ static bool sna_accel_do_throttle(struct sna *sna)
 		return false;
 
 	if (sna->timer_active & (1<<(THROTTLE_TIMER))) {
-		if (sna->timer_ready & (1<<(THROTTLE_TIMER))) {
+		int32_t delta = sna->timer_expire[THROTTLE_TIMER] - TIME;
+		if (delta <= 3) {
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			sna->timer_expire[THROTTLE_TIMER] = TIME + 20;
 			return true;
@@ -12341,7 +12340,6 @@ static bool sna_accel_do_throttle(struct sna *sna)
 		} else {
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 			sna->timer_active |= 1 << THROTTLE_TIMER;
-			sna->timer_ready |= 1 << THROTTLE_TIMER;
 			sna->timer_expire[THROTTLE_TIMER] = TIME + 20;
 		}
 	}
@@ -12352,7 +12350,8 @@ static bool sna_accel_do_throttle(struct sna *sna)
 static bool sna_accel_do_expire(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
-		if (sna->timer_ready & (1<<(EXPIRE_TIMER))) {
+		int32_t delta = sna->timer_expire[EXPIRE_TIMER] - TIME;
+		if (delta <= 3) {
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			sna->timer_expire[EXPIRE_TIMER] =
 				TIME + MAX_INACTIVE_TIME * 1000;
@@ -12361,7 +12360,6 @@ static bool sna_accel_do_expire(struct sna *sna)
 	} else {
 		if (sna->kgem.need_expire) {
 			sna->timer_active |= 1 << EXPIRE_TIMER;
-			sna->timer_ready |= 1 << EXPIRE_TIMER;
 			sna->timer_expire[EXPIRE_TIMER] =
 				TIME + MAX_INACTIVE_TIME * 1000;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
@@ -12374,7 +12372,8 @@ static bool sna_accel_do_expire(struct sna *sna)
 static bool sna_accel_do_inactive(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(INACTIVE_TIMER))) {
-		if (sna->timer_ready & (1<<(INACTIVE_TIMER))) {
+		int32_t delta = sna->timer_expire[INACTIVE_TIMER] - TIME;
+		if (delta <= 3) {
 			sna->timer_expire[INACTIVE_TIMER] =
 				TIME + 120 * 1000;
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
@@ -12383,7 +12382,6 @@ static bool sna_accel_do_inactive(struct sna *sna)
 	} else {
 		if (!list_is_empty(&sna->active_pixmaps)) {
 			sna->timer_active |= 1 << INACTIVE_TIMER;
-			sna->timer_ready |= 1 << INACTIVE_TIMER;
 			sna->timer_expire[INACTIVE_TIMER] =
 				TIME + 120 * 1000;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
@@ -12393,33 +12391,25 @@ static bool sna_accel_do_inactive(struct sna *sna)
 	return false;
 }
 
-static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
+static int32_t sna_timeout(struct sna *sna)
 {
-	struct sna *sna = arg;
-	int32_t next = 0;
-	uint32_t active;
+	int32_t now = TIME, next = 0;
 	int i;
 
-	DBG(("%s: now=%d, active=%08x, ready=%08x\n",
-	     __FUNCTION__, (int)now, sna->timer_active, sna->timer_ready));
-	active = sna->timer_active & ~sna->timer_ready;
-	if (active == 0)
-		return 0;
-
+	DBG(("%s: now=%d, active=%08x\n",
+	     __FUNCTION__, (int)now, sna->timer_active));
 	for (i = 0; i < NUM_TIMERS; i++) {
-		if (active & (1 << i)) {
+		if (sna->timer_active & (1 << i)) {
 			int32_t delta = sna->timer_expire[i] - now;
 			DBG(("%s: timer[%d] expires in %d [%d]\n",
 			     __FUNCTION__, i, delta, sna->timer_expire[i]));
-			if (delta <= 3)
-				sna->timer_ready |= 1 << i;
-			else if (next == 0 || delta < next)
+			if (next == 0 || delta < next)
 				next = delta;
 		}
 	}
 
-	DBG(("%s: active=%08x, ready=%08x, next=+%d\n",
-	     __FUNCTION__, sna->timer_active, sna->timer_ready, next));
+	DBG(("%s: active=%08x, next=+%d\n",
+	     __FUNCTION__, sna->timer_active, next));
 	return next;
 }
 
@@ -12596,7 +12586,6 @@ static void sna_accel_debug_memory(struct sna *sna) { }
 
 Bool sna_accel_pre_init(struct sna *sna)
 {
-	sna->timer = TimerSet(NULL, 0, 0, sna_timeout, sna);
 	return TRUE;
 }
 
@@ -12741,6 +12730,8 @@ void sna_accel_close(struct sna *sna)
 
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
+	UpdateCurrentTimeIf();
+
 	if (sna->kgem.nbatch && kgem_is_idle(&sna->kgem)) {
 		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
 		_kgem_submit(&sna->kgem);
@@ -12776,14 +12767,12 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 		sna->watch_flush = 0;
 	}
 
-	if (sna->timer_ready) {
+	if (sna->timer_active) {
 		int32_t timeout;
 
-		DBG(("%s: evaluating timers, ready=%x\n",
-		     __FUNCTION__, sna->timer_ready));
-		sna->timer_ready = 0;
-		timeout = sna_timeout(sna->timer, TIME, sna);
-		TimerSet(sna->timer, 0, timeout, sna_timeout, sna);
+		DBG(("%s: evaluating timers, active=%x\n",
+		     __FUNCTION__, sna->timer_active));
+		timeout = sna_timeout(sna);
 		if (timeout) {
 			if (*tv == NULL) {
 				*tv = &sna->timer_tv;
@@ -12798,9 +12787,10 @@ set_tv:
 	}
 }
 
-void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
+void sna_accel_wakeup_handler(struct sna *sna)
 {
 	DBG(("%s\n", __FUNCTION__));
+
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
 	if (!sna->mode.shadow_active && !sna->kgem.need_retire) {
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index f7eeca5..6476d1a 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -596,7 +596,7 @@ sna_wakeup_handler(WAKEUPHANDLER_ARGS_DECL)
 
 	sna->WakeupHandler(WAKEUPHANDLER_ARGS);
 
-	sna_accel_wakeup_handler(sna, read_mask);
+	sna_accel_wakeup_handler(sna);
 
 	if (FD_ISSET(sna->kgem.fd, (fd_set*)read_mask))
 		sna_mode_wakeup(sna);
commit 844ab84c64fde38db315787e16a9f7f674d0f88c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 17:58:27 2012 +0100

    sna: Fix leak around ENOMEM error path in sna_copy_boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1919b2e..1f17c00 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3974,7 +3974,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						       src->depth,
 						       KGEM_BUFFER_WRITE_INPLACE);
 			if (tmp == NullPixmap)
-				return;
+				goto out;
 
 			dx = -region.extents.x1;
 			dy = -region.extents.y1;
commit 54ee41bf05e328ed2f2cd2a64c21afe3efe2556a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 17:40:58 2012 +0100

    sna: Pass a hint to the backends when using copy_boxes for readback
    
    If we expect to only emit this set of copy_boxes() and then submit the
    batch, we would prefer to use the BLT for its lower latency.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 5fd995b..b41c386 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2409,7 +2409,7 @@ static inline Bool prefer_blt_fill(struct sna *sna)
 #endif
 }
 
-static inline Bool prefer_blt_copy(struct sna *sna)
+static inline Bool prefer_blt_copy(struct sna *sna, unsigned flags)
 {
 #if PREFER_BLT_COPY
 	return true;
@@ -2864,7 +2864,7 @@ static Bool
 gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
@@ -2882,7 +2882,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %d)->(%d, %d) x %d\n",
 	     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy, n));
 
-	if (prefer_blt_copy(sna) &&
+	if (prefer_blt_copy(sna, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3048,7 +3048,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 #endif
 
 	/* Prefer to use the BLT */
-	if (prefer_blt_copy(sna) &&
+	if (prefer_blt_copy(sna, 0) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 63bbd76..cfc8c63 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -4040,7 +4040,7 @@ static Bool
 gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c3a8204..c577536 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2576,11 +2576,22 @@ gen4_render_copy_one(struct sna *sna,
 	FLUSH(op);
 }
 
+static inline bool prefer_blt_copy(struct sna *sna, unsigned flags)
+{
+#if PREFER_BLT
+	return true;
+	(void)sna;
+#else
+	return sna->kgem.mode != KGEM_RENDER;
+#endif
+	(void)flags;
+}
+
 static Bool
 gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
@@ -2597,7 +2608,7 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 				  box, n);
 #endif
 
-	if (prefer_blt(sna) &&
+	if (prefer_blt_copy(sna, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 46a37a0..49cc17e 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2909,7 +2909,7 @@ static Bool
 gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 81fad35..778a072 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3238,9 +3238,11 @@ static bool prefer_blt_bo(struct sna *sna,
 
 static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
-				   PixmapPtr dst, struct kgem_bo *dst_bo)
+				   PixmapPtr dst, struct kgem_bo *dst_bo,
+				   unsigned flags)
 {
 	return (sna->kgem.ring == KGEM_BLT ||
+		(flags & COPY_LAST && sna->kgem.mode == KGEM_NONE) ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
@@ -3280,7 +3282,7 @@ static Bool
 gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
@@ -3302,7 +3304,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3537,7 +3539,7 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, 0) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index d303286..f9b2e9e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3322,9 +3322,11 @@ static bool prefer_blt_bo(struct sna *sna,
 
 static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
-				   PixmapPtr dst, struct kgem_bo *dst_bo)
+				   PixmapPtr dst, struct kgem_bo *dst_bo,
+				   unsigned flags)
 {
 	return (sna->kgem.ring == KGEM_BLT ||
+		(flags & COPY_LAST && sna->kgem.mode == KGEM_NONE) ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
@@ -3364,7 +3366,7 @@ static Bool
 gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		       PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		       const BoxRec *box, int n)
+		       const BoxRec *box, int n, unsigned flags)
 {
 	struct sna_composite_op tmp;
 
@@ -3386,7 +3388,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, flags) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3644,7 +3646,7 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo, 0) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 537b287..1919b2e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -594,7 +594,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 	if (!sna->render.copy_boxes(sna, GXcopy,
 				    pixmap, priv->gpu_bo, 0, 0,
 				    pixmap, bo, 0, 0,
-				    &box, 1)) {
+				    &box, 1, 0)) {
 		DBG(("%s: copy failed\n", __FUNCTION__));
 		kgem_bo_destroy(&sna->kgem, bo);
 		return NULL;
@@ -1232,7 +1232,7 @@ skip_inplace_map:
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
-							    box, n);
+							    box, n, COPY_LAST);
 			if (!ok)
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
@@ -1690,7 +1690,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
-							    box, n);
+							    box, n, COPY_LAST);
 			if (!ok)
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
@@ -1798,7 +1798,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
-									    box, n);
+									    box, n, COPY_LAST);
 
 					if (!ok)
 						sna_read_boxes(sna,
@@ -1823,7 +1823,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
 								    pixmap, priv->cpu_bo, 0, 0,
-								    box, n);
+								    box, n, COPY_LAST);
 				if (!ok)
 					sna_read_boxes(sna,
 						       priv->gpu_bo, 0, 0,
@@ -1848,7 +1848,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
-									    box, n);
+									    box, n, COPY_LAST);
 					if (!ok)
 						sna_read_boxes(sna,
 							       priv->gpu_bo, 0, 0,
@@ -2096,7 +2096,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
-							    box, n);
+							    box, n, 0);
 			if (!ok) {
 				if (pixmap->devPrivate.ptr == NULL) {
 					assert(priv->stride && priv->ptr);
@@ -2134,7 +2134,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
-						    box, 1);
+						    box, 1, 0);
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
@@ -2163,7 +2163,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
-						    box, n);
+						    box, n,0);
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
@@ -2641,7 +2641,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
-						    box, n);
+						    box, n, 0);
 		if (!ok) {
 			if (pixmap->devPrivate.ptr == NULL) {
 				assert(priv->stride && priv->ptr);
@@ -3657,7 +3657,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (!sna->render.copy_boxes(sna, alu,
 					    pixmap, priv->gpu_bo, dx, dy,
 					    pixmap, priv->gpu_bo, tx, ty,
-					    box, n)) {
+					    box, n, 0)) {
 			DBG(("%s: fallback - accelerated copy boxes failed\n",
 			     __FUNCTION__));
 			goto fallback;
@@ -3874,7 +3874,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
 						    dst_pixmap, bo, dst_dx, dst_dy,
-						    box, n)) {
+						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				goto fallback;
@@ -3909,7 +3909,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
 						    dst_pixmap, bo, dst_dx, dst_dy,
-						    box, n)) {
+						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				goto fallback;
@@ -3945,7 +3945,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
 						    dst_pixmap, bo, dst_dx, dst_dy,
-						    box, n)) {
+						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				goto fallback;
@@ -4005,7 +4005,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			if (!sna->render.copy_boxes(sna, alu,
 						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
 						    dst_pixmap, bo, dst_dx, dst_dy,
-						    box, n)) {
+						    box, n, 0)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				tmp->drawable.pScreen->DestroyPixmap(tmp);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f1a0b84..f59eed5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -814,7 +814,7 @@ void sna_copy_fbcon(struct sna *sna)
 	ok = sna->render.copy_boxes(sna, GXcopy,
 				    scratch, bo, sx, sy,
 				    sna->front, priv->gpu_bo, dx, dy,
-				    &box, 1);
+				    &box, 1, 0);
 	if (!DAMAGE_IS_ALL(priv->gpu_damage))
 		sna_damage_add_box(&priv->gpu_damage, &box);
 
@@ -2780,7 +2780,7 @@ sna_crtc_redisplay(xf86CrtcPtr crtc, RegionPtr region)
 		if (sna->render.copy_boxes(sna, GXcopy,
 					   sna->front, sna_pixmap_get_bo(sna->front), 0, 0,
 					   &tmp, sna_crtc->bo, -tx, -ty,
-					   REGION_RECTS(region), REGION_NUM_RECTS(region)))
+					   REGION_RECTS(region), REGION_NUM_RECTS(region), 0))
 			return;
 	}
 
@@ -2909,7 +2909,8 @@ disable:
 					     sna->front, new, 0, 0,
 					     sna->front, old, 0, 0,
 					     REGION_RECTS(region),
-					     REGION_NUM_RECTS(region));
+					     REGION_NUM_RECTS(region),
+					     COPY_LAST);
 		kgem_submit(&sna->kgem);
 
 		sna_pixmap(sna->front)->gpu_bo = old;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 4519f17..a10efc7 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -568,7 +568,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	sna->render.copy_boxes(sna, GXcopy,
 			       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
 			       pixmap, dst_bo, dx, dy,
-			       boxes, n);
+			       boxes, n, COPY_LAST);
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
@@ -654,7 +654,7 @@ sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	sna->render.copy_boxes(sna, GXcopy,
 			       pixmap, src_bo, dx, dy,
 			       (PixmapPtr)draw, dst_bo, -draw->x, -draw->y,
-			       boxes, n);
+			       boxes, n, COPY_LAST);
 
 	if (region == &clip)
 		pixman_region_fini(&clip);
@@ -697,7 +697,7 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	sna->render.copy_boxes(sna, GXcopy,
 			       (PixmapPtr)draw, src_bo, 0, 0,
 			       (PixmapPtr)draw, dst_bo, 0, 0,
-			       boxes, n);
+			       boxes, n, COPY_LAST);
 
 	if (region == &clip)
 		pixman_region_fini(&clip);
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 8d04548..ffa86b1 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -276,7 +276,7 @@ fallback:
 					if (!sna->render.copy_boxes(sna, GXcopy,
 								    dst, src_bo, src_dx, src_dy,
 								    &tmp, dst_bo, -tile.x1, -tile.y1,
-								    clipped, c-clipped)) {
+								    clipped, c-clipped, COPY_LAST)) {
 						kgem_bo_destroy(&sna->kgem, dst_bo);
 						if (clipped != stack)
 							free(clipped);
@@ -316,7 +316,7 @@ fallback:
 			if (!sna->render.copy_boxes(sna, GXcopy,
 						    dst, src_bo, src_dx, src_dy,
 						    &tmp, dst_bo, -extents.x1, -extents.y1,
-						    box, nbox)) {
+						    box, nbox, COPY_LAST)) {
 				kgem_bo_destroy(&sna->kgem, dst_bo);
 				goto fallback;
 			}
@@ -689,7 +689,7 @@ tile:
 						n = sna->render.copy_boxes(sna, GXcopy,
 									   &tmp, src_bo, -tile.x1, -tile.y1,
 									   dst, dst_bo, dst_dx, dst_dy,
-									   clipped, c - clipped);
+									   clipped, c - clipped, 0);
 					else
 						n = 1;
 
@@ -736,7 +736,7 @@ tile:
 			n = sna->render.copy_boxes(sna, GXcopy,
 						   &tmp, src_bo, -extents.x1, -extents.y1,
 						   dst, dst_bo, dst_dx, dst_dy,
-						   box, nbox);
+						   box, nbox, 0);
 
 			kgem_bo_destroy(&sna->kgem, src_bo);
 
@@ -973,7 +973,7 @@ fallback:
 		n = sna->render.copy_boxes(sna, GXcopy,
 					   &tmp, src_bo, -extents.x1, -extents.y1,
 					   dst, dst_bo, dst_dx, dst_dy,
-					   box, nbox);
+					   box, nbox, 0);
 
 		kgem_bo_destroy(&sna->kgem, src_bo);
 
@@ -1144,7 +1144,7 @@ indirect_replace(struct sna *sna,
 	ret = sna->render.copy_boxes(sna, GXcopy,
 				     pixmap, src_bo, 0, 0,
 				     pixmap, bo, 0, 0,
-				     &box, 1);
+				     &box, 1, 0);
 
 	kgem_bo_destroy(kgem, src_bo);
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index a072994..c22965c 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1167,7 +1167,7 @@ sna_render_picture_extract(struct sna *sna,
 			if (!sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, src_bo, 0, 0,
 						    &tmp, bo, -box.x1, -box.y1,
-						    &box, 1)) {
+						    &box, 1, 0)) {
 				kgem_bo_destroy(&sna->kgem, bo);
 				bo = NULL;
 			}
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 1cc3af1..0eb7e90 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -237,7 +237,9 @@ struct sna_render {
 	Bool (*copy_boxes)(struct sna *sna, uint8_t alu,
 			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-			   const BoxRec *box, int n);
+			   const BoxRec *box, int n, unsigned flags);
+#define COPY_LAST 0x1
+
 	Bool (*copy)(struct sna *sna, uint8_t alu,
 		     PixmapPtr src, struct kgem_bo *src_bo,
 		     PixmapPtr dst, struct kgem_bo *dst_bo,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 27a9dc8..b20eceb 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -659,7 +659,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 				if (!sna->render.copy_boxes(sna, GXcopy,
 							     dst, dst_bo, 0, 0,
 							     &tmp, bo, -dx, -dy,
-							     REGION_RECTS(&this), REGION_NUM_RECTS(&this)))
+							     REGION_RECTS(&this), REGION_NUM_RECTS(&this), 0))
 					goto err;
 
 				RegionTranslate(&this, -dx, -dy);
@@ -671,7 +671,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 				if (!sna->render.copy_boxes(sna, GXcopy,
 							     &tmp, bo, 0, 0,
 							     dst, dst_bo, dx, dy,
-							     REGION_RECTS(&this), REGION_NUM_RECTS(&this)))
+							     REGION_RECTS(&this), REGION_NUM_RECTS(&this), 0))
 					goto err;
 
 				kgem_bo_destroy(&sna->kgem, bo);
@@ -898,11 +898,11 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 			i = (sna->render.copy_boxes(sna, GXcopy,
 						    src, src_bo, src_dx, src_dy,
 						    &p, tmp_bo, -tile.x1, -tile.y1,
-						    clipped, c - clipped) &&
+						    clipped, c - clipped, 0) &&
 			     sna->render.copy_boxes(sna, alu,
 						    &p, tmp_bo, -tile.x1, -tile.y1,
 						    dst, dst_bo, dst_dx, dst_dy,
-						    clipped, c - clipped));
+						    clipped, c - clipped, 0));
 
 			kgem_bo_destroy(&sna->kgem, tmp_bo);
 
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 36defa1..d83c083 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -5779,7 +5779,7 @@ trap_mask_converter(PicturePtr picture,
 	sna->render.copy_boxes(sna, GXcopy,
 			       scratch, sna_pixmap_get_bo(scratch), -extents.x1, -extents.x1,
 			       pixmap, priv->gpu_bo, x, y,
-			       &extents, 1);
+			       &extents, 1, 0);
 	mark_damaged(pixmap, priv, &extents ,x, y);
 
 	screen->DestroyPixmap(scratch);
@@ -5857,7 +5857,7 @@ trap_upload(PicturePtr picture,
 	sna->render.copy_boxes(sna, GXcopy,
 			       scratch, sna_pixmap_get_bo(scratch), -extents.x1, -extents.x1,
 			       pixmap, priv->gpu_bo, x, y,
-			       &extents, 1);
+			       &extents, 1, 0);
 	mark_damaged(pixmap, priv, &extents, x, y);
 
 	screen->DestroyPixmap(scratch);
commit d36623aaebdc454bd281f34332edcad5d94f34c0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 13:55:25 2012 +0100

    sna/dri: Remove broken code for buffer xchg from AsyncSwap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 44015e4..4519f17 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -46,13 +46,17 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <i915_drm.h>
 #include <dri2.h>
 
+#if DEBUG_DRI
+#undef DBG
+#define DBG(x) ErrorF x
+#endif
+
 #if DRI2INFOREC_VERSION <= 2
 #error DRI2 version supported by the Xserver is too old
 #endif
 
-#if DEBUG_DRI
-#undef DBG
-#define DBG(x) ErrorF x
+#if DRI2INFOREC_VERSION < 9
+#define USE_ASYNC_SWAP 0
 #endif
 
 #define COLOR_PREFER_TILING_Y 0
@@ -1468,7 +1472,7 @@ static void sna_dri_flip_event(struct sna *sna,
 			sna_dri_frame_event_info_free(sna, flip);
 		break;
 
-#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP
 	case DRI2_ASYNC_FLIP:
 		DBG(("%s: async swap flip completed on pipe %d, pending? %d, new? %d\n",
 		     __FUNCTION__, flip->pipe,
@@ -1509,7 +1513,7 @@ finish_async_flip:
 
 			DBG(("%s: async flip completed\n", __FUNCTION__));
 			sna->dri.flip_pending = NULL;
-			sna_dri_frame_event_info_free(fsna, lip);
+			sna_dri_frame_event_info_free(sna, flip);
 		}
 		break;
 #endif
@@ -2028,20 +2032,7 @@ blit_fallback:
 	return TRUE;
 }
 
-#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
-static void
-sna_dri_exchange_attachment(DRI2BufferPtr front, DRI2BufferPtr back)
-{
-	int tmp;
-
-	DBG(("%s(%d <--> %d)\n",
-	     __FUNCTION__, front->attachment, back->attachment));
-
-	tmp = front->attachment;
-	front->attachment = back->attachment;
-	back->attachment = tmp;
-}
-
+#if USE_ASYNC_SWAP
 static Bool
 sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 		   DRI2BufferPtr front, DRI2BufferPtr back,
@@ -2054,34 +2045,23 @@ sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	if (!sna->scrn->vtSema) {
-		PixmapPtr pixmap;
-
-exchange:
-		DBG(("%s: unattached, exchange pixmaps\n", __FUNCTION__));
-
-		pixmap = get_drawable_pixmap(draw);
-		set_bo(pixmap, get_private(back)->bo);
-		sna_dri_exchange_attachment(front, back);
-		get_private(back)->pixmap = pixmap;
-		get_private(front)->pixmap = NULL;
-
-		DRI2SwapComplete(client, draw, 0, 0, 0,
-				 DRI2_EXCHANGE_COMPLETE, func, data);
-		return TRUE;
-	}
-
 	if (!can_flip(sna, draw, front, back)) {
 blit:
-		DBG(("%s: unable to flip, so blit\n", __FUNCTION__));
+		if (can_exchange(sna, draw, front, back)) {
+			DBG(("%s: unable to flip, so xchg\n", __FUNCTION__));
+			sna_dri_exchange_buffers(draw, front, back);
+			name = DRI2_EXCHANGE_COMPLETE;
+		} else {
+			DBG(("%s: unable to flip, so blit\n", __FUNCTION__));
+			sna_dri_copy_to_front(sna, draw, NULL,
+					      get_private(front)->bo,
+					      get_private(back)->bo,
+					      false);
+			name = DRI2_BLIT_COMPLETE;
+		}
 
-		sna_dri_copy_to_front(sna, draw, NULL,
-				      get_private(front)->bo,
-				      get_private(back)->bo,
-				      false);
-		DRI2SwapComplete(client, draw, 0, 0, 0,
-				 DRI2_BLIT_COMPLETE, func, data);
-		return FALSE;
+		DRI2SwapComplete(client, draw, 0, 0, 0, name, func, data);
+		return name == DRI2_EXCHANGE_COMPLETE;
 	}
 
 	bo = NULL;
@@ -2091,7 +2071,7 @@ blit:
 	if (info == NULL) {
 		int pipe = sna_dri_get_pipe(draw);
 		if (pipe == -1)
-			goto exchange;
+			goto blit;
 
 		DBG(("%s: no pending flip, so updating scanout\n",
 		     __FUNCTION__));
@@ -2100,7 +2080,6 @@ blit:
 		if (!info)
 			goto blit;
 
-		info->sna = sna;
 		info->client = client;
 		info->type = DRI2_ASYNC_FLIP;
 		info->pipe = pipe;
@@ -2393,8 +2372,8 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	info.ReuseBufferNotify = NULL;
 #endif
 
-#if USE_AYSYNC_SWAP && DRI2INFOREC_VERSION >= 7
-	info.version = 7;
+#if USE_ASYNC_SWAP
+	info.version = 9;
 	info.AsyncSwap = sna_dri_async_swap;
 #endif
 
commit 0bdb4d0d3693df007a6cfc9a75bb0deddd812d53
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 13:12:43 2012 +0100

    sna/dri: Fixup blit fallback path to use xchg when possible
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index db03f77..44015e4 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1767,6 +1767,7 @@ sna_dri_immediate_xchg(struct sna *sna,
 			priv->chain->chain = info;
 		}
 	} else {
+		sna_dri_exchange_buffers(draw, info->front, info->back);
 		DRI2SwapComplete(info->client, draw, 0, 0, 0,
 				 DRI2_EXCHANGE_COMPLETE,
 				 info->event_complete,
@@ -1928,7 +1929,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	info->type = swap_type;
 	if (divisor == 0) {
-		if (can_exchange(sna, draw, info->front, info->back))
+		if (can_exchange(sna, draw, front, back))
 			sna_dri_immediate_xchg(sna, draw, info);
 		else
 			sna_dri_immediate_blit(sna, draw, info);
@@ -2008,14 +2009,21 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	return TRUE;
 
 blit_fallback:
-	DBG(("%s -- blit\n", __FUNCTION__));
-	sna_dri_copy_to_front(sna, draw, NULL,
-			      get_private(front)->bo,
-			      get_private(back)->bo,
-			      false);
+	if (can_exchange(sna, draw, front, back)) {
+		DBG(("%s -- xchg\n", __FUNCTION__));
+		sna_dri_exchange_buffers(draw, front, back);
+		pipe = DRI2_EXCHANGE_COMPLETE;
+	} else {
+		DBG(("%s -- blit\n", __FUNCTION__));
+		sna_dri_copy_to_front(sna, draw, NULL,
+				      get_private(front)->bo,
+				      get_private(back)->bo,
+				      false);
+		pipe = DRI2_BLIT_COMPLETE;
+	}
 	if (info)
 		sna_dri_frame_event_info_free(sna, info);
-	DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
+	DRI2SwapComplete(client, draw, 0, 0, 0, pipe, func, data);
 	*target_msc = 0; /* offscreen, so zero out target vblank count */
 	return TRUE;
 }
commit aeeed323f9081875a804ae19fb356bee2a61e7b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 11:52:27 2012 +0100

    sna/dri: Select the appropriate copy engine based on the current src ring
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 93e24b8..db03f77 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -436,6 +436,53 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	DamageRegionProcessPending(&pixmap->drawable);
 }
 
+static void sna_dri_select_mode(struct sna *sna, struct kgem_bo *src, bool sync)
+{
+	struct drm_i915_gem_busy busy;
+
+	if (sna->kgem.gen < 60)
+		return;
+
+	if (sync) {
+		DBG(("%s: sync, force RENDER ring\n", __FUNCTION__));
+		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+		return;
+	}
+
+	if (sna->kgem.mode != KGEM_NONE) {
+		DBG(("%s: busy, not switching\n", __FUNCTION__));
+		return;
+	}
+
+	VG_CLEAR(busy);
+	busy.handle = src->handle;
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
+		return;
+
+	DBG(("%s: src busy?=%x\n", __FUNCTION__, busy.busy));
+	if (busy.busy == 0) {
+		DBG(("%s: src is idle, using defaults\n", __FUNCTION__));
+		return;
+	}
+
+	/* Sandybridge introduced a separate ring which it uses to
+	 * perform blits. Switching rendering between rings incurs
+	 * a stall as we wait upon the old ring to finish and
+	 * flush its render cache before we can proceed on with
+	 * the operation on the new ring.
+	 *
+	 * As this buffer, we presume, has just been written to by
+	 * the DRI client using the RENDER ring, we want to perform
+	 * our operation on the same ring, and ideally on the same
+	 * ring as we will flip from (which should be the RENDER ring
+	 * as well).
+	 */
+	if ((busy.busy & 0xffff0000) == 0 || busy.busy & (1 << 16))
+		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	else
+		kgem_set_mode(&sna->kgem, KGEM_BLT);
+}
+
 static struct kgem_bo *
 sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		      struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
@@ -502,21 +549,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
 	}
 
-	if (sna->kgem.gen >= 60) {
-		/* Sandybridge introduced a separate ring which it uses to
-		 * perform blits. Switching rendering between rings incurs
-		 * a stall as we wait upon the old ring to finish and
-		 * flush its render cache before we can proceed on with
-		 * the operation on the new ring.
-		 *
-		 * As this buffer, we presume, has just been written to by
-		 * the DRI client using the RENDER ring, we want to perform
-		 * our operation on the same ring, and ideally on the same
-		 * ring as we will flip from (which should be the RENDER ring
-		 * as well).
-		 */
-		kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	}
+	sna_dri_select_mode(sna, src_bo, flush);
 
 	damage(pixmap, region);
 	if (region) {
@@ -602,8 +635,7 @@ sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
 	}
 
-	if (sna->kgem.gen >= 60)
-		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	sna_dri_select_mode(sna, src_bo, false);
 
 	if (region) {
 		boxes = REGION_RECTS(region);
@@ -656,21 +688,7 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		n = 1;
 	}
 
-	if (sna->kgem.gen >= 60) {
-		/* Sandybridge introduced a separate ring which it uses to
-		 * perform blits. Switching rendering between rings incurs
-		 * a stall as we wait upon the old ring to finish and
-		 * flush its render cache before we can proceed on with
-		 * the operation on the new ring.
-		 *
-		 * As this buffer, we presume, has just been written to by
-		 * the DRI client using the RENDER ring, we want to perform
-		 * our operation on the same ring, and ideally on the same
-		 * ring as we will flip from (which should be the RENDER ring
-		 * as well).
-		 */
-		kgem_set_mode(&sna->kgem, KGEM_RENDER);
-	}
+	sna_dri_select_mode(sna, src_bo, false);
 
 	sna->render.copy_boxes(sna, GXcopy,
 			       (PixmapPtr)draw, src_bo, 0, 0,
commit 5026b6a147f411582af24ffd17c87a81da5eea0b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jul 4 09:39:59 2012 +0100

    uxa: Install the drm_wakeup_handler for vblank events in !use_page_flipping
    
    Even if page-flipping itself is disabled, we still want to allow the
    client to schedule wakeups for some future vblank which requires
    listening to the kernel vblank notifications.
    
    Reported-by: Eric Anholt <eric at anholt.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51699
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 2e2a9b1..a974e34 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1646,6 +1646,10 @@ Bool intel_mode_pre_init(ScrnInfoPtr scrn, int fd, int cpp)
 
 	xf86InitialConfiguration(scrn, TRUE);
 
+	mode->event_context.version = DRM_EVENT_CONTEXT_VERSION;
+	mode->event_context.vblank_handler = intel_vblank_handler;
+	mode->event_context.page_flip_handler = intel_page_flip_handler;
+
 	has_flipping = 0;
 	gp.param = I915_PARAM_HAS_PAGEFLIPPING;
 	gp.value = &has_flipping;
@@ -1655,10 +1659,6 @@ Bool intel_mode_pre_init(ScrnInfoPtr scrn, int fd, int cpp)
 		xf86DrvMsg(scrn->scrnIndex, X_INFO,
 			   "Kernel page flipping support detected, enabling\n");
 		intel->use_pageflipping = TRUE;
-
-		mode->event_context.version = DRM_EVENT_CONTEXT_VERSION;
-		mode->event_context.vblank_handler = intel_vblank_handler;
-		mode->event_context.page_flip_handler = intel_page_flip_handler;
 	}
 
 	intel->modes = mode;
@@ -1668,18 +1668,16 @@ Bool intel_mode_pre_init(ScrnInfoPtr scrn, int fd, int cpp)
 void
 intel_mode_init(struct intel_screen_private *intel)
 {
-	if (intel->use_pageflipping) {
-		struct intel_mode *mode = intel->modes;
+	struct intel_mode *mode = intel->modes;
 
-		/* We need to re-register the mode->fd for the synchronisation
-		 * feedback on every server generation, so perform the
-		 * registration within ScreenInit and not PreInit.
-		 */
-		mode->flip_count = 0;
-		AddGeneralSocket(mode->fd);
-		RegisterBlockAndWakeupHandlers((BlockHandlerProcPtr)NoopDDA,
-					       drm_wakeup_handler, mode);
-	}
+	/* We need to re-register the mode->fd for the synchronisation
+	 * feedback on every server generation, so perform the
+	 * registration within ScreenInit and not PreInit.
+	 */
+	mode->flip_count = 0;
+	AddGeneralSocket(mode->fd);
+	RegisterBlockAndWakeupHandlers((BlockHandlerProcPtr)NoopDDA,
+				       drm_wakeup_handler, mode);
 }
 
 void
commit 79309dd55f20098e12ead5427c811f237d5592fa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 22:27:39 2012 +0100

    sna: check for failure to change cache level on the bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index fceb5d2..944b8f2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3052,10 +3052,9 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		if (bo == NULL)
 			return NULL;
 
-		gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC);
 		bo->reusable = false;
-
-		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
+		if (!gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC) ||
+		    kgem_bo_map__cpu(kgem, bo) == NULL) {
 			kgem_bo_destroy(kgem, bo);
 			return NULL;
 		}
commit 9e2ac8a413c05aafd1c69775c5b0fa5f1f37ac23
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 19:33:55 2012 +0100

    sna: Use set-cache-level ioctl to create CPU bo
    
    As an alternative to vmap, we can use the kernel for all memory
    management through bo, which is much preferred for its simplicity (i.e.
    avoiding introducing even more vm complexity).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 604db7d..d1ddf95 100644
--- a/configure.ac
+++ b/configure.ac
@@ -205,6 +205,16 @@ if test "x$accel" = xno; then
 	AC_MSG_ERROR([No default acceleration option])
 fi
 
+AC_ARG_ENABLE(cache-level,
+	      AS_HELP_STRING([--enable-cache-level],
+			     [Enable use of cache level ioctl (experimental) [default=no]]),
+	      [CACHE_LEVEL="$enableval"],
+	      [CACHE_LEVEL=no])
+AM_CONDITIONAL(USE_CACHE_LEVEL, test x$CACHE_LEVEL = xyes)
+if test "x$CACHE_LEVEL" = xyes; then
+	AC_DEFINE(USE_CACHE_LEVEL,1,[Assume DRM_I915_GEM_SET_CACHE_LEVEL_IOCTL support])
+fi
+
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
 			     [Enable use of vmap (experimental) [default=no]]),
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 43fa705..fceb5d2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -53,9 +53,9 @@
 static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
-
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
+#define DBG_NO_CACHE_LEVEL 0
 #define DBG_NO_VMAP 0
 #define DBG_NO_LLC 0
 #define DBG_NO_SEMAPHORES 0
@@ -103,6 +103,23 @@ struct drm_i915_gem_vmap {
 };
 #endif
 
+#if !defined(DRM_I915_GEM_SET_CACHE_LEVEL)
+#define I915_CACHE_NONE		0
+#define I915_CACHE_LLC		1
+#define I915_CACHE_LLC_MLC	2 /* gen6+ */
+
+struct drm_i915_gem_cache_level {
+	/** Handle of the buffer whose cache level is to be set */
+	__u32 handle;
+
+	/** Cache level to apply or return value */
+	__u32 cache_level;
+};
+
+#define DRM_I915_GEM_SET_CACHE_LEVEL	0x2f
+#define DRM_IOCTL_I915_GEM_SET_CACHE_LEVEL		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHE_LEVEL, struct drm_i915_gem_cache_level)
+#endif
+
 struct kgem_partial_bo {
 	struct kgem_bo base;
 	void *mem;
@@ -175,6 +192,16 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
+static bool gem_set_cache_level(int fd, uint32_t handle, int cache_level)
+{
+	struct drm_i915_gem_cache_level arg;
+
+	VG_CLEAR(arg);
+	arg.handle = handle;
+	arg.cache_level = cache_level;
+	return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHE_LEVEL, &arg) == 0;
+}
+
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
 {
 	if (flags & CREATE_NO_RETIRE) {
@@ -609,6 +636,30 @@ static bool is_hw_supported(struct kgem *kgem)
 	return true;
 }
 
+static bool test_has_cache_level(struct kgem *kgem)
+{
+#if defined(USE_CACHE_LEVEL)
+	uint32_t handle;
+	bool ret;
+
+	if (DBG_NO_CACHE_LEVEL)
+		return false;
+
+	if (kgem->gen == 40) /* XXX sampler dies with snoopable memory */
+		return false;
+
+	handle = gem_create(kgem->fd, 1);
+	if (handle == 0)
+		return false;
+
+	ret = gem_set_cache_level(kgem->fd, handle, I915_CACHE_NONE);
+	gem_close(kgem->fd, handle);
+	return ret;
+#else
+	return false;
+#endif
+}
+
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -658,6 +709,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
+	kgem->has_cache_level = test_has_cache_level(kgem);
+	DBG(("%s: using set-cache-level=%d\n", __FUNCTION__, kgem->has_cache_level));
+
 #if defined(USE_VMAP)
 	if (!DBG_NO_VMAP)
 		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
@@ -688,8 +742,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		}
 		kgem->has_llc = has_llc;
 	}
-	DBG(("%s: cpu bo enabled %d: llc? %d, vmap? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_vmap, kgem->has_llc, kgem->has_vmap));
+	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, vmap? %d\n", __FUNCTION__,
+	     kgem->has_llc | kgem->has_vmap | kgem->has_cache_level,
+	     kgem->has_llc, kgem->has_cache_level, kgem->has_vmap));
 
 	kgem->has_semaphores = false;
 	if (gen >= 60 && semaphores_enabled())
@@ -766,7 +821,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->large_object_size = MAX_CACHE_SIZE;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
-	if (kgem->has_llc | kgem->has_vmap) {
+	if (kgem->has_llc | kgem->has_cache_level | kgem->has_vmap) {
 		if (kgem->large_object_size > kgem->max_cpu_size)
 			kgem->large_object_size = kgem->max_cpu_size;
 	} else
@@ -2616,6 +2671,9 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	size /= PAGE_SIZE;
 	bucket = cache_bucket(size);
 
+	if (flags & CREATE_FORCE)
+		goto create;
+
 	if (bucket >= NUM_CACHE_BUCKETS) {
 		DBG(("%s: large bo num pages=%d, bucket=%d\n",
 		     __FUNCTION__, size, bucket));
@@ -2988,6 +3046,23 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 		return bo;
 	}
 
+	if (kgem->has_cache_level) {
+		bo = kgem_create_2d(kgem, width, height, bpp,
+				    I915_TILING_NONE, flags | CREATE_FORCE);
+		if (bo == NULL)
+			return NULL;
+
+		gem_set_cache_level(kgem->fd, bo->handle, I915_CACHE_LLC);
+		bo->reusable = false;
+
+		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
+			kgem_bo_destroy(kgem, bo);
+			return NULL;
+		}
+
+		return bo;
+	}
+
 	if (kgem->has_vmap) {
 		int stride, size;
 		void *ptr;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f9b2a33..22ae401 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -155,6 +155,7 @@ struct kgem {
 	uint32_t has_vmap :1;
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_semaphores :1;
+	uint32_t has_cache_level :1;
 	uint32_t has_llc :1;
 
 	uint16_t fence_max;
@@ -235,6 +236,7 @@ enum {
 	CREATE_TEMPORARY = 0x20,
 	CREATE_NO_RETIRE = 0x40,
 	CREATE_NO_THROTTLE = 0x40,
+	CREATE_FORCE = 0x80,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
commit f26163e916e21f6b64a19c147165f2f2c9c5466d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 18:51:46 2012 +0100

    sna: Guess when the client is attempting to read back the whole pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 9d36543..8564cfc 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -438,7 +438,8 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_INPLACE_HINT 0x4
 #define MOVE_ASYNC_HINT 0x8
 #define MOVE_SOURCE_HINT 0x10
-#define __MOVE_FORCE 0x20
+#define MOVE_WHOLE_HINT 0x20
+#define __MOVE_FORCE 0x40
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1eae904..537b287 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1187,7 +1187,6 @@ skip_inplace_map:
 		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo))
 			sna_pixmap_free_cpu(sna, priv);
 		sna_damage_destroy(&priv->gpu_damage);
-		priv->undamaged = true;
 	}
 
 	if (pixmap->devPrivate.ptr == NULL &&
@@ -1466,6 +1465,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		goto out;
 	}
 
+	if (flags & MOVE_WHOLE_HINT)
+		return _sna_pixmap_move_to_cpu(pixmap, flags);
+
 	if (priv->gpu_bo == NULL &&
 	    (priv->create & KGEM_CAN_CREATE_GPU) == 0 &&
 	    flags & MOVE_WRITE)
@@ -1872,9 +1874,12 @@ done:
 		sna_damage_reduce_all(&priv->cpu_damage,
 				      pixmap->drawable.width,
 				      pixmap->drawable.height);
-		if (priv->gpu_bo && DAMAGE_IS_ALL(priv->cpu_damage)) {
-			DBG(("%s: replaced entire pixmap\n", __FUNCTION__));
-			sna_pixmap_free_gpu(sna, priv);
+		if (DAMAGE_IS_ALL(priv->cpu_damage)) {
+			if (priv->gpu_bo) {
+				DBG(("%s: replaced entire pixmap\n",
+				     __FUNCTION__));
+				sna_pixmap_free_gpu(sna, priv);
+			}
 			priv->undamaged = false;
 		}
 		if (priv->flush)
@@ -12091,6 +12096,8 @@ sna_get_image(DrawablePtr drawable,
 	flags = MOVE_READ;
 	if ((w | h) == 1)
 		flags |= MOVE_INPLACE_HINT;
+	if (w == drawable->width)
+		flags |= MOVE_WHOLE_HINT;
 	if (!sna_drawable_move_region_to_cpu(drawable, &region, flags))
 		return;
 
commit c597e6cd4cb4406878eae07c04ad420d8185d99e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 13:26:33 2012 +0100

    sna: Fix iterator typo
    
    Increment the iterator, not the end-stop.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 65d09b9..1eae904 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3973,7 +3973,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			dx = -region.extents.x1;
 			dy = -region.extents.y1;
-			for (i = 0; i < n; n++) {
+			for (i = 0; i < n; i++) {
 				assert(box[i].x1 + src_dx >= 0);
 				assert(box[i].y1 + src_dy >= 0);
 				assert(box[i].x2 + src_dx <= src_pixmap->drawable.width);
commit 2d087eadd9cd3aa2e2ccd73a568286d21702d29a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 11:54:02 2012 +0100

    sna: Also prefer to use the dst GPU bo if CopyArea reduces to an inplace upload
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b474864..65d09b9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3716,16 +3716,24 @@ source_prefer_gpu(struct sna_pixmap *priv)
 {
 	unsigned flags;
 
-	if (priv == NULL)
+	if (priv == NULL) {
+		DBG(("%s: source unattached, use cpu\n", __FUNCTION__));
 		return 0;
+	}
 
-	if (priv->gpu_damage)
+	if (priv->gpu_damage) {
+		DBG(("%s: source has gpu damage, force gpu\n", __FUNCTION__));
 		return PREFER_GPU | FORCE_GPU;
+	}
 
-	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+		DBG(("%s: source has busy CPU bo, force gpu\n", __FUNCTION__));
 		return PREFER_GPU | FORCE_GPU;
+	}
 
-	return PREFER_GPU;
+	DBG(("%s: source has GPU bo? %d\n",
+	     __FUNCTION__, priv->gpu_bo != NULL));
+	return priv->gpu_bo != NULL;
 }
 
 static void
@@ -3811,7 +3819,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	}
 
 	bo = sna_drawable_use_bo(&dst_pixmap->drawable,
-				 source_prefer_gpu(src_priv),
+				 source_prefer_gpu(src_priv) ?:
+				 region_inplace(sna, dst_pixmap, &region,
+						dst_priv, alu_overwrites(alu)),
 				 &region.extents, &damage);
 	if (bo) {
 		if (src_priv && src_priv->clear) {
commit dbe3a5ca1e6558bcee29d893aba8cc352220a36d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 11:48:15 2012 +0100

    sna: Attempt to reduce all-damage on the CPU
    
    As we now treat CPU all-damaged as a special case for deciding when and
    where to migrate, look out for that condition after adding damage.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 28b964d..b474864 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1869,10 +1869,10 @@ done:
 		assert(!DAMAGE_IS_ALL(priv->cpu_damage));
 		assert_pixmap_contains_box(pixmap, RegionExtents(region));
 		sna_damage_add(&priv->cpu_damage, region);
-		if (priv->gpu_bo &&
-		    sna_damage_is_all(&priv->cpu_damage,
+		sna_damage_reduce_all(&priv->cpu_damage,
 				      pixmap->drawable.width,
-				      pixmap->drawable.height)) {
+				      pixmap->drawable.height);
+		if (priv->gpu_bo && DAMAGE_IS_ALL(priv->cpu_damage)) {
 			DBG(("%s: replaced entire pixmap\n", __FUNCTION__));
 			sna_pixmap_free_gpu(sna, priv);
 			priv->undamaged = false;
commit 3a41248195e8b327a5d970726450bd2077cdaf0f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 11:22:54 2012 +0100

    sna: Operate on the original boxes for CopyArea
    
    Be consistent and avoid the confusion when mixing operations on the
    region boxes and the original boxes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8db89ca..28b964d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3796,14 +3796,12 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     replaces));
 
 	RegionTranslate(&region, dst_dx, dst_dy);
-	src_dx -= dst_dx;
-	src_dy -= dst_dy;
 
 	if (dst_priv == NULL)
 		goto fallback;
 
 	if (dst_priv->cpu_damage && alu_overwrites(alu)) {
-		DBG(("%s: overwritting CPU damage\n", _FUNCTION__));
+		DBG(("%s: overwritting CPU damage\n", __FUNCTION__));
 		sna_damage_subtract(&dst_priv->cpu_damage, &region);
 		if (dst_priv->cpu_damage == NULL) {
 			list_del(&dst_priv->list);
@@ -3821,8 +3819,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			     __FUNCTION__, src_priv->clear_color));
 			assert_pixmap_contains_box(dst_pixmap,
 						   RegionExtents(&region));
-			box = REGION_RECTS(&region);
-			n = REGION_NUM_RECTS(&region);
 			if (n == 1) {
 				if (!sna->render.fill_one(sna,
 							  dst_pixmap, bo,
@@ -3862,7 +3858,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			     __FUNCTION__));
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, 0, 0,
+						    dst_pixmap, bo, dst_dx, dst_dy,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -3897,7 +3893,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, 0, 0,
+						    dst_pixmap, bo, dst_dx, dst_dy,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -3923,17 +3919,17 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			assert(bo != dst_priv->cpu_bo);
 
-			RegionTranslate(&region, src_dx, src_dy);
+			RegionTranslate(&region, src_dx-dst_dx, src_dy-dst_dy);
 			ret = sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
 							      &region,
 							      MOVE_READ | MOVE_ASYNC_HINT);
-			RegionTranslate(&region, -src_dx, -src_dy);
+			RegionTranslate(&region, dst_dx-src_dx, dst_dy-src_dy);
 			if (!ret)
 				goto fallback;
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
-						    dst_pixmap, bo, 0, 0,
+						    dst_pixmap, bo, dst_dx, dst_dy,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -3993,7 +3989,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
-						    dst_pixmap, bo, 0, 0,
+						    dst_pixmap, bo, dst_dx, dst_dy,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -4074,9 +4070,6 @@ fallback:
 		DBG(("%s: copying clear [%08x]\n",
 		     __FUNCTION__, src_priv->clear_color));
 
-		box = REGION_RECTS(&region);
-		n = REGION_NUM_RECTS(&region);
-
 		if (dst_priv) {
 			assert_pixmap_contains_box(dst_pixmap,
 						   RegionExtents(&region));
@@ -4091,7 +4084,7 @@ fallback:
 			pixman_fill(dst_pixmap->devPrivate.ptr,
 				    dst_pixmap->devKind/sizeof(uint32_t),
 				    dst_pixmap->drawable.bitsPerPixel,
-				    box->x1, box->y1,
+				    box->x1 + dst_dx, box->y1 + dst_dy,
 				    box->x2 - box->x1,
 				    box->y2 - box->y1,
 				    src_priv->clear_color);
@@ -4106,7 +4099,7 @@ fallback:
 		if (src_priv) {
 			unsigned mode;
 
-			RegionTranslate(&region, src_dx, src_dy);
+			RegionTranslate(&region, src_dx-dst_dx, src_dy-dst_dy);
 
 			assert_pixmap_contains_box(src_pixmap,
 						   RegionExtents(&region));
@@ -4119,7 +4112,7 @@ fallback:
 							     &region, mode))
 				goto out;
 
-			RegionTranslate(&region, -src_dx, -src_dy);
+			RegionTranslate(&region, dst_dx-src_dx, dst_dy-src_dy);
 		}
 
 		if (dst_priv) {
@@ -4140,8 +4133,6 @@ fallback:
 		dst_stride = dst_pixmap->devKind;
 		src_stride = src_pixmap->devKind;
 
-		src_dx += dst_dx;
-		src_dy += dst_dy;
 		if (alu == GXcopy && bpp >= 8) {
 			dst_bits = (FbBits *)
 				((char *)dst_pixmap->devPrivate.ptr +
commit e3bc91842bf186ec149c852f48993235ef7ad27f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jul 3 10:21:06 2012 +0100

    sna: Allow booting on older kernels by disabling HW acceleration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 76f6cae..43fa705 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -595,6 +595,20 @@ static bool __kgem_throttle(struct kgem *kgem)
 	return drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == -EIO;
 }
 
+static bool is_hw_supported(struct kgem *kgem)
+{
+	if (DBG_NO_HW)
+		return false;
+
+	if (kgem->gen >= 60) /* Only if the kernel supports the BLT ring */
+		return gem_param(kgem, I915_PARAM_HAS_BLT) > 0;
+
+	if (kgem->gen <= 20) /* dynamic GTT is fubar */
+		return false;
+
+	return true;
+}
+
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -607,7 +621,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->fd = fd;
 	kgem->gen = gen;
 	kgem->wedged = __kgem_throttle(kgem);
-	kgem->wedged |= DBG_NO_HW;
+	kgem->wedged |= !is_hw_supported(kgem);
 
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 047b865..f7eeca5 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -294,8 +294,7 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	struct sna *sna = to_sna(scrn);
 	struct pci_device *pci = sna->PciInfo;
 	drmSetVersion sv;
-	struct drm_i915_getparam gp;
-	int err, val;
+	int err;
 	char busid[20];
 	int fd;
 
@@ -335,18 +334,6 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 		return -1;
 	}
 
-	val = FALSE;
-
-	VG_CLEAR(gp);
-	gp.param = I915_PARAM_HAS_BLT;
-	gp.value = &val;
-	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp)) {
-		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "Failed to detect BLT.  Kernel 2.6.37 required.\n");
-		drmClose(fd);
-		return -1;
-	}
-
 	dev = malloc(sizeof(*dev));
 	if (dev) {
 		int flags;
commit affdebcb2d09cd8fdc5aadb2d8df3193587e4a06
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 18:50:26 2012 +0100

    sna: And free the DIR after use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 43cf15a..f1a0b84 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -391,6 +391,7 @@ sna_output_backlight_init(xf86OutputPtr output)
 			}
 		}
 	}
+	closedir(dir);
 
 	sna_output->backlight_iface = NULL;
 
commit e7b31b6d0a32f76db4a8aef64c77d4afe808fb6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 14:01:36 2012 +0100

    sna: Consolidate CopyArea with the aim of reducing migration ping-pong
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6550d25..8db89ca 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -65,7 +65,6 @@
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
 #define USE_SHM_VMAP 0
-#define PREFER_VMAP 0
 
 #define MIGRATE_ALL 0
 
@@ -1321,12 +1320,19 @@ region_subsumes_damage(const RegionRec *region, struct sna_damage *damage)
 }
 
 static bool
-region_overlaps_damage(const RegionRec *region, struct sna_damage *damage)
+region_overlaps_damage(const RegionRec *region,
+		       struct sna_damage *damage,
+		       int dx, int dy)
 {
 	const BoxRec *re, *de;
 
 	DBG(("%s?\n", __FUNCTION__));
-	assert(damage);
+
+	if (damage == NULL)
+		return false;
+
+	if (DAMAGE_IS_ALL(damage))
+		return true;
 
 	re = &region->extents;
 	de = &DAMAGE_PTR(damage)->extents;
@@ -1335,8 +1341,8 @@ region_overlaps_damage(const RegionRec *region, struct sna_damage *damage)
 	     re->x1, re->y1, re->x2, re->y2,
 	     de->x1, de->y1, de->x2, de->y2));
 
-	return (re->x1 < de->x2 && re->x2 > de->x1 &&
-		re->y1 < de->y2 && re->y2 > de->y1);
+	return (re->x1 + dx < de->x2 && re->x2 + dx > de->x1 &&
+		re->y1 + dy < de->y2 && re->y2 + dy > de->y1);
 }
 
 #ifndef NDEBUG
@@ -1378,8 +1384,8 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 	}
 
-	if (!write_only && priv->cpu_damage &&
-	    region_overlaps_damage(region, priv->cpu_damage)) {
+	if (!write_only &&
+	    region_overlaps_damage(region, priv->cpu_damage, 0, 0)) {
 		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
 		return false;
 	}
@@ -2189,9 +2195,12 @@ box_inplace(PixmapPtr pixmap, const BoxRec *box)
 	return ((box->x2 - box->x1) * (box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 15) >= sna->kgem.half_cpu_cache_pages;
 }
 
+#define PREFER_GPU 1
+#define FORCE_GPU 2
+
 static inline struct kgem_bo *
 sna_drawable_use_bo(DrawablePtr drawable,
-		    bool prefer_gpu,
+		    int prefer_gpu,
 		    const BoxRec *box,
 		    struct sna_damage ***damage)
 {
@@ -2201,8 +2210,11 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	int16_t dx, dy;
 	int ret;
 
-	DBG(("%s((%d, %d), (%d, %d))...\n", __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2));
+	DBG(("%s pixmap=%ld, box=((%d, %d), (%d, %d)), prefer_gpu?=%d...\n",
+	     __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     box->x1, box->y1, box->x2, box->y2,
+	     prefer_gpu));
 
 	assert_pixmap_damage(pixmap);
 	assert_drawable_contains_box(drawable, box);
@@ -2220,11 +2232,11 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->flush)
-		prefer_gpu = true;
-	if (priv->cpu)
-		prefer_gpu = false;
+		prefer_gpu |= PREFER_GPU;
+	if (priv->cpu && (prefer_gpu & FORCE_GPU) == 0)
+		prefer_gpu = 0;
 
-	if (!prefer_gpu && priv->gpu_bo && !kgem_bo_is_busy(priv->gpu_bo))
+	if (!prefer_gpu && (!priv->gpu_bo || !kgem_bo_is_busy(priv->gpu_bo)))
 		goto use_cpu_bo;
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
@@ -2234,7 +2246,10 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 
 	if (priv->gpu_bo == NULL) {
-		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0) {
+		unsigned int flags;
+
+		if ((prefer_gpu & FORCE_GPU) == 0 &&
+		    (priv->create & KGEM_CAN_CREATE_GPU) == 0) {
 			DBG(("%s: untiled, will not force allocation\n",
 			     __FUNCTION__));
 			goto use_cpu_bo;
@@ -2246,13 +2261,16 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
-		if (priv->cpu_damage && !prefer_gpu) {
+		if (priv->cpu_damage && prefer_gpu == 0) {
 			DBG(("%s: prefer cpu",
 			     __FUNCTION__));
 			goto use_cpu_bo;
 		}
 
-		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ))
+		flags = MOVE_WRITE | MOVE_READ;
+		if (prefer_gpu & FORCE_GPU)
+			flags |= __MOVE_FORCE;
+		if (!sna_pixmap_move_to_gpu(pixmap, flags))
 			goto use_cpu_bo;
 
 		DBG(("%s: allocated GPU bo for operation\n", __FUNCTION__));
@@ -2362,7 +2380,7 @@ use_cpu_bo:
 	/* Continue to use the shadow pixmap once mapped */
 	if (pixmap->devPrivate.ptr) {
 		/* But only if we do not need to sync the CPU bo */
-		if (!kgem_bo_is_busy(priv->cpu_bo))
+		if (prefer_gpu == 0 && !kgem_bo_is_busy(priv->cpu_bo))
 			return NULL;
 
 		/* Both CPU and GPU are busy, prefer to use the GPU */
@@ -2498,69 +2516,11 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 	}
 
 	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
-	if (priv->gpu_bo == NULL) {
-		struct sna *sna = to_sna_from_pixmap(pixmap);
-		unsigned mode;
-
-		DBG(("%s: forcing creation of  gpu bo (%dx%d@%d, flags=%x)\n",
-		     __FUNCTION__,
-		     pixmap->drawable.width,
-		     pixmap->drawable.height,
-		     pixmap->drawable.bitsPerPixel,
-		     priv->create));
-
-		mode = 0;
-		if (priv->cpu_damage && !priv->cpu_bo)
-			mode |= CREATE_INACTIVE;
-		if (pixmap->usage_hint == SNA_CREATE_FB)
-			mode |= CREATE_EXACT | CREATE_SCANOUT;
-
-		priv->gpu_bo = kgem_create_2d(&sna->kgem,
-					      pixmap->drawable.width,
-					      pixmap->drawable.height,
-					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap,
-								       DEFAULT_TILING),
-					      mode);
-		if (priv->gpu_bo == NULL)
-			return NULL;
-
-		DBG(("%s: created gpu bo\n", __FUNCTION__));
-
-		if (flags & MOVE_WRITE && priv->cpu_damage == NULL) {
-			/* Presume that we will only ever write to the GPU
-			 * bo. Readbacks are expensive but fairly constant
-			 * in cost for all sizes i.e. it is the act of
-			 * synchronisation that takes the most time. This is
-			 * mitigated by avoiding fallbacks in the first place.
-			 */
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height);
-			list_del(&priv->list);
-			priv->undamaged = false;
-			DBG(("%s: marking as all-damaged for GPU\n",
-			     __FUNCTION__));
-		}
-	}
-
 	if (!sna_pixmap_move_to_gpu(pixmap, flags | __MOVE_FORCE))
 		return NULL;
 
 	assert(!priv->cpu);
 
-	/* For large bo, try to keep only a single copy around */
-	if (priv->create & KGEM_CAN_CREATE_LARGE && priv->ptr) {
-		sna_damage_all(&priv->gpu_damage,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height);
-		sna_damage_destroy(&priv->cpu_damage);
-		priv->undamaged = false;
-		list_del(&priv->list);
-		assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
-		sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv);
-	}
-
 	return priv;
 }
 
@@ -2611,7 +2571,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		     pixmap->drawable.bitsPerPixel,
 		     priv->create));
 		assert(!priv->mapped);
-		if (!wedged(sna) && priv->create & KGEM_CAN_CREATE_GPU) {
+		if (flags & __MOVE_FORCE || priv->create & KGEM_CAN_CREATE_GPU) {
 			assert(pixmap->drawable.width > 0);
 			assert(pixmap->drawable.height > 0);
 			assert(pixmap->drawable.bitsPerPixel >= 8);
@@ -2706,6 +2666,13 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	__sna_damage_destroy(DAMAGE_PTR(priv->cpu_damage));
 	priv->cpu_damage = NULL;
 	priv->undamaged = true;
+
+	/* For large bo, try to keep only a single copy around */
+	if (priv->create & KGEM_CAN_CREATE_LARGE)
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+
 done:
 	list_del(&priv->list);
 
@@ -3251,7 +3218,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	bo = sna_drawable_use_bo(&pixmap->drawable, true,
+	bo = sna_drawable_use_bo(&pixmap->drawable, PREFER_GPU,
 				 &region->extents, &damage);
 	if (bo == NULL)
 		return false;
@@ -3375,7 +3342,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	if (gc->alu != GXcopy)
 		return false;
 
-	bo = sna_drawable_use_bo(&pixmap->drawable, true,
+	bo = sna_drawable_use_bo(&pixmap->drawable, PREFER_GPU,
 				 &region->extents, &damage);
 	if (bo == NULL)
 		return false;
@@ -3612,24 +3579,35 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	int w = box->x2 - box->x1;
 	int h = box->y2 - box->y1;
 
-	if (priv->gpu_bo)
-		return TRUE;
+	if (DAMAGE_IS_ALL(priv->gpu_damage))
+		return true;
 
-	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
-		return FALSE;
+	if (DAMAGE_IS_ALL(priv->cpu_damage))
+		return false;
+
+	if (priv->gpu_bo) {
+		if (alu != GXcopy)
+			return true;
+
+		if (!priv->cpu)
+			return true;
+	} else {
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
+			return false;
+	}
 
 	if (priv->cpu_bo) {
 		if (sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) == I915_TILING_NONE)
-			return FALSE;
+			return false;
+
+		if (priv->cpu)
+			return false;
 
 		return (priv->source_count++-SOURCE_BIAS) * w*h >=
 			(int)pixmap->drawable.width * pixmap->drawable.height;
+	} else {
+		return ++priv->source_count * w*h >= (SOURCE_BIAS+2) * (int)pixmap->drawable.width * pixmap->drawable.height;
 	}
-
-	if (alu != GXcopy)
-		return TRUE;
-
-	return ++priv->source_count * w*h >= (SOURCE_BIAS+2) * (int)pixmap->drawable.width * pixmap->drawable.height;
 }
 
 static void
@@ -3733,49 +3711,21 @@ fallback:
 	}
 }
 
-static bool copy_use_gpu_bo(struct sna *sna,
-			    struct sna_pixmap *priv,
-			    RegionPtr region,
-			    bool write_only)
+static int
+source_prefer_gpu(struct sna_pixmap *priv)
 {
-	if (region_inplace(sna, priv->pixmap, region, priv, write_only)) {
-		DBG(("%s: perform in place, use gpu bo\n", __FUNCTION__));
-		return true;
-	}
-
-	if (!priv->cpu_bo) {
-		DBG(("%s: no cpu bo, copy to shadow\n", __FUNCTION__));
-		return false;
-	}
-
-	if (kgem_bo_is_busy(priv->cpu_bo)) {
-		if (priv->cpu_bo->exec) {
-			DBG(("%s: cpu bo is busy, use gpu bo\n", __FUNCTION__));
-			return true;
-		}
-
-		kgem_retire(&sna->kgem);
-	}
+	unsigned flags;
 
-	DBG(("%s: cpu bo busy? %d\n", __FUNCTION__,
-	     kgem_bo_is_busy(priv->cpu_bo)));
-	return kgem_bo_is_busy(priv->cpu_bo);
-}
+	if (priv == NULL)
+		return 0;
 
-static bool
-copy_use_cpu_bo(struct sna_pixmap *priv, struct kgem_bo *dst_bo)
-{
-	if (priv == NULL || priv->cpu_bo == NULL)
-		return false;
+	if (priv->gpu_damage)
+		return PREFER_GPU | FORCE_GPU;
 
-	if (PREFER_VMAP) {
-		return true;
-	} else {
-		if (kgem_bo_is_busy(priv->cpu_bo) || kgem_bo_is_busy(dst_bo))
-			return true;
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return PREFER_GPU | FORCE_GPU;
 
-		return !priv->cpu_bo->sync;
-	}
+	return PREFER_GPU;
 }
 
 static void
@@ -3790,6 +3740,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	PixmapPtr dst_pixmap = get_drawable_pixmap(dst);
 	struct sna_pixmap *dst_priv = sna_pixmap(dst_pixmap);
 	struct sna *sna = to_sna_from_pixmap(src_pixmap);
+	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	int alu = gc ? gc->alu : GXcopy;
 	int16_t src_dx, src_dy;
 	int16_t dst_dx, dst_dy;
@@ -3827,7 +3779,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	src_dx += dx;
 	src_dy += dy;
 
-	replaces = alu == GXcopy && n == 1 &&
+	replaces = n == 1 &&
 		box->x1 + dst_dx <= 0 &&
 		box->y1 + dst_dy <= 0 &&
 		box->x2 + dst_dx >= dst_pixmap->drawable.width &&
@@ -3843,80 +3795,37 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     src_priv ? src_priv->cpu_bo : NULL,
 	     replaces));
 
-	if (dst_priv == NULL)
-		goto fallback;
-
-	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
-		DBG(("%s: discarding cached upload\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
-		dst_priv->gpu_bo = NULL;
-	}
-
-	if (replaces) {
-		sna_damage_destroy(&dst_priv->gpu_damage);
-		sna_damage_destroy(&dst_priv->cpu_damage);
-		list_del(&dst_priv->list);
-		dst_priv->undamaged = true;
-		dst_priv->clear = false;
-		dst_priv->cpu = false;
-	}
+	RegionTranslate(&region, dst_dx, dst_dy);
+	src_dx -= dst_dx;
+	src_dy -= dst_dy;
 
-	if (src_priv == NULL &&
-	    !copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
-		DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
-		     __FUNCTION__));
+	if (dst_priv == NULL)
 		goto fallback;
-	}
 
-	/* Try to maintain the data on the GPU */
-	if (dst_priv->gpu_bo == NULL &&
-	    ((dst_priv->cpu_damage == NULL &&
-	      copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) ||
-	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
-		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap,
-							   DEFAULT_TILING);
-
-		DBG(("%s: create dst GPU bo for upload\n", __FUNCTION__));
-
-		dst_priv->gpu_bo =
-			kgem_create_2d(&sna->kgem,
-				       dst_pixmap->drawable.width,
-				       dst_pixmap->drawable.height,
-				       dst_pixmap->drawable.bitsPerPixel,
-				       tiling, 0);
-	}
-
-	if (dst_priv->gpu_bo) {
-		if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-			BoxRec extents = region.extents;
-			extents.x1 += dst_dx;
-			extents.x2 += dst_dx;
-			extents.y1 += dst_dy;
-			extents.y2 += dst_dy;
-			if (!sna_pixmap_move_area_to_gpu(dst_pixmap, &extents,
-							 MOVE_WRITE | (n == 1 && alu_overwrites(alu) ? 0 : MOVE_READ))) {
-				DBG(("%s: fallback - not a pure copy and failed to move dst to GPU\n",
-				     __FUNCTION__));
-				goto fallback;
-			}
-		} else {
-			dst_priv->clear = false;
-			if (!dst_priv->pinned &&
-			    (dst_priv->create & KGEM_CAN_CREATE_LARGE) == 0)
-				list_move(&dst_priv->inactive,
-					  &sna->active_pixmaps);
+	if (dst_priv->cpu_damage && alu_overwrites(alu)) {
+		DBG(("%s: overwritting CPU damage\n", _FUNCTION__));
+		sna_damage_subtract(&dst_priv->cpu_damage, &region);
+		if (dst_priv->cpu_damage == NULL) {
+			list_del(&dst_priv->list);
+			dst_priv->undamaged = false;
+			dst_priv->cpu = false;
 		}
+	}
 
+	bo = sna_drawable_use_bo(&dst_pixmap->drawable,
+				 source_prefer_gpu(src_priv),
+				 &region.extents, &damage);
+	if (bo) {
 		if (src_priv && src_priv->clear) {
 			DBG(("%s: applying src clear[%08x] to dst\n",
 			     __FUNCTION__, src_priv->clear_color));
-			RegionTranslate(&region, dst_dx, dst_dy);
+			assert_pixmap_contains_box(dst_pixmap,
+						   RegionExtents(&region));
 			box = REGION_RECTS(&region);
 			n = REGION_NUM_RECTS(&region);
 			if (n == 1) {
 				if (!sna->render.fill_one(sna,
-							  dst_pixmap,
-							  dst_priv->gpu_bo,
+							  dst_pixmap, bo,
 							  src_priv->clear_color,
 							  box->x1, box->y1,
 							  box->x2, box->y2,
@@ -3929,7 +3838,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				struct sna_fill_op fill;
 
 				if (!sna_fill_init_blt(&fill, sna,
-						       dst_pixmap, dst_priv->gpu_bo,
+						       dst_pixmap, bo,
 						       alu, src_priv->clear_color)) {
 					DBG(("%s: unsupported fill\n",
 					     __FUNCTION__));
@@ -3940,87 +3849,106 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				fill.done(sna, &fill);
 			}
 
-			dst_priv->cpu = false;
-			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-				if (replaces) {
-					sna_damage_destroy(&dst_priv->cpu_damage);
-					sna_damage_all(&dst_priv->gpu_damage,
-						       dst_pixmap->drawable.width,
-						       dst_pixmap->drawable.height);
-					list_del(&dst_priv->list);
-					dst_priv->undamaged = false;
-				} else {
-					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(&region));
-					sna_damage_add(&dst_priv->gpu_damage, &region);
-					assert_pixmap_damage(dst_pixmap);
-				}
-			}
+			if (damage)
+				sna_damage_add(damage, &region);
 
-			if (replaces) {
-				DBG(("%s: mark dst as clear\n", __FUNCTION__));
-				dst_priv->clear = true;
-				dst_priv->clear_color = src_priv->clear_color;
-			}
-		} else if (src_priv &&
+			goto out;
+		}
+
+		if (src_priv &&
 		    move_to_gpu(src_pixmap, src_priv, &region.extents, alu) &&
 		    sna_pixmap_move_to_gpu(src_pixmap, MOVE_READ)) {
+			DBG(("%s: move whole src_pixmap to GPU and copy\n",
+			     __FUNCTION__));
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				goto fallback;
 			}
 
-			dst_priv->cpu = false;
-			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-				if (replaces) {
-					sna_damage_destroy(&dst_priv->cpu_damage);
-					sna_damage_all(&dst_priv->gpu_damage,
-						       dst_pixmap->drawable.width,
-						       dst_pixmap->drawable.height);
-					list_del(&dst_priv->list);
-					dst_priv->undamaged = false;
-				} else {
-					RegionTranslate(&region, dst_dx, dst_dy);
-					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(&region));
-					sna_damage_add(&dst_priv->gpu_damage, &region);
-					RegionTranslate(&region, -dst_dx, -dst_dy);
-				}
-				assert_pixmap_damage(dst_pixmap);
+			if (damage) {
+				assert_pixmap_contains_box(dst_pixmap,
+							   RegionExtents(&region));
+				sna_damage_add(damage, &region);
 			}
-		} else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) {
+			goto out;
+		}
+
+		if (src_priv &&
+		    region_overlaps_damage(&region, src_priv->gpu_damage,
+					   src_dx, src_dy)) {
+			BoxRec area;
+
+			DBG(("%s: region overlaps GPU damage, upload and copy\n",
+			     __FUNCTION__));
+
+			area = region.extents;
+			area.x1 += src_dx;
+			area.x2 += src_dx;
+			area.y1 += src_dy;
+			area.y2 += src_dy;
+
+			if (!sna_pixmap_move_area_to_gpu(src_pixmap, &area,
+							 MOVE_READ))
+				goto fallback;
+
+			if (!sna->render.copy_boxes(sna, alu,
+						    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
+						    dst_pixmap, bo, 0, 0,
+						    box, n)) {
+				DBG(("%s: fallback - accelerated copy boxes failed\n",
+				     __FUNCTION__));
+				goto fallback;
+			}
+
+			if (damage) {
+				assert_pixmap_contains_box(dst_pixmap,
+							   RegionExtents(&region));
+				sna_damage_add(damage, &region);
+			}
+			goto out;
+		}
+
+		if (bo != dst_priv->gpu_bo)
+			goto fallback;
+
+		if (src_priv && src_priv->cpu_bo) {
+			bool ret;
+
+			DBG(("%s: region overlaps CPU damage, copy from CPU bo\n",
+			     __FUNCTION__));
+
+			assert(bo != dst_priv->cpu_bo);
+
+			RegionTranslate(&region, src_dx, src_dy);
+			ret = sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
+							      &region,
+							      MOVE_READ | MOVE_ASYNC_HINT);
+			RegionTranslate(&region, -src_dx, -src_dy);
+			if (!ret)
+				goto fallback;
+
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
-						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
 				goto fallback;
 			}
 
-			dst_priv->cpu = false;
-			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-				if (replaces) {
-					sna_damage_destroy(&dst_priv->cpu_damage);
-					sna_damage_all(&dst_priv->gpu_damage,
-						       dst_pixmap->drawable.width,
-						       dst_pixmap->drawable.height);
-					list_del(&dst_priv->list);
-					dst_priv->undamaged = false;
-				} else {
-					RegionTranslate(&region, dst_dx, dst_dy);
-					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(&region));
-					sna_damage_add(&dst_priv->gpu_damage, &region);
-					RegionTranslate(&region, -dst_dx, -dst_dy);
-				}
-				assert_pixmap_damage(dst_pixmap);
+			if (damage) {
+				assert_pixmap_contains_box(dst_pixmap,
+							   RegionExtents(&region));
+				sna_damage_add(damage, &region);
 			}
-		} else if (alu != GXcopy) {
+			goto out;
+		}
+
+		if (alu != GXcopy) {
 			PixmapPtr tmp;
 			int i;
 
@@ -4030,23 +3958,25 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			     __FUNCTION__, alu));
 
 			tmp = sna_pixmap_create_upload(src->pScreen,
-						       src->width,
-						       src->height,
+						       region.extents.x2 - region.extents.x1,
+						       region.extents.y2 - region.extents.y1,
 						       src->depth,
 						       KGEM_BUFFER_WRITE_INPLACE);
 			if (tmp == NullPixmap)
 				return;
 
-			for (i = 0; i < n; i++) {
-				assert(box->x1 + src_dx >= 0);
-				assert(box->y1 + src_dy >= 0);
-				assert(box->x2 + src_dx <= src_pixmap->drawable.width);
-				assert(box->y2 + src_dy <= src_pixmap->drawable.height);
+			dx = -region.extents.x1;
+			dy = -region.extents.y1;
+			for (i = 0; i < n; n++) {
+				assert(box[i].x1 + src_dx >= 0);
+				assert(box[i].y1 + src_dy >= 0);
+				assert(box[i].x2 + src_dx <= src_pixmap->drawable.width);
+				assert(box[i].y2 + src_dy <= src_pixmap->drawable.height);
 
-				assert(box->x1 + dx >= 0);
-				assert(box->y1 + dy >= 0);
-				assert(box->x2 + dx <= tmp->drawable.width);
-				assert(box->y2 + dy <= tmp->drawable.height);
+				assert(box[i].x1 + dx >= 0);
+				assert(box[i].y1 + dy >= 0);
+				assert(box[i].x2 + dx <= tmp->drawable.width);
+				assert(box[i].y2 + dy <= tmp->drawable.height);
 
 				memcpy_blt(src_pixmap->devPrivate.ptr,
 					   tmp->devPrivate.ptr,
@@ -4063,7 +3993,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 			if (!sna->render.copy_boxes(sna, alu,
 						    tmp, sna_pixmap_get_bo(tmp), dx, dy,
-						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
+						    dst_pixmap, bo, 0, 0,
 						    box, n)) {
 				DBG(("%s: fallback - accelerated copy boxes failed\n",
 				     __FUNCTION__));
@@ -4072,20 +4002,22 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 			tmp->drawable.pScreen->DestroyPixmap(tmp);
 
-			dst_priv->cpu = false;
-			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-				RegionTranslate(&region, dst_dx, dst_dy);
+			if (damage) {
 				assert_pixmap_contains_box(dst_pixmap,
 							   RegionExtents(&region));
-				sna_damage_add(&dst_priv->gpu_damage, &region);
-				RegionTranslate(&region, -dst_dx, -dst_dy);
+				sna_damage_add(damage, &region);
 			}
-			assert_pixmap_damage(dst_pixmap);
+			goto out;
 		} else {
+			DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
+			     __FUNCTION__));
+
 			if (src_priv) {
-				/* Fixup the shadow pointer as neccessary */
-				assert(!src_priv->gpu_bo);
-				assert(!src_priv->mapped);
+				/* Fixup the shadow pointer as necessary */
+				if (src_priv->mapped) {
+					src_pixmap->devPrivate.ptr = NULL;
+					src_priv->mapped = false;
+				}
 				if (src_pixmap->devPrivate.ptr == NULL) {
 					if (!src_priv->ptr) /* uninitialised!*/
 						goto out;
@@ -4099,26 +4031,12 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				stride = src_pixmap->devKind;
 				bits = src_pixmap->devPrivate.ptr;
 				bits += (src_dy + box->y1) * stride + (src_dx + box->x1) * bpp / 8;
-				assert(src_dy + box->y1 + dst_pixmap->drawable.height <= src_pixmap->drawable.height);
-				assert(src_dx + box->x1 + dst_pixmap->drawable.width <= src_pixmap->drawable.width);
 
 				if (!sna_replace(sna, dst_pixmap,
 						 &dst_priv->gpu_bo,
 						 bits, stride))
 					goto fallback;
-
-				dst_priv->cpu = false;
-				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-					sna_damage_destroy(&dst_priv->cpu_damage);
-					sna_damage_all(&dst_priv->gpu_damage,
-						       dst_pixmap->drawable.width,
-						       dst_pixmap->drawable.height);
-					list_del(&dst_priv->list);
-					dst_priv->undamaged = false;
-				}
 			} else {
-				DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
-				     __FUNCTION__));
 				assert(!DAMAGE_IS_ALL(dst_priv->cpu_damage));
 				if (!sna_write_boxes(sna, dst_pixmap,
 						     dst_priv->gpu_bo, dst_dx, dst_dy,
@@ -4127,60 +4045,28 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						     src_dx, src_dy,
 						     box, n))
 					goto fallback;
+			}
 
-				dst_priv->cpu = false;
-				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-					if (replaces) {
-						sna_damage_destroy(&dst_priv->cpu_damage);
-						sna_damage_all(&dst_priv->gpu_damage,
-							       dst_pixmap->drawable.width,
-							       dst_pixmap->drawable.height);
-						list_del(&dst_priv->list);
-						dst_priv->undamaged = false;
-					} else {
-						RegionTranslate(&region, dst_dx, dst_dy);
-						assert_pixmap_contains_box(dst_pixmap,
-									   RegionExtents(&region));
-						sna_damage_add(&dst_priv->gpu_damage,
-							       &region);
-						RegionTranslate(&region, -dst_dx, -dst_dy);
-					}
-					assert_pixmap_damage(dst_pixmap);
+			dst_priv->cpu = false;
+			if (damage) {
+				if (replaces) {
+					sna_damage_destroy(&dst_priv->cpu_damage);
+					sna_damage_all(&dst_priv->gpu_damage,
+						       dst_pixmap->drawable.width,
+						       dst_pixmap->drawable.height);
+					list_del(&dst_priv->list);
+					dst_priv->undamaged = false;
+				} else {
+					assert_pixmap_contains_box(dst_pixmap,
+								   RegionExtents(&region));
+					sna_damage_add(&dst_priv->gpu_damage,
+						       &region);
 				}
+				assert_pixmap_damage(dst_pixmap);
 			}
 		}
 
 		goto out;
-	} else if (use_cpu_bo_for_write(sna, dst_priv) &&
-		   src_priv && DAMAGE_IS_ALL(src_priv->gpu_damage) && !src_priv->clear) {
-		assert(src_priv->gpu_bo != NULL); /* guaranteed by gpu_damage */
-		if (!sna->render.copy_boxes(sna, alu,
-					    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
-					    dst_pixmap, dst_priv->cpu_bo, dst_dx, dst_dy,
-					    box, n)) {
-			DBG(("%s: fallback - accelerated copy boxes failed\n",
-			     __FUNCTION__));
-			goto fallback;
-		}
-
-		dst_priv->cpu = true;
-		if (replaces) {
-			sna_damage_all(&dst_priv->cpu_damage,
-				       dst_pixmap->drawable.width,
-				       dst_pixmap->drawable.height);
-			dst_priv->undamaged = false;
-		} else {
-			RegionTranslate(&region, dst_dx, dst_dy);
-			assert_pixmap_contains_box(dst_pixmap,
-						   RegionExtents(&region));
-			sna_damage_add(&dst_priv->cpu_damage, &region);
-			RegionTranslate(&region, -dst_dx, -dst_dy);
-		}
-		assert_pixmap_damage(dst_pixmap);
-		if (dst_priv->flush)
-			list_move(&dst_priv->list, &sna->dirty_pixmaps);
-
-		goto out;
 	}
 
 fallback:
@@ -4188,7 +4074,6 @@ fallback:
 		DBG(("%s: copying clear [%08x]\n",
 		     __FUNCTION__, src_priv->clear_color));
 
-		RegionTranslate(&region, dst_dx, dst_dy);
 		box = REGION_RECTS(&region);
 		n = REGION_NUM_RECTS(&region);
 
@@ -4237,7 +4122,6 @@ fallback:
 			RegionTranslate(&region, -src_dx, -src_dy);
 		}
 
-		RegionTranslate(&region, dst_dx, dst_dy);
 		if (dst_priv) {
 			unsigned mode;
 
@@ -4256,6 +4140,8 @@ fallback:
 		dst_stride = dst_pixmap->devKind;
 		src_stride = src_pixmap->devKind;
 
+		src_dx += dst_dx;
+		src_dy += dst_dy;
 		if (alu == GXcopy && bpp >= 8) {
 			dst_bits = (FbBits *)
 				((char *)dst_pixmap->devPrivate.ptr +
@@ -5355,7 +5241,8 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	bo = sna_drawable_use_bo(drawable, true, &region.extents, &damage);
+	bo = sna_drawable_use_bo(drawable, PREFER_GPU,
+				 &region.extents, &damage);
 	if (bo) {
 		if (gc_is_solid(gc, &color)) {
 			DBG(("%s: trying solid fill [alu=%d, pixel=%08lx] blt paths\n",
@@ -5907,7 +5794,8 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (!PM_IS_SOLID(dst, gc->planemask))
 		goto fallback;
 
-	arg.bo = sna_drawable_use_bo(dst, true, &region.extents, &arg.damage);
+	arg.bo = sna_drawable_use_bo(dst, PREFER_GPU,
+				     &region.extents, &arg.damage);
 	if (arg.bo) {
 		if (arg.bo->tiling == I915_TILING_Y) {
 			assert(arg.bo == sna_pixmap_get_bo(pixmap));
@@ -6128,7 +6016,8 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 		DBG(("%s: trying solid fill [%08lx] blt paths\n",
 		     __FUNCTION__, gc->fgPixel));
 
-		if ((bo = sna_drawable_use_bo(drawable, false, &region.extents, &damage)) &&
+		if ((bo = sna_drawable_use_bo(drawable, 0,
+					      &region.extents, &damage)) &&
 		    sna_poly_point_blt(drawable, bo, damage,
 				       gc, mode, n, pt, flags & 2))
 			return;
@@ -6825,7 +6714,7 @@ sna_poly_line_extents(DrawablePtr drawable, GCPtr gc,
  * Currently it looks to be faster to use the GPU for zero spans on all
  * platforms.
  */
-inline static bool
+inline static int
 _use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
 	if (USE_ZERO_SPANS)
@@ -6834,7 +6723,7 @@ _use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 	return !drawable_gc_inplace_hint(drawable, gc);
 }
 
-static bool
+static int
 use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
 	bool ret = _use_zero_spans(drawable, gc, extents);
@@ -6849,7 +6738,7 @@ use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
  * platforms, slow MI code. But that does not take into account the true
  * cost of readback?
  */
-inline static bool
+inline static int
 _use_wide_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
 	if (USE_WIDE_SPANS)
@@ -6858,10 +6747,10 @@ _use_wide_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 	return !drawable_gc_inplace_hint(drawable, gc);
 }
 
-static bool
+static int
 use_wide_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
-	bool ret = _use_wide_spans(drawable, gc, extents);
+	int ret = _use_wide_spans(drawable, gc, extents);
 	DBG(("%s? %d\n", __FUNCTION__, ret));
 	return ret;
 }
@@ -6936,7 +6825,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		     __FUNCTION__, (unsigned)color));
 
 		if (data.flags & 4) {
-			data.bo = sna_drawable_use_bo(drawable, true,
+			data.bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 						      &data.region.extents,
 						      &data.damage);
 			if (data.bo &&
@@ -6961,7 +6850,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		}
 	} else if (data.flags & 4) {
 		/* Try converting these to a set of rectangles instead */
-		data.bo = sna_drawable_use_bo(drawable, true,
+		data.bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 					      &data.region.extents, &data.damage);
 		if (data.bo) {
 			DDXPointRec p1, p2;
@@ -7839,7 +7728,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		     __FUNCTION__, (unsigned)color, data.flags));
 
 		if (data.flags & 4) {
-			if ((data.bo = sna_drawable_use_bo(drawable, true,
+			if ((data.bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 							   &data.region.extents,
 							   &data.damage)) &&
 			     sna_poly_segment_blt(drawable,
@@ -7865,7 +7754,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		xRectangle *rect;
 		int i;
 
-		data.bo = sna_drawable_use_bo(drawable, true,
+		data.bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 					      &data.region.extents,
 					      &data.damage);
 		if (data.bo == NULL)
@@ -8601,7 +8490,7 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	if (gc->lineStyle == LineSolid && gc->joinStyle == JoinMiter) {
 		DBG(("%s: trying blt solid fill [%08lx] paths\n",
 		     __FUNCTION__, gc->fgPixel));
-		if ((bo = sna_drawable_use_bo(drawable, true,
+		if ((bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 					      &region.extents, &damage)) &&
 		    sna_poly_rectangle_blt(drawable, bo, damage,
 					   gc, n, r, &region.extents, flags&2))
@@ -8610,7 +8499,7 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 		/* Not a trivial outline, but we still maybe able to break it
 		 * down into simpler operations that we can accelerate.
 		 */
-		if (sna_drawable_use_bo(drawable, true,
+		if (sna_drawable_use_bo(drawable, PREFER_GPU,
 					&region.extents, &damage)) {
 			miPolyRectangle(drawable, gc, n, r);
 			return;
@@ -10531,7 +10420,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		}
 	}
 
-	bo = sna_drawable_use_bo(draw, true, &region.extents, &damage);
+	bo = sna_drawable_use_bo(draw, PREFER_GPU, &region.extents, &damage);
 	if (bo == NULL)
 		goto fallback;
 
@@ -10688,7 +10577,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 	if (!PM_IS_SOLID(draw, gc->planemask))
 		goto fallback;
 
-	if ((data.bo = sna_drawable_use_bo(draw, true,
+	if ((data.bo = sna_drawable_use_bo(draw, PREFER_GPU,
 					   &data.region.extents,
 					   &data.damage))) {
 		uint32_t color;
@@ -10856,7 +10745,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		return false;
 	}
 
-	bo = sna_drawable_use_bo(drawable, true, &clip->extents, &damage);
+	bo = sna_drawable_use_bo(drawable, PREFER_GPU, &clip->extents, &damage);
 	if (bo == NULL)
 		return false;
 
@@ -11792,7 +11681,7 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if (sna_font_too_large(gc->font))
 		goto fallback;
 
-	if ((bo = sna_drawable_use_bo(drawable, true,
+	if ((bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
 				   bo, damage, &region,
@@ -11872,7 +11761,7 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	if (sna_font_too_large(gc->font))
 		goto fallback;
 
-	if ((bo = sna_drawable_use_bo(drawable, true,
+	if ((bo = sna_drawable_use_bo(drawable, PREFER_GPU,
 				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
 				   bo, damage, &region, fg, -1, true))
@@ -11910,7 +11799,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	bo = sna_drawable_use_bo(drawable, true, &region->extents, &damage);
+	bo = sna_drawable_use_bo(drawable, PREFER_GPU, &region->extents, &damage);
 	if (bo == NULL)
 		return false;
 
commit ce27a81ac508368d54f1237893a9b1214cf3e3d0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 17:52:24 2012 +0100

    sna: Clear cpu flag after deciding to use gpu bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c4dbe12..6550d25 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2341,14 +2341,15 @@ done:
 	return priv->gpu_bo;
 
 use_gpu_bo:
+	DBG(("%s: using whole GPU bo\n", __FUNCTION__));
 	assert(priv->gpu_bo != NULL);
+	assert(priv->gpu_bo->proxy == NULL);
 	priv->clear = false;
+	priv->cpu = false;
 	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive,
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
-	DBG(("%s: using whole GPU bo\n", __FUNCTION__));
-	assert(priv->gpu_bo->proxy == NULL);
 	return priv->gpu_bo;
 
 use_cpu_bo:
commit 2f1b7e8a23ac3086dda0025ecf09dd1feac94837
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 17:53:01 2012 +0100

    sna: Check for non-existent /sys/class/backlight directory
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 649b66f..43cf15a 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -326,6 +326,9 @@ sna_output_backlight_init(xf86OutputPtr output)
 	best_type = INT_MAX;
 
 	dir = opendir(BACKLIGHT_CLASS);
+	if (dir == NULL)
+		return;
+
 	while ((de = readdir(dir))) {
 		char path[1024];
 		char buf[100];
commit d12d50d107d403c3cf4dfe24bb63ce1006d0e025
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 14:09:21 2012 +0100

    sna: Skip hidden and special entries inside /sys/class/backlight
    
    Just to avoid the warnings after falling back to comparing the d_name
    to the list of known interfaces.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0d9e474..649b66f 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -331,6 +331,9 @@ sna_output_backlight_init(xf86OutputPtr output)
 		char buf[100];
 		int fd, v;
 
+		if (*de->d_name == '.')
+			continue;
+
 		snprintf(path, sizeof(path), "%s/%s/type",
 			 BACKLIGHT_CLASS, de->d_name);
 
commit e80f9c4670a0e84521907b1baa059322784b1558
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jul 2 10:21:42 2012 +0100

    sna: Prefer backlight iface based on /sys/class/backlight/*/type
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 07ff607..0d9e474 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -34,8 +34,10 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <dirent.h>
 #include <errno.h>
 #include <poll.h>
+#include <ctype.h>
 
 #include <xorgVersion.h>
 #include <X11/Xatom.h>
@@ -91,7 +93,7 @@ struct sna_output {
 	int panel_vdisplay;
 
 	int dpms_mode;
-	const char *backlight_iface;
+	char *backlight_iface;
 	int backlight_active_level;
 	int backlight_max;
 	struct list link;
@@ -104,28 +106,6 @@ static inline struct sna_crtc *to_sna_crtc(xf86CrtcPtr crtc)
 
 #define BACKLIGHT_CLASS "/sys/class/backlight"
 
-/*
- * List of available kernel interfaces in priority order
- */
-static const char *backlight_interfaces[] = {
-	"intel", /* prefer our own native backlight driver */
-	"asus-laptop",
-	"asus-nb-wmi",
-	"eeepc",
-	"thinkpad_screen",
-	"mbp_backlight",
-	"fujitsu-laptop",
-	"sony",
-	"samsung",
-	"acpi_video1", /* finally fallback to the generic acpi drivers */
-	"acpi_video0",
-	NULL,
-};
-/*
- * Must be long enough for BACKLIGHT_CLASS + '/' + longest in above table +
- * '/' + "max_backlight"
- */
-#define BACKLIGHT_PATH_LEN 80
 /* Enough for 10 digits of backlight + '\n' + '\0' */
 #define BACKLIGHT_VALUE_LEN 12
 
@@ -219,14 +199,14 @@ static void
 sna_output_backlight_set(xf86OutputPtr output, int level)
 {
 	struct sna_output *sna_output = output->driver_private;
-	char path[BACKLIGHT_PATH_LEN], val[BACKLIGHT_VALUE_LEN];
+	char path[1024], val[BACKLIGHT_VALUE_LEN];
 	int fd, len, ret;
 
 	DBG(("%s: level=%d\n", __FUNCTION__, level));
 
 	if (level > sna_output->backlight_max)
 		level = sna_output->backlight_max;
-	if (! sna_output->backlight_iface || level < 0)
+	if (!sna_output->backlight_iface || level < 0)
 		return;
 
 	len = snprintf(val, BACKLIGHT_VALUE_LEN, "%d\n", level);
@@ -252,7 +232,7 @@ static int
 sna_output_backlight_get(xf86OutputPtr output)
 {
 	struct sna_output *sna_output = output->driver_private;
-	char path[BACKLIGHT_PATH_LEN], val[BACKLIGHT_VALUE_LEN];
+	char path[1024], val[BACKLIGHT_VALUE_LEN];
 	int fd, level;
 
 	sprintf(path, "%s/%s/actual_brightness",
@@ -287,7 +267,7 @@ static int
 sna_output_backlight_get_max(xf86OutputPtr output)
 {
 	struct sna_output *sna_output = output->driver_private;
-	char path[BACKLIGHT_PATH_LEN], val[BACKLIGHT_VALUE_LEN];
+	char path[1024], val[BACKLIGHT_VALUE_LEN];
 	int fd, max = 0;
 
 	sprintf(path, "%s/%s/max_brightness",
@@ -313,29 +293,117 @@ sna_output_backlight_get_max(xf86OutputPtr output)
 	return max;
 }
 
+enum {
+	FIRMWARE,
+	PLATFORM,
+	RAW,
+	NAMED,
+};
+
 static void
 sna_output_backlight_init(xf86OutputPtr output)
 {
+	static const char *known_interfaces[] = {
+		"asus-laptop",
+		"asus-nb-wmi",
+		"eeepc",
+		"thinkpad_screen",
+		"mbp_backlight",
+		"fujitsu-laptop",
+		"sony",
+		"samsung",
+		"acpi_video1",
+		"acpi_video0",
+		"intel_backlight",
+	};
 	struct sna_output *sna_output = output->driver_private;
-	int i;
+	char *best_iface;
+	int best_type;
+	DIR *dir;
+	struct dirent *de;
+
+	best_iface = NULL;
+	best_type = INT_MAX;
+
+	dir = opendir(BACKLIGHT_CLASS);
+	while ((de = readdir(dir))) {
+		char path[1024];
+		char buf[100];
+		int fd, v;
+
+		snprintf(path, sizeof(path), "%s/%s/type",
+			 BACKLIGHT_CLASS, de->d_name);
+
+		v = -1;
+		fd = open(path, O_RDONLY);
+		if (fd >= 0) {
+			v = read(fd, buf, sizeof(buf)-1);
+			close(fd);
+		}
+		if (v > 0) {
+			while (v > 0 && isspace(buf[v-1]))
+				v--;
+			buf[v] = '\0';
+
+			if (strcmp(buf, "raw") == 0)
+				v = RAW;
+			else if (strcmp(buf, "platform") == 0)
+				v = PLATFORM;
+			else if (strcmp(buf, "firmware") == 0)
+				v = FIRMWARE;
+			else
+				v = NAMED;
+		} else
+			v = NAMED;
+
+		/* Fallback to priority list of known iface for old kernels */
+		if (v == NAMED) {
+			int i;
+			for (i = 0; i < ARRAY_SIZE(known_interfaces); i++) {
+				if (strcmp(de->d_name, known_interfaces[i]) == 0)
+					break;
+			}
+			v += i;
+		}
+
+		if (v < best_type) {
+			char *copy;
+			int max;
+
+			/* XXX detect right backlight for multi-GPU/panels */
+
+			sna_output->backlight_iface = de->d_name;
+			max = sna_output_backlight_get_max(output);
+			if (max <= 0)
+				continue;
 
-	for (i = 0; backlight_interfaces[i] != NULL; i++) {
-		char path[BACKLIGHT_PATH_LEN];
-		struct stat buf;
-
-		sprintf(path, "%s/%s", BACKLIGHT_CLASS, backlight_interfaces[i]);
-		if (!stat(path, &buf)) {
-			sna_output->backlight_iface = backlight_interfaces[i];
-			sna_output->backlight_max = sna_output_backlight_get_max(output);
-			if (sna_output->backlight_max > 0) {
-				sna_output->backlight_active_level = sna_output_backlight_get(output);
-				xf86DrvMsg(output->scrn->scrnIndex, X_INFO,
-					   "found backlight control interface %s\n", path);
-				return;
+			copy = strdup(de->d_name);
+			if (copy) {
+				free(best_iface);
+				best_iface = copy;
+				best_type = v;
 			}
 		}
 	}
+
 	sna_output->backlight_iface = NULL;
+
+	if (best_iface) {
+		const char *str;
+
+		sna_output->backlight_iface = best_iface;
+		sna_output->backlight_max = sna_output_backlight_get_max(output);
+		sna_output->backlight_active_level = sna_output_backlight_get(output);
+		switch (best_type) {
+		case FIRMWARE: str = "firmware"; break;
+		case PLATFORM: str = "platform"; break;
+		case RAW: str = "raw"; break;
+		default: str = "unknown"; break;
+		}
+		xf86DrvMsg(output->scrn->scrnIndex, X_INFO,
+			   "found backlight control interface %s (type '%s')\n",
+			   best_iface, str);
+	}
 }
 
 
@@ -1515,6 +1583,8 @@ sna_output_destroy(xf86OutputPtr output)
 	drmModeFreeConnector(sna_output->mode_output);
 	sna_output->mode_output = NULL;
 
+	free(sna_output->backlight_iface);
+
 	list_del(&sna_output->link);
 	free(sna_output);
 
commit 61e16dc5673a1ac96b2ecee072cc3e80971be5d9
Author: Tom Hughes <tom at compton.nu>
Date:   Mon Jul 2 10:23:56 2012 +0100

    Add asus-nb-wmi backlight control
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51593

diff --git a/src/intel_display.c b/src/intel_display.c
index 949a822..2e2a9b1 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -128,6 +128,7 @@ intel_output_dpms_backlight(xf86OutputPtr output, int oldmode, int mode);
  */
 static const char *backlight_interfaces[] = {
 	"asus-laptop",
+	"asus-nb-wmi",
 	"eeepc",
 	"thinkpad_screen",
 	"mbp_backlight",
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f1e0bed..07ff607 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -110,6 +110,7 @@ static inline struct sna_crtc *to_sna_crtc(xf86CrtcPtr crtc)
 static const char *backlight_interfaces[] = {
 	"intel", /* prefer our own native backlight driver */
 	"asus-laptop",
+	"asus-nb-wmi",
 	"eeepc",
 	"thinkpad_screen",
 	"mbp_backlight",
commit 543816011d05f0cf40b05ec58f87a5954397a224
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 18:36:55 2012 +0100

    sna/gen7: Correct two minor misuses of equivalent GEN6 constants
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 6cf75eb..d303286 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3523,7 +3523,7 @@ fallback_blt:
 	tmp.has_component_alpha = 0;
 	tmp.need_magic_ca_pass = 0;
 
-	tmp.u.gen7.wm_kernel = GEN6_WM_KERNEL_NOMASK;
+	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 65ca359..1cc3af1 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -441,7 +441,7 @@ struct gen7_render_state {
 	uint32_t sf_state;
 	uint32_t sf_mask_state;
 	uint32_t wm_state;
-	uint32_t wm_kernel[GEN6_KERNEL_COUNT];
+	uint32_t wm_kernel[GEN7_KERNEL_COUNT];
 
 	uint32_t cc_vp;
 	uint32_t cc_blend;
commit 839fea7f779ca63c32817ee4ba695dfce3344980
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 18:23:38 2012 +0100

    sna/gen6: Boost VS entries to maximum
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index ecc8dfb..81fad35 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -416,7 +416,7 @@ gen6_emit_urb(struct sna *sna)
 {
 	OUT_BATCH(GEN6_3DSTATE_URB | (3 - 2));
 	OUT_BATCH(((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) |
-		  (24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
+		  (256 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */
 	OUT_BATCH((0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) |
 		  (0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */
 }
commit fbd114507d9bf2e2b1d1e52c5e42dc6cdbd8c9a0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 14:03:31 2012 +0100

    sna/dri: Assert that the replacement bo is large enough for the pixmap
    
    Just another paranoid sanity check.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index f01de17..93e24b8 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1147,6 +1147,9 @@ sna_dri_exchange_buffers(DrawablePtr draw,
 	back_bo = get_private(back)->bo;
 	front_bo = get_private(front)->bo;
 
+	assert(pixmap->drawable.height * back_bo->pitch <= kgem_bo_size(back_bo));
+	assert(pixmap->drawable.height * front_bo->pitch <= kgem_bo_size(front_bo));
+
 	DBG(("%s: exchange front=%d/%d and back=%d/%d\n",
 	     __FUNCTION__,
 	     front_bo->handle, front->name,
@@ -1335,6 +1338,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	name = info->back->name;
 	bo = get_private(info->back)->bo;
+	assert(get_drawable_pixmap(draw)->drawable.height * bo->pitch <= kgem_bo_size(bo));
 
 	info->count = sna_page_flip(sna, bo, info, info->pipe, &info->old_fb);
 	if (info->count == 0)
commit 675cbd5fade91fd6a6bf533a31b0a211237af6e8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 12:39:13 2012 +0100

    sna/trapezoids: Skip the division when converting coverage to floating point
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 7bbe270..36defa1 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2770,6 +2770,7 @@ composite_unaligned_trap_row(struct sna *sna,
 	BoxRec box;
 	int opacity;
 	int x1, x2;
+#define u8_to_float(x) ((x) * (1.f/255))
 
 	if (covered == 0)
 		return;
@@ -2800,7 +2801,7 @@ composite_unaligned_trap_row(struct sna *sna,
 
 		if (opacity)
 			composite_unaligned_box(sna, tmp, &box,
-						opacity/255., clip);
+						u8_to_float(opacity), clip);
 	} else {
 		if (pixman_fixed_frac(trap->left.p1.x)) {
 			box.x1 = x1;
@@ -2811,7 +2812,7 @@ composite_unaligned_trap_row(struct sna *sna,
 
 			if (opacity)
 				composite_unaligned_box(sna, tmp, &box,
-							opacity/255., clip);
+							u8_to_float(opacity), clip);
 		}
 
 		if (x2 > x1) {
@@ -2819,7 +2820,8 @@ composite_unaligned_trap_row(struct sna *sna,
 			box.x2 = x2;
 
 			composite_unaligned_box(sna, tmp, &box,
-						covered*SAMPLES_X/255., clip);
+						covered == SAMPLES_Y ? 1. : u8_to_float(covered*SAMPLES_X),
+						clip);
 		}
 
 		if (pixman_fixed_frac(trap->right.p1.x)) {
@@ -2831,7 +2833,7 @@ composite_unaligned_trap_row(struct sna *sna,
 
 			if (opacity)
 				composite_unaligned_box(sna, tmp, &box,
-							opacity/255., clip);
+							u8_to_float(opacity), clip);
 		}
 	}
 }
commit 182c3637cc5d3a6ce52127087aa2f19ca2b42719
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 13:23:44 2012 +0100

    sna: If the pixmap is marked for flushing, prefer to use the GPU
    
    Again, to avoid the forced ping-pong as we upload the damage after
    nearly every operation, simply prefer to use the GPU in such cases.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cb507c1..c4dbe12 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2219,6 +2219,8 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 	}
 
+	if (priv->flush)
+		prefer_gpu = true;
 	if (priv->cpu)
 		prefer_gpu = false;
 
commit 35b1ac138002c206a6d6b866d49a0d73705dd3ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jul 1 10:53:07 2012 +0100

    sna: After an operation on the CPU, prefer not to use the GPU
    
    A nasty habit of applications is to fill an area, only to read it back
    with GetImage, render locally and replace with PutImage. This causes a
    readback of an active bo every time, so let's try to mitigate that by
    preferring not to use the GPU after a forced readback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 66ee6c0..9d36543 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -140,6 +140,7 @@ struct sna_pixmap {
 	uint8_t undamaged :1;
 	uint8_t create :3;
 	uint8_t header :1;
+	uint8_t cpu :1;
 };
 
 struct sna_glyph {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d280f84..cb507c1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1108,6 +1108,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				       pixmap->drawable.height);
 			sna_damage_destroy(&priv->cpu_damage);
 			priv->undamaged = false;
+			priv->cpu = false;
 			list_del(&priv->list);
 			if (priv->cpu_bo) {
 				assert(!priv->cpu_bo->sync);
@@ -1134,6 +1135,7 @@ skip_inplace_map:
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
+				priv->cpu = false;
 				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
 			}
@@ -1167,6 +1169,7 @@ skip_inplace_map:
 				list_del(&priv->list);
 				priv->undamaged = false;
 				priv->clear = false;
+				priv->cpu = false;
 			}
 
 			assert_pixmap_damage(pixmap);
@@ -1266,6 +1269,7 @@ done:
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
+	priv->cpu = true;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -1369,6 +1373,11 @@ static inline bool region_inplace(struct sna *sna,
 	if (wedged(sna))
 		return false;
 
+	if (priv->cpu) {
+		DBG(("%s: no, preferring last action of CPU\n", __FUNCTION__));
+		return false;
+	}
+
 	if (!write_only && priv->cpu_damage &&
 	    region_overlaps_damage(region, priv->cpu_damage)) {
 		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
@@ -1504,6 +1513,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 						       region);
 
 				priv->clear = false;
+				priv->cpu = false;
 				assert_pixmap_damage(pixmap);
 				if (dx | dy)
 					RegionTranslate(region, -dx, -dy);
@@ -1553,6 +1563,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 			assert_pixmap_damage(pixmap);
 			priv->clear = false;
+			priv->cpu = false;
 			if (dx | dy)
 				RegionTranslate(region, -dx, -dy);
 			return true;
@@ -1587,6 +1598,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 			assert_pixmap_damage(pixmap);
 			priv->clear = false;
+			priv->cpu = false;
 			if (dx | dy)
 				RegionTranslate(region, -dx, -dy);
 			return true;
@@ -1871,10 +1883,11 @@ out:
 		priv->source_count = SOURCE_BIAS;
 		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
 	}
-	if (priv->cpu_bo) {
+	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
+	priv->cpu = true;
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
 	assert_pixmap_damage(pixmap);
@@ -2206,6 +2219,9 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 	}
 
+	if (priv->cpu)
+		prefer_gpu = false;
+
 	if (!prefer_gpu && priv->gpu_bo && !kgem_bo_is_busy(priv->gpu_bo))
 		goto use_cpu_bo;
 
@@ -2457,6 +2473,7 @@ sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
+	priv->cpu = false;
 	return priv;
 }
 
@@ -2527,6 +2544,8 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 	if (!sna_pixmap_move_to_gpu(pixmap, flags | __MOVE_FORCE))
 		return NULL;
 
+	assert(!priv->cpu);
+
 	/* For large bo, try to keep only a single copy around */
 	if (priv->create & KGEM_CAN_CREATE_LARGE && priv->ptr) {
 		sna_damage_all(&priv->gpu_damage,
@@ -2986,6 +3005,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	if (upload_inplace(sna, pixmap, priv, region) &&
 	    sna_put_image_upload_blt(drawable, gc, region,
 				     x, y, w, h, bits, stride)) {
+		assert(priv->cpu == false);
 		if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 			DBG(("%s: marking damage\n", __FUNCTION__));
 			if (region_subsumes_drawable(region, &pixmap->drawable))
@@ -3055,6 +3075,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 					assert_pixmap_damage(pixmap);
 					priv->clear = false;
+					priv->cpu = false;
 					return true;
 				}
 			} else {
@@ -3124,6 +3145,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		sna_pixmap_free_gpu(sna, priv);
 		priv->undamaged = false;
 		priv->clear = false;
+		priv->cpu = false;
 	}
 
 	if (!DAMAGE_IS_ALL(priv->cpu_damage)) {
@@ -3150,6 +3172,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
 	}
+	priv->cpu = true;
 
 blt:
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -3832,6 +3855,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		list_del(&dst_priv->list);
 		dst_priv->undamaged = true;
 		dst_priv->clear = false;
+		dst_priv->cpu = false;
 	}
 
 	if (src_priv == NULL &&
@@ -3913,6 +3937,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				fill.done(sna, &fill);
 			}
 
+			dst_priv->cpu = false;
 			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 				if (replaces) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
@@ -3946,6 +3971,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
+			dst_priv->cpu = false;
 			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 				if (replaces) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
@@ -3973,6 +3999,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				goto fallback;
 			}
 
+			dst_priv->cpu = false;
 			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 				if (replaces) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
@@ -4042,6 +4069,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 			tmp->drawable.pScreen->DestroyPixmap(tmp);
 
+			dst_priv->cpu = false;
 			if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 				RegionTranslate(&region, dst_dx, dst_dy);
 				assert_pixmap_contains_box(dst_pixmap,
@@ -4076,6 +4104,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						 bits, stride))
 					goto fallback;
 
+				dst_priv->cpu = false;
 				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
 					sna_damage_all(&dst_priv->gpu_damage,
@@ -4096,6 +4125,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 						     box, n))
 					goto fallback;
 
+				dst_priv->cpu = false;
 				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 					if (replaces) {
 						sna_damage_destroy(&dst_priv->cpu_damage);
@@ -4130,6 +4160,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			goto fallback;
 		}
 
+		dst_priv->cpu = true;
 		if (replaces) {
 			sna_damage_all(&dst_priv->cpu_damage,
 				       dst_pixmap->drawable.width,
@@ -10492,6 +10523,8 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		if (region_subsumes_damage(&region, priv->cpu_damage)) {
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
+			priv->undamaged = false;
+			priv->cpu = false;
 		}
 	}
 
@@ -12598,6 +12631,7 @@ static void sna_accel_inactive(struct sna *sna)
 			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
+			priv->cpu = false;
 
 			list_add(&priv->inactive, &preserve);
 		} else {
commit e625c02e6266403fcd8a72ccce2c6c6291e2e5fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 30 09:34:21 2012 +0100

    sna/damage: Early check for contains-box? if subtract and box outside region
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index ce16b77..745e2d1 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1260,8 +1260,13 @@ static int __sna_damage_contains_box(struct sna_damage *damage,
 	if (!damage->dirty)
 		return ret;
 
-	if (damage->mode == DAMAGE_ADD && ret == PIXMAN_REGION_IN)
-		return ret;
+	if (damage->mode == DAMAGE_ADD) {
+		if (ret == PIXMAN_REGION_IN)
+			return ret;
+	} else {
+		if (ret == PIXMAN_REGION_OUT)
+			return ret;
+	}
 
 	__sna_damage_reduce(damage);
 	return pixman_region_contains_rectangle(&damage->region, (BoxPtr)box);
commit abd7be1cee6f2f494a11cd9d2e7888c3043ffc02
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 15:32:10 2012 +0100

    sna/dri: Prefer GPU rendering if no more CPU damage on a DRI bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 87ddba0..f01de17 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -397,21 +397,16 @@ static void damage(PixmapPtr pixmap, RegionPtr region)
 
 	if (region == NULL) {
 damage_all:
-		sna_damage_all(&priv->gpu_damage,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height);
+		priv->gpu_damage = _sna_damage_all(priv->gpu_damage,
+						   pixmap->drawable.width,
+						   pixmap->drawable.height);
 		sna_damage_destroy(&priv->cpu_damage);
 		priv->undamaged = false;
 	} else {
-		BoxPtr box = RegionExtents(region);
-		if (region->data == NULL &&
-		    box->x1 <= 0 && box->y1 <= 0 &&
-		    box->x2 >= pixmap->drawable.width &&
-		    box->y2 >= pixmap->drawable.height)
+		sna_damage_subtract(&priv->cpu_damage, region);
+		if (priv->cpu_damage == NULL)
 			goto damage_all;
-
 		sna_damage_add(&priv->gpu_damage, region);
-		sna_damage_subtract(&priv->cpu_damage, region);
 	}
 }
 
commit 67b87e4f7cf6c3ab9cfccc9fe43a824bfe84f393
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 15:03:12 2012 +0100

    sna/dri: Optimise clip reduction with copy-to-front to an unclipped Window
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index dddcc73..87ddba0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -451,18 +451,18 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	struct kgem_bo *bo = NULL;
 	bool flush = false;
 	xf86CrtcPtr crtc;
-	BoxRec box, *boxes;
+	BoxRec *boxes;
 	int16_t dx, dy;
 	int n;
 
-	box.x1 = draw->x;
-	box.y1 = draw->y;
-	box.x2 = draw->x + draw->width;
-	box.y2 = draw->y + draw->height;
+	clip.extents.x1 = draw->x;
+	clip.extents.y1 = draw->y;
+	clip.extents.x2 = draw->x + draw->width;
+	clip.extents.y2 = draw->y + draw->height;
+	clip.data = NULL;
 
 	if (region) {
 		pixman_region_translate(region, draw->x, draw->y);
-		pixman_region_init_rects(&clip, &box, 1);
 		pixman_region_intersect(&clip, &clip, region);
 		region = &clip;
 
@@ -476,28 +476,32 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	if (draw->type != DRAWABLE_PIXMAP) {
 		WindowPtr win = (WindowPtr)draw;
 
-		DBG(("%s: draw=(%d, %d), delta=(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
-		     __FUNCTION__, draw->x, draw->y,
-		     get_drawable_dx(draw), get_drawable_dy(draw),
-		     win->clipList.extents.x1, win->clipList.extents.y1,
-		     win->clipList.extents.x2, win->clipList.extents.y2));
+		if (win->clipList.data ||
+		    win->clipList.extents.x2 - win->clipList.extents.x1 != draw->width ||
+		    win->clipList.extents.y2 - win->clipList.extents.y1 != draw->height) {
+			DBG(("%s: draw=(%d, %d), delta=(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
+			     __FUNCTION__, draw->x, draw->y,
+			     get_drawable_dx(draw), get_drawable_dy(draw),
+			     win->clipList.extents.x1, win->clipList.extents.y1,
+			     win->clipList.extents.x2, win->clipList.extents.y2));
+
+			if (region == NULL)
+				region = &clip;
+
+			pixman_region_intersect(&clip, &win->clipList, region);
+			if (!pixman_region_not_empty(&clip)) {
+				DBG(("%s: all clipped\n", __FUNCTION__));
+				return NULL;
+			}
 
-		if (region == NULL) {
-			pixman_region_init_rects(&clip, &box, 1);
 			region = &clip;
 		}
 
-		pixman_region_intersect(region, &win->clipList, region);
-		if (!pixman_region_not_empty(region)) {
-			DBG(("%s: all clipped\n", __FUNCTION__));
-			return NULL;
-		}
-
 		if (sync && sna_pixmap_is_scanout(sna, pixmap)) {
-			crtc = sna_covering_crtc(sna->scrn, &region->extents, NULL);
+			crtc = sna_covering_crtc(sna->scrn, &clip.extents, NULL);
 			if (crtc)
 				flush = sna_wait_for_scanline(sna, pixmap, crtc,
-							      &region->extents);
+							      &clip.extents);
 		}
 
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
@@ -525,9 +529,8 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		n = REGION_NUM_RECTS(region);
 		assert(n);
 	} else {
-		pixman_region_init_rects(&clip, &box, 1);
 		region = &clip;
-		boxes = &box;
+		boxes = &clip.extents;
 		n = 1;
 	}
 	sna->render.copy_boxes(sna, GXcopy,
@@ -546,7 +549,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	DamageRegionAppend(&pixmap->drawable, region);
 	DamageRegionProcessPending(&pixmap->drawable);
 
-	if (region == &clip)
+	if (clip.data)
 		pixman_region_fini(&clip);
 
 	return bo;
commit eae5e1275cd11703de2bca67dacd1d57129b561a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 13:12:52 2012 +0100

    sna: Install the ModeSet handler as the base handler
    
    This way we can safely ignore it across server regen.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 54ff4d5..66ee6c0 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -261,7 +261,6 @@ struct sna {
 	ScreenBlockHandlerProcPtr BlockHandler;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
 	CloseScreenProcPtr CloseScreen;
-	xf86ModeSetProc *ModeSet;
 
 	PicturePtr clear;
 	struct {
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 3b3b93f..047b865 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -806,12 +806,6 @@ static void sna_mode_set(ScrnInfoPtr scrn)
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
-
-	if (sna->ModeSet) {
-		scrn->ModeSet = sna->ModeSet;
-		scrn->ModeSet(scrn);
-		scrn->ModeSet = sna_mode_set;
-	}
 	sna_mode_update(sna);
 }
 
@@ -933,9 +927,6 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	screen->CloseScreen = sna_close_screen;
 	screen->CreateScreenResources = sna_create_screen_resources;
 
-	sna->ModeSet = scrn->ModeSet;
-	scrn->ModeSet = sna_mode_set;
-
 	if (!xf86CrtcScreenInit(screen))
 		return FALSE;
 
@@ -1137,6 +1128,8 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	scrn->ValidMode = sna_valid_mode;
 	scrn->PMEvent = sna_pm_event;
 
+	scrn->ModeSet = sna_mode_set;
+
 	xf86SetEntitySharable(scrn->entityList[0]);
 
 	entity = xf86GetEntityInfo(entity_num);
commit 15a0761cad862a5d73bbc2af81bc5267e66c307e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 12:22:55 2012 +0100

    sna: Only consider the request list when deciding whether the GPU is busy
    
    Micro-optimisation to avoid the overhead of extra checks and to make
    sure an unflushed bo doesn't prevent us from submitting more work
    before sleeping.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 2d8def8..f9b2a33 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -253,6 +253,16 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
 
 void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
+static inline bool kgem_is_idle(struct kgem *kgem)
+{
+	if (list_is_empty(&kgem->requests))
+		return true;
+
+	if (!kgem_retire(kgem))
+		return false;
+
+	return list_is_empty(&kgem->requests);
+}
 struct kgem_bo *kgem_get_last_request(struct kgem *kgem);
 
 void _kgem_submit(struct kgem *kgem);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b9164ae..d280f84 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12807,7 +12807,10 @@ void sna_accel_close(struct sna *sna)
 
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
-	sna_accel_wakeup_handler(sna, NULL);
+	if (sna->kgem.nbatch && kgem_is_idle(&sna->kgem)) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
+		_kgem_submit(&sna->kgem);
+	}
 
 	if (sna_accel_do_flush(sna))
 		sna_accel_flush(sna);
commit 4061f05dd61a7200d91d30b9b4a2ab0c61306870
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 11:40:18 2012 +0100

    sna/trapezoids: Write unaligned fallback boxes inplace
    
    As this is a pure write operation (though we will write the edge pixels
    twice) we can perform this operation inplace and incur a slightly slower
    trap creation at the benefit of avoiding the later copy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 72dcd3e..7bbe270 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3582,7 +3582,7 @@ composite_unaligned_boxes_fallback(CARD8 op,
 			scratch = sna_pixmap_create_upload(screen,
 							   extents.x2 - extents.x1,
 							   extents.y2 - extents.y1,
-							   8, KGEM_BUFFER_WRITE);
+							   8, KGEM_BUFFER_WRITE_INPLACE);
 		if (!scratch)
 			continue;
 
commit 44e41536b7728c03a4899a97242960ae7ed35624
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 11:39:47 2012 +0100

    sna/trapezoids: Render the partial left-edge of fallback unaligned boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index a591672..72dcd3e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2940,7 +2940,7 @@ blt_unaligned_box_row(PixmapPtr scratch,
 	} else {
 		if (pixman_fixed_frac(trap->left.p1.x)) {
 			blt_opacity(scratch,
-				    x1, x1,
+				    x1, x1 + 1,
 				    y1, y2,
 				    covered * (SAMPLES_X - grid_coverage(SAMPLES_X, trap->left.p1.x)));
 			x1++;
commit e6f9bfe1e20c4c76822d77598f4f1c7e10754929
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 11:06:33 2012 +0100

    sna: Use currentTime rather than GetTimeInMillis()
    
    The overhead of reading the hpet() on every block handler (more or less)
    is exorbitant, so trust that we update currentTime frequently enough to
    be a good approximation - the side effect is that we will wake up
    slightly too early from using an old value for the current time.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index d4cb42f..54ff4d5 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -216,7 +216,6 @@ struct sna {
 	unsigned watch_flush;
 	unsigned flush;
 
-	uint32_t time;
 	OsTimerPtr timer;
 	uint32_t timer_expire[NUM_TIMERS];
 	uint16_t timer_active;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 86e22ec..b9164ae 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12308,9 +12308,10 @@ static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 	return priv && priv->gpu_bo ? priv : NULL;
 }
 
+#define TIME currentTime.milliseconds
 static void sna_accel_disarm_timer(struct sna *sna, int id)
 {
-	DBG(("%s[%d] (time=%ld)\n", __FUNCTION__, id, (long)sna->time));
+	DBG(("%s[%d] (time=%ld)\n", __FUNCTION__, id, (long)TIME));
 	sna->timer_active &= ~(1<<id);
 	sna->timer_ready &= ~(1<<id);
 }
@@ -12370,9 +12371,9 @@ static bool sna_accel_do_flush(struct sna *sna)
 	if (sna->timer_active & (1<<(FLUSH_TIMER))) {
 		DBG(("%s: flush timer active\n", __FUNCTION__));
 		if (sna->timer_ready & (1<<(FLUSH_TIMER))) {
-			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			sna->timer_expire[FLUSH_TIMER] =
-				sna->time + sna->vblank_interval;
+				TIME + sna->vblank_interval;
 			return true;
 		}
 	} else {
@@ -12382,8 +12383,8 @@ static bool sna_accel_do_flush(struct sna *sna)
 			sna->timer_active |= 1 << FLUSH_TIMER;
 			sna->timer_ready |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
-				sna->time + sna->vblank_interval / 2;
-			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+				TIME + sna->vblank_interval / 2;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 		}
 	}
 
@@ -12397,18 +12398,18 @@ static bool sna_accel_do_throttle(struct sna *sna)
 
 	if (sna->timer_active & (1<<(THROTTLE_TIMER))) {
 		if (sna->timer_ready & (1<<(THROTTLE_TIMER))) {
-			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
-			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
+			sna->timer_expire[THROTTLE_TIMER] = TIME + 20;
 			return true;
 		}
 	} else {
 		if (!sna->kgem.need_retire) {
 			DBG(("%s -- no pending activity\n", __FUNCTION__));
 		} else {
-			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 			sna->timer_active |= 1 << THROTTLE_TIMER;
 			sna->timer_ready |= 1 << THROTTLE_TIMER;
-			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+			sna->timer_expire[THROTTLE_TIMER] = TIME + 20;
 		}
 	}
 
@@ -12419,9 +12420,9 @@ static bool sna_accel_do_expire(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
 		if (sna->timer_ready & (1<<(EXPIRE_TIMER))) {
-			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			sna->timer_expire[EXPIRE_TIMER] =
-				sna->time + MAX_INACTIVE_TIME * 1000;
+				TIME + MAX_INACTIVE_TIME * 1000;
 			return true;
 		}
 	} else {
@@ -12429,8 +12430,8 @@ static bool sna_accel_do_expire(struct sna *sna)
 			sna->timer_active |= 1 << EXPIRE_TIMER;
 			sna->timer_ready |= 1 << EXPIRE_TIMER;
 			sna->timer_expire[EXPIRE_TIMER] =
-				sna->time + MAX_INACTIVE_TIME * 1000;
-			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+				TIME + MAX_INACTIVE_TIME * 1000;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 		}
 	}
 
@@ -12442,8 +12443,8 @@ static bool sna_accel_do_inactive(struct sna *sna)
 	if (sna->timer_active & (1<<(INACTIVE_TIMER))) {
 		if (sna->timer_ready & (1<<(INACTIVE_TIMER))) {
 			sna->timer_expire[INACTIVE_TIMER] =
-				sna->time + 120 * 1000;
-			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
+				TIME + 120 * 1000;
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)TIME));
 			return true;
 		}
 	} else {
@@ -12451,8 +12452,8 @@ static bool sna_accel_do_inactive(struct sna *sna)
 			sna->timer_active |= 1 << INACTIVE_TIMER;
 			sna->timer_ready |= 1 << INACTIVE_TIMER;
 			sna->timer_expire[INACTIVE_TIMER] =
-				sna->time + 120 * 1000;
-			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+				TIME + 120 * 1000;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)TIME));
 		}
 	}
 
@@ -12495,7 +12496,7 @@ static void sna_accel_flush(struct sna *sna)
 	bool busy;
 
 	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
-	     __FUNCTION__, (long)sna->time,
+	     __FUNCTION__, (long)TIME,
 	     priv && priv->cpu_damage,
 	     priv && priv->gpu_bo->exec != NULL,
 	     sna->kgem.nbatch,
@@ -12516,7 +12517,7 @@ static void sna_accel_flush(struct sna *sna)
 
 static void sna_accel_throttle(struct sna *sna)
 {
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)TIME));
 
 	if (sna->kgem.need_throttle)
 		kgem_throttle(&sna->kgem);
@@ -12527,7 +12528,7 @@ static void sna_accel_throttle(struct sna *sna)
 
 static void sna_accel_expire(struct sna *sna)
 {
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)TIME));
 
 	if (!kgem_expire_cache(&sna->kgem))
 		sna_accel_disarm_timer(sna, EXPIRE_TIMER);
@@ -12538,7 +12539,7 @@ static void sna_accel_inactive(struct sna *sna)
 	struct sna_pixmap *priv;
 	struct list preserve;
 
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)TIME));
 
 #if DEBUG_ACCEL
 	{
@@ -12633,10 +12634,10 @@ static void sna_accel_inactive(struct sna *sna)
 #ifdef DEBUG_MEMORY
 static bool sna_accel_do_debug_memory(struct sna *sna)
 {
-	int32_t delta = sna->timer_expire[DEBUG_MEMORY_TIMER] - sna->time;
+	int32_t delta = sna->timer_expire[DEBUG_MEMORY_TIMER] - TIME;
 
 	if (delta <= 3) {
-		sna->timer_expire[DEBUG_MEMORY_TIMER] = sna->time + 10 * 1000;
+		sna->timer_expire[DEBUG_MEMORY_TIMER] = TIME + 10 * 1000;
 		return true;
 	} else
 		return false;
@@ -12806,8 +12807,6 @@ void sna_accel_close(struct sna *sna)
 
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
-	sna->time = GetTimeInMillis();
-
 	sna_accel_wakeup_handler(sna, NULL);
 
 	if (sna_accel_do_flush(sna))
@@ -12846,7 +12845,7 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 		DBG(("%s: evaluating timers, ready=%x\n",
 		     __FUNCTION__, sna->timer_ready));
 		sna->timer_ready = 0;
-		timeout = sna_timeout(sna->timer, sna->time, sna);
+		timeout = sna_timeout(sna->timer, TIME, sna);
 		TimerSet(sna->timer, 0, timeout, sna_timeout, sna);
 		if (timeout) {
 			if (*tv == NULL) {
commit c6c4f30e194eb3b8a2421134bb005f1542b3ef9f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 10:14:52 2012 +0100

    sna: Add assertions to check that we do install the timers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b5e7e92..86e22ec 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12812,12 +12812,21 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 
 	if (sna_accel_do_flush(sna))
 		sna_accel_flush(sna);
+	assert(sna->flags & SNA_NO_DELAYED_FLUSH ||
+	       sna_accel_scanout(sna) == NULL ||
+	       sna_accel_scanout(sna)->gpu_bo->exec == NULL ||
+	       sna->timer_active & (1<<(FLUSH_TIMER)));
 
 	if (sna_accel_do_throttle(sna))
 		sna_accel_throttle(sna);
+	assert(sna->flags & SNA_NO_THROTTLE ||
+	       !sna->kgem.need_retire ||
+	       sna->timer_active & (1<<(THROTTLE_TIMER)));
 
 	if (sna_accel_do_expire(sna))
 		sna_accel_expire(sna);
+	assert(!sna->kgem.need_expire ||
+	       sna->timer_active & (1<<(EXPIRE_TIMER)));
 
 	if (sna_accel_do_inactive(sna))
 		sna_accel_inactive(sna);
commit 87c8f5a47e3abb7ff887aab45b6389cf9b8d8008
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 29 09:33:09 2012 +0100

    sna: Make the post-flip delay explicit
    
    As the kernel is inconsistent in enforcing this across generations,
    handle the synchronisation of the pageflip explicitly. Ultimately this
    should be replaced with a triple buffer mechanism.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 565e98c..f1e0bed 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2825,11 +2825,14 @@ disable:
 		/* XXX only works if the kernel stalls fwrites to the current
 		 * scanout whilst the flip is pending
 		 */
+		while (sna->mode.shadow_flip)
+			sna_mode_wakeup(sna);
 		(void)sna->render.copy_boxes(sna, GXcopy,
 					     sna->front, new, 0, 0,
 					     sna->front, old, 0, 0,
 					     REGION_RECTS(region),
 					     REGION_NUM_RECTS(region));
+		kgem_submit(&sna->kgem);
 
 		sna_pixmap(sna->front)->gpu_bo = old;
 		sna->mode.shadow = new;
commit 31caa43a21a68174386682fd558c911c7c03d76f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 17:05:46 2012 +0100

    sna/gen5: Check harder for need_upload() fallbacks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 0fd51d0..46a37a0 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2192,7 +2192,7 @@ untransformed(PicturePtr p)
 static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
+	return p->pDrawable && untransformed(p) && is_cpu(p->pDrawable);
 }
 
 static bool
@@ -2304,7 +2304,7 @@ gen5_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
-	if (mask && mask_fallback) {
+	if (mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
@@ -2422,7 +2422,7 @@ gen5_render_composite(struct sna *sna,
 		return FALSE;
 	}
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT  &&
+	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
 			      src, dst,
 			      src_x, src_y,
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index c14af3c..4941477 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -526,7 +526,6 @@ sna_composite(CARD8 op,
 			pixman_region_translate(&region, -x, -y);
 	}
 
-	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite(sna,
 				   op, src, mask, dst,
 				   src_x + dx,  src_y + dy,
@@ -535,7 +534,7 @@ sna_composite(CARD8 op,
 				   region.extents.y1,
 				   region.extents.x2 - region.extents.x1,
 				   region.extents.y2 - region.extents.y1,
-				   &tmp)) {
+				   memset(&tmp, 0, sizeof(tmp)))) {
 		DBG(("%s: fallback due unhandled composite op\n", __FUNCTION__));
 		goto fallback;
 	}
commit 7c3eb1fda9e567a300a8138acdac9ec11e010b11
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 17:04:26 2012 +0100

    sna: Correct inverted logic for checking xrgb drawables
    
    Reported-by: Christoph Reiter <reiter.christoph at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=51472
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 92be480..a591672 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3175,6 +3175,9 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 	PixmapPtr pixmap;
 	int16_t dx, dy;
 
+	DBG(("%s: force=%d, is_gpu=%d, op=%d, color=%x\n", __FUNCTION__,
+	     force_fallback, is_gpu(dst->pDrawable), op, color));
+
 	if (!force_fallback && is_gpu(dst->pDrawable)) {
 		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
 		     __FUNCTION__));
@@ -3183,7 +3186,7 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 	}
 
 	/* XXX a8 boxes */
-	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8) {
+	if (!(dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)) {
 		DBG(("%s: fallback -- can not perform operation in place, unhanbled format %08lx\n",
 		     __FUNCTION__, dst->format));
 		goto pixman;
@@ -3209,6 +3212,7 @@ composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
 		goto pixman;
 	}
 
+	DBG(("%s: inplace operation on argb32 destination\n", __FUNCTION__));
 	do {
 		RegionRec clip;
 		BoxPtr extents;
@@ -3420,6 +3424,8 @@ composite_unaligned_boxes_inplace(CARD8 op,
 		return false;
 	}
 
+	DBG(("%s\n", __FUNCTION__));
+
 	src_x -= pixman_fixed_to_int(t[0].left.p1.x);
 	src_y -= pixman_fixed_to_int(t[0].left.p1.y);
 	do {
@@ -3649,7 +3655,8 @@ composite_unaligned_boxes(struct sna *sna,
 	if (NO_UNALIGNED_BOXES)
 		return false;
 
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s: force_fallback=%d, mask=%x, n=%d\n",
+	     __FUNCTION__, force_fallback, maskFormat ? (int)maskFormat->format : 0, ntrap));
 
 	/* need a span converter to handle overlapping traps */
 	if (ntrap > 1 && maskFormat)
@@ -5330,7 +5337,7 @@ sna_composite_trapezoids(CARD8 op,
 		goto fallback;
 	}
 
-	force_fallback = FORCE_FALLBACK;
+	force_fallback = FORCE_FALLBACK > 0;
 	if ((too_small(priv) || DAMAGE_IS_ALL(priv->cpu_damage)) &&
 	    !picture_is_gpu(src)) {
 		DBG(("%s: force fallbacks -- dst is too small, %dx%d\n",
@@ -5339,6 +5346,8 @@ sna_composite_trapezoids(CARD8 op,
 		     dst->pDrawable->height));
 		force_fallback = true;
 	}
+	if (FORCE_FALLBACK < 0)
+		force_fallback = false;
 
 	/* scan through for fast rectangles */
 	rectilinear = pixel_aligned = true;
commit c3e2c1332d8d5a3944df99cc11aa66c586add3e8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 12:34:36 2012 +0100

    sna: Fix the application of the crtc offset for posting damage
    
    The damage boxes are in framebuffer (source) space, so we need to apply
    the offset for the boxes in crtc (destination) space.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 648d5c5..565e98c 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2700,8 +2700,8 @@ sna_crtc_redisplay(xf86CrtcPtr crtc, RegionPtr region)
 		 */
 
 		if (sna->render.copy_boxes(sna, GXcopy,
-					   sna->front, sna_pixmap_get_bo(sna->front), tx, ty,
-					   &tmp, sna_crtc->bo, 0, 0,
+					   sna->front, sna_pixmap_get_bo(sna->front), 0, 0,
+					   &tmp, sna_crtc->bo, -tx, -ty,
 					   REGION_RECTS(region), REGION_NUM_RECTS(region)))
 			return;
 	}
commit 47e6bfa4f40cf7efcfe7eee24d2512d737fd7e89
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 12:24:27 2012 +0100

    sna: Force use of per-crtc scanout if the offset is too large
    
    On gen4+, the scanout offset into a tiled surface is specified through
    the DSPTILEOFF register and limited to 12bits of precision. So if we
    have a CRTC positioned in that nether-region, we need to allocate a
    separate per-crtc pixmap for it and perform shadowing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 2e8925b..d4cb42f 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -234,6 +234,7 @@ struct sna {
 
 	struct sna_mode {
 		drmModeResPtr kmode;
+		int max_tile_offset;
 
 		int shadow_active;
 		DamagePtr shadow_damage;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index fed7411..648d5c5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -790,6 +790,14 @@ static bool use_shadow(struct sna *sna, xf86CrtcPtr crtc)
 		return true;
 	}
 
+	if (crtc->x >= sna->mode.max_tile_offset ||
+	    crtc->y >= sna->mode.max_tile_offset) {
+		DBG(("%s: offset too large (%d, %d) >= %d\n",
+		    __FUNCTION__,
+		    crtc->x, crtc->y, sna->mode.max_tile_offset));
+		return true;
+	}
+
 	transform = NULL;
 	if (crtc->transformPresent)
 		transform = &crtc->transform;
@@ -2201,6 +2209,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 			   "failed to get resources: %s\n", strerror(errno));
 		return FALSE;
 	}
+	mode->max_tile_offset = 4096;
 
 	set_size_range(sna);
 
commit 93e77ee019248fe77483e83f2210d584bb5d1be2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 12:02:32 2012 +0100

    sna: Quieten kernel debug complaints when disabling crtc
    
    Even if we are obviously turning the crtc off, it still complains if the
    number of connectors is non-zero. So make it so.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b7c5a40..fed7411 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -488,11 +488,12 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		   sna_crtc->kmode.vdisplay,
 		   sna_crtc->id, sna_crtc->pipe);
 
-	DBG(("%s: applying crtc [%d] mode=%dx%d@%d, fb=%d%s update to %d outputs\n",
+	DBG(("%s: applying crtc [%d] mode=%dx%d+%d+%d@%d, fb=%d%s update to %d outputs\n",
 	     __FUNCTION__, sna_crtc->id,
-	     sna_crtc->kmode.hdisplay,
-	     sna_crtc->kmode.vdisplay,
-	     sna_crtc->kmode.clock,
+	     arg.mode.hdisplay,
+	     arg.mode.vdisplay,
+	     arg.x, arg.y,
+	     arg.mode.clock,
 	     arg.fb_id,
 	     sna_crtc->shadow ? " [shadow]" : "",
 	     output_count));
@@ -593,10 +594,8 @@ sna_crtc_disable(xf86CrtcPtr crtc)
 
 	DBG(("%s: disabling crtc [%d]\n", __FUNCTION__, sna_crtc->id));
 
-	VG_CLEAR(arg);
+	memset(&arg, 0, sizeof(arg));
 	arg.crtc_id = sna_crtc->id;
-	arg.fb_id = 0;
-	arg.mode_valid = 0;
 	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
 
 	sna_crtc_disable_shadow(sna, sna_crtc);
commit 85e4f48a87ddbc227af8f4af5ea46ae17902b111
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 11:44:45 2012 +0100

    sna: Add a DBG to the periodic flush mechanism
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 925e355..76f6cae 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3029,6 +3029,8 @@ bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
 		return false;
 
 	bo->needs_flush = kgem_busy(kgem, bo->handle);
+	DBG(("%s: handle=%d, busy?=%d\n",
+	     __FUNCTION__, bo->handle, bo->needs_flush));
 	return bo->needs_flush;
 }
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a83e5cb..b5e7e92 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12333,7 +12333,7 @@ static bool has_shadow(struct sna *sna)
 
 static bool need_flush(struct sna *sna, struct sna_pixmap *scanout)
 {
-	DBG(("%s: scanout=%d shadow?=%d || (cpu?=%d || gpu?=%d) && !busy=%d)\n",
+	DBG(("%s: scanout=%d shadow?=%d, (cpu?=%d || gpu?=%d), busy=%d)\n",
 	     __FUNCTION__,
 	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
 	     has_shadow(sna),
commit 87dd6408a5c29e4808283df78a981de0a3c0a79c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 10:53:59 2012 +0100

    sna: Correct the reversal of the periodic flushing semantics
    
    Regression from 1e9319d (sna: extend RandR to support super sized
    monitor configurations) which tried to take into account the need to
    flush the shadow CRTC bo in addition to the normal scanout bo. In the
    refactoring of the need_flush(), the double negative was missed.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0f52a27..a83e5cb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12347,8 +12347,10 @@ static bool need_flush(struct sna *sna, struct sna_pixmap *scanout)
 	if (!scanout)
 		return false;
 
-	return (scanout->cpu_damage || scanout->gpu_bo->exec) &&
-		!__kgem_flush(&sna->kgem, scanout->gpu_bo);
+	if (scanout->cpu_damage || scanout->gpu_bo->exec)
+		return true;
+
+	return __kgem_flush(&sna->kgem, scanout->gpu_bo);
 }
 
 static bool sna_accel_do_flush(struct sna *sna)
commit 05f486f64bc7ea4a8a71f5d792fa586ac0843414
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 28 10:42:21 2012 +0100

    sna: Flush the per-crtc render caches for rotated scanouts
    
    We need to manually flush the render cache in order for results to be
    visible on the scanout.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9e47017..b7c5a40 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2758,8 +2758,10 @@ void sna_mode_redisplay(struct sna *sna)
 		damage.extents = crtc->bounds;
 		damage.data = NULL;
 		RegionIntersect(&damage, &damage, region);
-		if (RegionNotEmpty(&damage))
+		if (RegionNotEmpty(&damage)) {
 			sna_crtc_redisplay(crtc, &damage);
+			__kgem_flush(&sna->kgem, sna_crtc->bo);
+		}
 		RegionUninit(&damage);
 	}
 
commit db7979981032d5647069f213a063dcbf78e57890
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 27 09:36:58 2012 +0100

    sna: s/width/height/ cut'n'paste typo
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 4a63cff..9e47017 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -978,7 +978,7 @@ sna_crtc_damage(xf86CrtcPtr crtc)
 		region.extents.y1 = 0;
 	if (region.extents.x2 > screen->width)
 		region.extents.x2 = screen->width;
-	if (region.extents.y2 > screen->width)
+	if (region.extents.y2 > screen->height)
 		region.extents.y2 = screen->height;
 
 	DBG(("%s: marking crtc %d as completely damaged (%d, %d), (%d, %d)\n",
commit fcbbe1664ae62ae275068cc040f385d2320d5213
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 27 09:30:44 2012 +0100

    sna: Remove a trailing ';'
    
    The unwanted ';' caused is_cpu() to always return false if a GPU bo was
    attached. Not necessarily a bad thing; it just misses the potential
    optimisation where, having chosen to prefer to use the CPU path, we then
    have to migrate to the GPU even though the bo is undamaged or idle.
    
    Spotted-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 8a5a405..2210127 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -89,7 +89,7 @@ is_cpu(DrawablePtr drawable)
 		return true;
 
 	assert(!priv->gpu_bo->proxy); /* should be marked as cpu damaged */
-	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo));
+	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo))
 		return false;
 
 	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
commit a072ab506569ecff5b4c57fa90f7a417db69f33b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 26 17:24:37 2012 +0100

    test: Add client side copy to FakeFront for emulating CopyBuffer correctly
    
    The server manages FakeFront following a flip, but if the client
    optimises a swap by replacing it with a CopyRegion, it is expected to
    also update the FakeFront itself. Replicate that behaviour so that the
    timings for the test case are consistent with mesa.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/dri2-swap.c b/test/dri2-swap.c
index e592cd8..ba6b237 100644
--- a/test/dri2-swap.c
+++ b/test/dri2-swap.c
@@ -41,7 +41,8 @@ static int dri2_open(Display *dpy)
 	return fd;
 }
 
-static void dri2_copy_swap(Display *dpy, Drawable d, int width, int height)
+static void dri2_copy_swap(Display *dpy, Drawable d,
+			   int width, int height, int has_front)
 {
 	XRectangle rect;
 	XserverRegion region;
@@ -53,6 +54,8 @@ static void dri2_copy_swap(Display *dpy, Drawable d, int width, int height)
 
 	region = XFixesCreateRegion(dpy, &rect, 1);
 	DRI2CopyRegion(dpy, d, region, DRI2BufferFrontLeft, DRI2BufferBackLeft);
+	if (has_front)
+		DRI2CopyRegion(dpy, d, region, DRI2BufferFakeFrontLeft, DRI2BufferFrontLeft);
 	XFixesDestroyRegion(dpy, region);
 }
 
@@ -114,7 +117,7 @@ static void run(Display *dpy, int width, int height,
 	xsync(dpy, win);
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	for (count = 0; count < COUNT; count++)
-		dri2_copy_swap(dpy, win, width, height);
+		dri2_copy_swap(dpy, win, width, height, nattachments == 2);
 	xsync(dpy, win);
 	clock_gettime(CLOCK_MONOTONIC, &end);
 
@@ -165,6 +168,5 @@ int main(void)
 	run(dpy, width, height, attachments, 1, "windowed");
 	run(dpy, width, height, attachments, 2, "windowed (with front)");
 
-
 	return 0;
 }
commit 96804c74f8f0d19fc1b9db467cf050f4934fe83b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 26 17:09:45 2012 +0100

    test: FakeFront rules
    
    Oh my, I just once again rediscovered the copy on every flip due to the
    requirement for keeping FakeFront up to date for reads after a SwapBuffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/dri2-swap.c b/test/dri2-swap.c
index 5da3c72..e592cd8 100644
--- a/test/dri2-swap.c
+++ b/test/dri2-swap.c
@@ -73,14 +73,11 @@ static double elapsed(const struct timespec *start,
 }
 
 static void run(Display *dpy, int width, int height,
+		unsigned int *attachments, int nattachments,
 		const char *name)
 {
 	Window win;
 	XSetWindowAttributes attr;
-	unsigned int attachments[] = {
-		DRI2BufferFrontLeft,
-		DRI2BufferBackLeft,
-	};
 	int count;
 	DRI2Buffer *buffers;
 	struct timespec start, end;
@@ -100,10 +97,9 @@ static void run(Display *dpy, int width, int height,
 
 	DRI2CreateDrawable(dpy, win);
 
-	count = 2;
 	buffers = DRI2GetBuffers(dpy, win, &width, &height,
-				 attachments, count, &count);
-	if (count != 2)
+				 attachments, nattachments, &count);
+	if (count != nattachments)
 		return;
 
 	xsync(dpy, win);
@@ -146,6 +142,10 @@ int main(void)
 {
 	Display *dpy;
 	int width, height, fd;
+	unsigned int attachments[] = {
+		DRI2BufferBackLeft,
+		DRI2BufferFrontLeft,
+	};
 
 	dpy = XOpenDisplay (NULL);
 	if (dpy == NULL)
@@ -157,11 +157,13 @@ int main(void)
 
 	width = WidthOfScreen(DefaultScreenOfDisplay(dpy));
 	height = HeightOfScreen(DefaultScreenOfDisplay(dpy));
-	run(dpy, width, height, "fullscreen");
+	run(dpy, width, height, attachments, 1, "fullscreen");
+	run(dpy, width, height, attachments, 2, "fullscreen (with front)");
 
 	width /= 2;
 	height /= 2;
-	run(dpy, width, height, "windowed");
+	run(dpy, width, height, attachments, 1, "windowed");
+	run(dpy, width, height, attachments, 2, "windowed (with front)");
 
 
 	return 0;
commit f306cd557ef263ff5057e413c335cc75bc0f7895
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 26 14:59:37 2012 +0100

    sna/dri: Hold a reference to the cached DRI2 buffer on the front buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 3c5722c..dddcc73 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -229,7 +229,7 @@ sna_dri_create_buffer(DrawablePtr drawable,
 			}
 			FreeResourceByType(drawable->id,
 					   dri_drawable_type,
-					   TRUE);
+					   FALSE);
 		}
 
 		bo = sna_pixmap_set_dri(sna, pixmap);
@@ -332,8 +332,9 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	if (pixmap)
 		pixmap->refcnt++;
 
-	if (attachment == DRI2BufferFrontLeft)
-		(void)AddResource(drawable->id, dri_drawable_type, buffer);
+	if (attachment == DRI2BufferFrontLeft &&
+	    AddResource(drawable->id, dri_drawable_type, buffer))
+		private->refcnt++;
 
 	return buffer;
 
@@ -843,6 +844,10 @@ static int
 sna_dri_drawable_gone(void *data, XID id)
 {
 	DBG(("%s(%ld)\n", __FUNCTION__, (long)id));
+
+	_sna_dri_destroy_buffer(to_sna_from_pixmap(get_private(data)->pixmap),
+				data);
+
 	return Success;
 }
 
commit a87f2b9325bfad2bb3f93226706b6f9a09598945
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 24 13:48:38 2012 +0100

    sna/gen4: Check for peculiar initial values for the surface offset
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index cd4ca36..c3a8204 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1270,6 +1270,8 @@ gen4_emit_state_base_address(struct sna *sna)
 static void
 gen4_emit_invariant(struct sna *sna)
 {
+	assert(sna->kgem.surface == sna->kgem.batch_size);
+
 	if (sna->kgem.gen >= 45)
 		OUT_BATCH(NEW_PIPELINE_SELECT | PIPELINE_SELECT_3D);
 	else
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c79903b..925e355 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1155,6 +1155,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(list_is_empty(&bo->list));
 	assert(bo->refcnt == 0);
+	assert(!bo->purged);
 
 	bo->binding.offset = 0;
 	kgem_bo_clear_scanout(kgem, bo);
commit 8f4221a2520b7ed1f67b41185c2e4842e83e53ef
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 26 15:56:50 2012 +0100

    test: Add a simple exercise for DRI2 swap paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/Makefile.am b/test/Makefile.am
index b0e0e13..ba4966c 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -16,12 +16,13 @@ stress_TESTS = \
 	render-copyarea-size \
 	render-copy-alphaless \
 	mixed-stress \
+	dri2-swap \
 	$(NULL)
 
 check_PROGRAMS = $(stress_TESTS)
 
-AM_CFLAGS = @CWARNFLAGS@ @X11_CFLAGS@
-LDADD = libtest.la @X11_LIBS@
+AM_CFLAGS = @CWARNFLAGS@ @X11_CFLAGS@ @DRM_CFLAGS@
+LDADD = libtest.la @X11_LIBS@ -lXfixes @DRM_LIBS@ -lrt
 
 noinst_LTLIBRARIES = libtest.la
 libtest_la_SOURCES = \
@@ -30,6 +31,7 @@ libtest_la_SOURCES = \
 	test_image.c \
 	test_log.c \
 	test_render.c \
+	dri2.c \
 	$(NULL)
 
 EXTRA_DIST = README
diff --git a/test/dri2-swap.c b/test/dri2-swap.c
new file mode 100644
index 0000000..5da3c72
--- /dev/null
+++ b/test/dri2-swap.c
@@ -0,0 +1,168 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <X11/Xlib.h>
+#include <X11/Xutil.h>
+#include <X11/extensions/Xfixes.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <time.h>
+
+#include <xf86drm.h>
+#include <drm.h>
+
+#include "dri2.h"
+
+#define COUNT 60
+
+static int dri2_open(Display *dpy)
+{
+	drm_auth_t auth;
+	char *driver, *device;
+	int fd;
+
+	if (!DRI2Connect(dpy, DefaultRootWindow(dpy), &driver, &device))
+		return -1;
+
+	printf ("Connecting to %s driver on %s\n", driver, device);
+
+	fd = open("/dev/dri/card0", O_RDWR);
+	if (fd < 0)
+		return -1;
+
+	if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
+		return -1;
+
+	if (!DRI2Authenticate(dpy, DefaultRootWindow(dpy), auth.magic))
+		return -1;
+
+	return fd;
+}
+
+static void dri2_copy_swap(Display *dpy, Drawable d, int width, int height)
+{
+	XRectangle rect;
+	XserverRegion region;
+
+	rect.x = 0;
+	rect.y = 0;
+	rect.width = width;
+	rect.height = height;
+
+	region = XFixesCreateRegion(dpy, &rect, 1);
+	DRI2CopyRegion(dpy, d, region, DRI2BufferFrontLeft, DRI2BufferBackLeft);
+	XFixesDestroyRegion(dpy, region);
+}
+
+static void xsync(Display *dpy, Window win)
+{
+	XImage *image;
+
+	image = XGetImage(dpy, win, 0, 0, 1, 1, ~0, ZPixmap);
+	if (image)
+		XDestroyImage(image);
+}
+
+static double elapsed(const struct timespec *start,
+		      const struct timespec *end)
+{
+	return (end->tv_sec - start->tv_sec) +
+		1e-9*(end->tv_nsec - start->tv_nsec);
+}
+
+static void run(Display *dpy, int width, int height,
+		const char *name)
+{
+	Window win;
+	XSetWindowAttributes attr;
+	unsigned int attachments[] = {
+		DRI2BufferFrontLeft,
+		DRI2BufferBackLeft,
+	};
+	int count;
+	DRI2Buffer *buffers;
+	struct timespec start, end;
+
+	/* Be nasty and install a fullscreen window on top so that we
+	 * can guarantee we do not get clipped by children.
+	 */
+	attr.override_redirect = 1;
+	win = XCreateWindow(dpy, DefaultRootWindow(dpy),
+			 0, 0, width, height, 0,
+			 DefaultDepth(dpy, DefaultScreen(dpy)),
+			 InputOutput,
+			 DefaultVisual(dpy, DefaultScreen(dpy)),
+			 CWOverrideRedirect, &attr);
+	XMapWindow(dpy, win);
+	xsync(dpy, win);
+
+	DRI2CreateDrawable(dpy, win);
+
+	count = 2;
+	buffers = DRI2GetBuffers(dpy, win, &width, &height,
+				 attachments, count, &count);
+	if (count != 2)
+		return;
+
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	for (count = 0; count < COUNT; count++)
+		DRI2SwapBuffers(dpy, win, 0, 0, 0);
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &end);
+	printf("%d %s (%dx%d) swaps in %fs.\n",
+	       count, name, width, height, elapsed(&start, &end));
+
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	for (count = 0; count < COUNT; count++)
+		dri2_copy_swap(dpy, win, width, height);
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &end);
+
+	printf("%d %s (%dx%d) blits in %fs.\n",
+	       count, name, width, height, elapsed(&start, &end));
+
+	DRI2SwapInterval(dpy, win, 0);
+
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	for (count = 0; count < COUNT; count++)
+		DRI2SwapBuffers(dpy, win, 0, 0, 0);
+	xsync(dpy, win);
+	clock_gettime(CLOCK_MONOTONIC, &end);
+	printf("%d %s (%dx%d) vblank=0 swaps in %fs.\n",
+	       count, name, width, height, elapsed(&start, &end));
+
+	XDestroyWindow(dpy, win);
+	free(buffers);
+
+	XSync(dpy, 1);
+}
+
+int main(void)
+{
+	Display *dpy;
+	int width, height, fd;
+
+	dpy = XOpenDisplay (NULL);
+	if (dpy == NULL)
+		return 77;
+
+	fd = dri2_open(dpy);
+	if (fd < 0)
+		return 1;
+
+	width = WidthOfScreen(DefaultScreenOfDisplay(dpy));
+	height = HeightOfScreen(DefaultScreenOfDisplay(dpy));
+	run(dpy, width, height, "fullscreen");
+
+	width /= 2;
+	height /= 2;
+	run(dpy, width, height, "windowed");
+
+
+	return 0;
+}
diff --git a/test/dri2.c b/test/dri2.c
new file mode 100644
index 0000000..86a0a74
--- /dev/null
+++ b/test/dri2.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright © 2008 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Soft-
+ * ware"), to deal in the Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge, publish, distribute,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, provided that the above copyright
+ * notice(s) and this permission notice appear in all copies of the Soft-
+ * ware and that both the above copyright notice(s) and this permission
+ * notice appear in supporting documentation.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+ * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
+ * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
+ * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
+ * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
+ * MANCE OF THIS SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or
+ * other dealings in this Software without prior written authorization of
+ * the copyright holder.
+ *
+ * Authors:
+ *   Kristian Høgsberg (krh at redhat.com)
+ */
+
+
+#include <stdio.h>
+#include <X11/Xlibint.h>
+#include <X11/extensions/Xext.h>
+#include <X11/extensions/extutil.h>
+#include <X11/extensions/dri2proto.h>
+
+#include <GL/glx.h>
+#include <GL/glxext.h>
+
+#include "dri2.h"
+
+/* Allow the build to work with an older versions of dri2proto.h and
+ * dri2tokens.h.
+ */
+#if DRI2_MINOR < 1
+#undef DRI2_MINOR
+#define DRI2_MINOR 1
+#define X_DRI2GetBuffersWithFormat 7
+#endif
+
+
+static char dri2ExtensionName[] = DRI2_NAME;
+static XExtensionInfo *dri2Info;
+static XEXT_GENERATE_CLOSE_DISPLAY (DRI2CloseDisplay, dri2Info)
+
+static Bool
+DRI2WireToEvent(Display *dpy, XEvent *event, xEvent *wire);
+static Status
+DRI2EventToWire(Display *dpy, XEvent *event, xEvent *wire);
+static int
+DRI2Error(Display *display, xError *err, XExtCodes *codes, int *ret_code);
+
+static /* const */ XExtensionHooks dri2ExtensionHooks = {
+  NULL,                   /* create_gc */
+  NULL,                   /* copy_gc */
+  NULL,                   /* flush_gc */
+  NULL,                   /* free_gc */
+  NULL,                   /* create_font */
+  NULL,                   /* free_font */
+  DRI2CloseDisplay,       /* close_display */
+  DRI2WireToEvent,        /* wire_to_event */
+  DRI2EventToWire,        /* event_to_wire */
+  DRI2Error,              /* error */
+  NULL,                   /* error_string */
+};
+
+static XEXT_GENERATE_FIND_DISPLAY (DRI2FindDisplay,
+                                   dri2Info,
+                                   dri2ExtensionName,
+                                   &dri2ExtensionHooks,
+                                   0, NULL)
+
+static Bool
+DRI2WireToEvent(Display *dpy, XEvent *event, xEvent *wire)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   switch ((wire->u.u.type & 0x7f) - info->codes->first_event) {
+#ifdef X_DRI2SwapBuffers
+   case DRI2_BufferSwapComplete:
+      /* Ignore swap events if we're not looking for them */
+	   printf("BufferSwapComplete\n");
+      return False;
+#endif
+#ifdef DRI2_InvalidateBuffers
+   case DRI2_InvalidateBuffers:
+	   printf("InvalidateBuffers\n");
+      return False;
+#endif
+   default:
+      /* client doesn't support server event */
+      break;
+   }
+
+   return False;
+}
+
+/* We don't actually support this.  It doesn't make sense for clients to
+ * send each other DRI2 events.
+ */
+static Status
+DRI2EventToWire(Display *dpy, XEvent *event, xEvent *wire)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   switch (event->type) {
+   default:
+      /* client doesn't support server event */
+      break;
+   }
+
+   return Success;
+}
+
+static int
+DRI2Error(Display *display, xError *err, XExtCodes *codes, int *ret_code)
+{
+    if (err->majorCode == codes->major_opcode &&
+	err->errorCode == BadDrawable &&
+	err->minorCode == X_DRI2CopyRegion)
+	return True;
+
+    /* If the X drawable was destroyed before the GLX drawable, the
+     * DRI2 drawble will be gone by the time we call
+     * DRI2DestroyDrawable.  So just ignore BadDrawable here. */
+    if (err->majorCode == codes->major_opcode &&
+	err->errorCode == BadDrawable &&
+	err->minorCode == X_DRI2DestroyDrawable)
+	return True;
+
+    /* If the server is non-local DRI2Connect will raise BadRequest.
+     * Swallow this so that DRI2Connect can signal this in its return code */
+    if (err->majorCode == codes->major_opcode &&
+        err->minorCode == X_DRI2Connect &&
+        err->errorCode == BadRequest) {
+	*ret_code = False;
+	return True;
+    }
+
+    return False;
+}
+
+Bool
+DRI2QueryExtension(Display * dpy, int *eventBase, int *errorBase)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+
+   if (XextHasExtension(info)) {
+      *eventBase = info->codes->first_event;
+      *errorBase = info->codes->first_error;
+      return True;
+   }
+
+   return False;
+}
+
+Bool
+DRI2QueryVersion(Display * dpy, int *major, int *minor)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2QueryVersionReply rep;
+   xDRI2QueryVersionReq *req;
+   int i, nevents;
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   LockDisplay(dpy);
+   GetReq(DRI2QueryVersion, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2QueryVersion;
+   req->majorVersion = DRI2_MAJOR;
+   req->minorVersion = DRI2_MINOR;
+   if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+   *major = rep.majorVersion;
+   *minor = rep.minorVersion;
+   UnlockDisplay(dpy);
+   SyncHandle();
+
+   switch (rep.minorVersion) {
+   case 1:
+	   nevents = 0;
+	   break;
+   case 2:
+	   nevents = 1;
+	   break;
+   case 3:
+   default:
+	   nevents = 2;
+	   break;
+   }
+	
+   for (i = 0; i < nevents; i++) {
+       XESetWireToEvent (dpy, info->codes->first_event + i, DRI2WireToEvent);
+       XESetEventToWire (dpy, info->codes->first_event + i, DRI2EventToWire);
+   }
+
+   return True;
+}
+
+Bool
+DRI2Connect(Display * dpy, XID window, char **driverName, char **deviceName)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2ConnectReply rep;
+   xDRI2ConnectReq *req;
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   LockDisplay(dpy);
+   GetReq(DRI2Connect, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2Connect;
+   req->window = window;
+   req->driverType = DRI2DriverDRI;
+   if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+
+   if (rep.driverNameLength == 0 && rep.deviceNameLength == 0) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+
+   *driverName = Xmalloc(rep.driverNameLength + 1);
+   if (*driverName == NULL) {
+      _XEatData(dpy,
+                ((rep.driverNameLength + 3) & ~3) +
+                ((rep.deviceNameLength + 3) & ~3));
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+   _XReadPad(dpy, *driverName, rep.driverNameLength);
+   (*driverName)[rep.driverNameLength] = '\0';
+
+   *deviceName = Xmalloc(rep.deviceNameLength + 1);
+   if (*deviceName == NULL) {
+      Xfree(*driverName);
+      _XEatData(dpy, ((rep.deviceNameLength + 3) & ~3));
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+   _XReadPad(dpy, *deviceName, rep.deviceNameLength);
+   (*deviceName)[rep.deviceNameLength] = '\0';
+
+   UnlockDisplay(dpy);
+   SyncHandle();
+
+   return True;
+}
+
+Bool
+DRI2Authenticate(Display * dpy, XID window, unsigned int magic)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2AuthenticateReq *req;
+   xDRI2AuthenticateReply rep;
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   LockDisplay(dpy);
+   GetReq(DRI2Authenticate, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2Authenticate;
+   req->window = window;
+   req->magic = magic;
+
+   if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return False;
+   }
+
+   UnlockDisplay(dpy);
+   SyncHandle();
+
+   return rep.authenticated;
+}
+
+void
+DRI2CreateDrawable(Display * dpy, XID drawable)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2CreateDrawableReq *req;
+
+   XextSimpleCheckExtension(dpy, info, dri2ExtensionName);
+
+   LockDisplay(dpy);
+   GetReq(DRI2CreateDrawable, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2CreateDrawable;
+   req->drawable = drawable;
+   UnlockDisplay(dpy);
+   SyncHandle();
+}
+
+void
+DRI2DestroyDrawable(Display * dpy, XID drawable)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2DestroyDrawableReq *req;
+
+   XextSimpleCheckExtension(dpy, info, dri2ExtensionName);
+
+   XSync(dpy, False);
+
+   LockDisplay(dpy);
+   GetReq(DRI2DestroyDrawable, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2DestroyDrawable;
+   req->drawable = drawable;
+   UnlockDisplay(dpy);
+   SyncHandle();
+}
+
+DRI2Buffer *
+DRI2GetBuffers(Display * dpy, XID drawable,
+               int *width, int *height,
+               unsigned int *attachments, int count, int *outCount)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2GetBuffersReply rep;
+   xDRI2GetBuffersReq *req;
+   DRI2Buffer *buffers;
+   xDRI2Buffer repBuffer;
+   uint32_t *p;
+   int i;
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   LockDisplay(dpy);
+   GetReqExtra(DRI2GetBuffers, count * 4, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2GetBuffers;
+   req->drawable = drawable;
+   req->count = count;
+   p = (uint32_t *) & req[1];
+   for (i = 0; i < count; i++)
+      p[i] = attachments[i];
+
+   if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return NULL;
+   }
+
+   *width = rep.width;
+   *height = rep.height;
+   *outCount = rep.count;
+
+   buffers = Xmalloc(rep.count * sizeof buffers[0]);
+   if (buffers == NULL) {
+      _XEatData(dpy, rep.count * sizeof repBuffer);
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return NULL;
+   }
+
+   for (i = 0; i < rep.count; i++) {
+      _XReadPad(dpy, (char *) &repBuffer, sizeof repBuffer);
+      buffers[i].attachment = repBuffer.attachment;
+      buffers[i].name = repBuffer.name;
+      buffers[i].pitch = repBuffer.pitch;
+      buffers[i].cpp = repBuffer.cpp;
+      buffers[i].flags = repBuffer.flags;
+   }
+
+   UnlockDisplay(dpy);
+   SyncHandle();
+
+   return buffers;
+}
+
+
+DRI2Buffer *
+DRI2GetBuffersWithFormat(Display * dpy, XID drawable,
+                         int *width, int *height,
+                         unsigned int *attachments, int count, int *outCount)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2GetBuffersReply rep;
+   xDRI2GetBuffersReq *req;
+   DRI2Buffer *buffers;
+   xDRI2Buffer repBuffer;
+   uint32_t *p;
+   int i;
+
+   XextCheckExtension(dpy, info, dri2ExtensionName, False);
+
+   LockDisplay(dpy);
+   GetReqExtra(DRI2GetBuffers, count * (4 * 2), req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2GetBuffersWithFormat;
+   req->drawable = drawable;
+   req->count = count;
+   p = (uint32_t *) & req[1];
+   for (i = 0; i < (count * 2); i++)
+      p[i] = attachments[i];
+
+   if (!_XReply(dpy, (xReply *) & rep, 0, xFalse)) {
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return NULL;
+   }
+
+   *width = rep.width;
+   *height = rep.height;
+   *outCount = rep.count;
+
+   buffers = Xmalloc(rep.count * sizeof buffers[0]);
+   if (buffers == NULL) {
+      _XEatData(dpy, rep.count * sizeof repBuffer);
+      UnlockDisplay(dpy);
+      SyncHandle();
+      return NULL;
+   }
+
+   for (i = 0; i < rep.count; i++) {
+      _XReadPad(dpy, (char *) &repBuffer, sizeof repBuffer);
+      buffers[i].attachment = repBuffer.attachment;
+      buffers[i].name = repBuffer.name;
+      buffers[i].pitch = repBuffer.pitch;
+      buffers[i].cpp = repBuffer.cpp;
+      buffers[i].flags = repBuffer.flags;
+   }
+
+   UnlockDisplay(dpy);
+   SyncHandle();
+
+   return buffers;
+}
+
+
+void
+DRI2CopyRegion(Display * dpy, XID drawable, XserverRegion region,
+               uint32_t dest, uint32_t src)
+{
+   XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+   xDRI2CopyRegionReq *req;
+   xDRI2CopyRegionReply rep;
+
+   XextSimpleCheckExtension(dpy, info, dri2ExtensionName);
+
+   LockDisplay(dpy);
+   GetReq(DRI2CopyRegion, req);
+   req->reqType = info->codes->major_opcode;
+   req->dri2ReqType = X_DRI2CopyRegion;
+   req->drawable = drawable;
+   req->region = region;
+   req->dest = dest;
+   req->src = src;
+
+   _XReply(dpy, (xReply *) & rep, 0, xFalse);
+
+   UnlockDisplay(dpy);
+   SyncHandle();
+}
+
+#ifdef X_DRI2SwapBuffers
+static void
+load_swap_req(xDRI2SwapBuffersReq *req, uint64_t target, uint64_t divisor,
+	     uint64_t remainder)
+{
+    req->target_msc_hi = target >> 32;
+    req->target_msc_lo = target & 0xffffffff;
+    req->divisor_hi = divisor >> 32;
+    req->divisor_lo = divisor & 0xffffffff;
+    req->remainder_hi = remainder >> 32;
+    req->remainder_lo = remainder & 0xffffffff;
+}
+
+static uint64_t
+vals_to_card64(uint32_t lo, uint32_t hi)
+{
+    return (uint64_t)hi << 32 | lo;
+}
+
+uint64_t DRI2SwapBuffers(Display *dpy, XID drawable,
+			 uint64_t target_msc, uint64_t divisor, uint64_t remainder)
+{
+    XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+    xDRI2SwapBuffersReq *req;
+    xDRI2SwapBuffersReply rep;
+    uint64_t count;
+
+    XextCheckExtension (dpy, info, dri2ExtensionName, 0);
+
+    LockDisplay(dpy);
+    GetReq(DRI2SwapBuffers, req);
+    req->reqType = info->codes->major_opcode;
+    req->dri2ReqType = X_DRI2SwapBuffers;
+    req->drawable = drawable;
+    load_swap_req(req, target_msc, divisor, remainder);
+
+    _XReply(dpy, (xReply *)&rep, 0, xFalse);
+
+    count = vals_to_card64(rep.swap_lo, rep.swap_hi);
+
+    UnlockDisplay(dpy);
+    SyncHandle();
+
+    return count;
+}
+#endif
+
+#ifdef X_DRI2GetMSC
+Bool DRI2GetMSC(Display *dpy, XID drawable, uint64_t *ust, uint64_t *msc,
+		uint64_t *sbc)
+{
+    XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+    xDRI2GetMSCReq *req;
+    xDRI2MSCReply rep;
+
+    XextCheckExtension (dpy, info, dri2ExtensionName, False);
+
+    LockDisplay(dpy);
+    GetReq(DRI2GetMSC, req);
+    req->reqType = info->codes->major_opcode;
+    req->dri2ReqType = X_DRI2GetMSC;
+    req->drawable = drawable;
+
+    if (!_XReply(dpy, (xReply *)&rep, 0, xFalse)) {
+	UnlockDisplay(dpy);
+	SyncHandle();
+	return False;
+    }
+
+    *ust = vals_to_card64(rep.ust_lo, rep.ust_hi);
+    *msc = vals_to_card64(rep.msc_lo, rep.msc_hi);
+    *sbc = vals_to_card64(rep.sbc_lo, rep.sbc_hi);
+
+    UnlockDisplay(dpy);
+    SyncHandle();
+
+    return True;
+}
+#endif
+
+#ifdef X_DRI2WaitMSC
+static void
+load_msc_req(xDRI2WaitMSCReq *req, uint64_t target, uint64_t divisor,
+	     uint64_t remainder)
+{
+    req->target_msc_hi = target >> 32;
+    req->target_msc_lo = target & 0xffffffff;
+    req->divisor_hi = divisor >> 32;
+    req->divisor_lo = divisor & 0xffffffff;
+    req->remainder_hi = remainder >> 32;
+    req->remainder_lo = remainder & 0xffffffff;
+}
+
+Bool DRI2WaitMSC(Display *dpy, XID drawable, uint64_t target_msc, uint64_t divisor,
+		 uint64_t remainder, uint64_t *ust, uint64_t *msc, uint64_t *sbc)
+{
+    XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+    xDRI2WaitMSCReq *req;
+    xDRI2MSCReply rep;
+
+    XextCheckExtension (dpy, info, dri2ExtensionName, False);
+
+    LockDisplay(dpy);
+    GetReq(DRI2WaitMSC, req);
+    req->reqType = info->codes->major_opcode;
+    req->dri2ReqType = X_DRI2WaitMSC;
+    req->drawable = drawable;
+    load_msc_req(req, target_msc, divisor, remainder);
+
+    if (!_XReply(dpy, (xReply *)&rep, 0, xFalse)) {
+	UnlockDisplay(dpy);
+	SyncHandle();
+	return False;
+    }
+
+    *ust = ((uint64_t)rep.ust_hi << 32) | (uint64_t)rep.ust_lo;
+    *msc = ((uint64_t)rep.msc_hi << 32) | (uint64_t)rep.msc_lo;
+    *sbc = ((uint64_t)rep.sbc_hi << 32) | (uint64_t)rep.sbc_lo;
+
+    UnlockDisplay(dpy);
+    SyncHandle();
+
+    return True;
+}
+#endif
+
+#ifdef X_DRI2WaitSBC
+static void
+load_sbc_req(xDRI2WaitSBCReq *req, uint64_t target)
+{
+    req->target_sbc_hi = target >> 32;
+    req->target_sbc_lo = target & 0xffffffff;
+}
+
+Bool DRI2WaitSBC(Display *dpy, XID drawable, uint64_t target_sbc, uint64_t *ust,
+		 uint64_t *msc, uint64_t *sbc)
+{
+    XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+    xDRI2WaitSBCReq *req;
+    xDRI2MSCReply rep;
+
+    XextCheckExtension (dpy, info, dri2ExtensionName, False);
+
+    LockDisplay(dpy);
+    GetReq(DRI2WaitSBC, req);
+    req->reqType = info->codes->major_opcode;
+    req->dri2ReqType = X_DRI2WaitSBC;
+    req->drawable = drawable;
+    load_sbc_req(req, target_sbc);
+
+    if (!_XReply(dpy, (xReply *)&rep, 0, xFalse)) {
+	UnlockDisplay(dpy);
+	SyncHandle();
+	return False;
+    }
+
+    *ust = ((uint64_t)rep.ust_hi << 32) | rep.ust_lo;
+    *msc = ((uint64_t)rep.msc_hi << 32) | rep.msc_lo;
+    *sbc = ((uint64_t)rep.sbc_hi << 32) | rep.sbc_lo;
+
+    UnlockDisplay(dpy);
+    SyncHandle();
+
+    return True;
+}
+#endif
+
+#ifdef X_DRI2SwapInterval
+void DRI2SwapInterval(Display *dpy, XID drawable, int interval)
+{
+    XExtDisplayInfo *info = DRI2FindDisplay(dpy);
+    xDRI2SwapIntervalReq *req;
+
+    XextSimpleCheckExtension (dpy, info, dri2ExtensionName);
+
+    LockDisplay(dpy);
+    GetReq(DRI2SwapInterval, req);
+    req->reqType = info->codes->major_opcode;
+    req->dri2ReqType = X_DRI2SwapInterval;
+    req->drawable = drawable;
+    req->interval = interval;
+    UnlockDisplay(dpy);
+    SyncHandle();
+}
+#endif
diff --git a/test/dri2.h b/test/dri2.h
new file mode 100644
index 0000000..9034d3a
--- /dev/null
+++ b/test/dri2.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright © 2007,2008 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Soft-
+ * ware"), to deal in the Software without restriction, including without
+ * limitation the rights to use, copy, modify, merge, publish, distribute,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, provided that the above copyright
+ * notice(s) and this permission notice appear in all copies of the Soft-
+ * ware and that both the above copyright notice(s) and this permission
+ * notice appear in supporting documentation.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+ * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
+ * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
+ * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
+ * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
+ * MANCE OF THIS SOFTWARE.
+ *
+ * Except as contained in this notice, the name of a copyright holder shall
+ * not be used in advertising or otherwise to promote the sale, use or
+ * other dealings in this Software without prior written authorization of
+ * the copyright holder.
+ *
+ * Authors:
+ *   Kristian Høgsberg (krh at redhat.com)
+ */
+
+#ifndef _DRI2_H_
+#define _DRI2_H_
+
+#include <stdint.h>
+
+#include <X11/extensions/Xfixes.h>
+#include <X11/extensions/dri2tokens.h>
+
+typedef struct
+{
+   unsigned int attachment;
+   unsigned int name;
+   unsigned int pitch;
+   unsigned int cpp;
+   unsigned int flags;
+} DRI2Buffer;
+
+extern Bool
+DRI2QueryExtension(Display * display, int *eventBase, int *errorBase);
+
+extern Bool
+DRI2QueryVersion(Display * display, int *major, int *minor);
+
+extern Bool
+DRI2Connect(Display * display, XID window,
+            char **driverName, char **deviceName);
+
+extern Bool
+DRI2Authenticate(Display * display, XID window, unsigned int magic);
+
+extern void
+DRI2CreateDrawable(Display * display, XID drawable);
+
+extern void
+DRI2DestroyDrawable(Display * display, XID handle);
+
+extern DRI2Buffer*
+DRI2GetBuffers(Display * dpy, XID drawable,
+               int *width, int *height,
+               unsigned int *attachments, int count,
+               int *outCount);
+
+/**
+ * \note
+ * This function is only supported with DRI2 version 1.1 or later.
+ */
+extern DRI2Buffer*
+DRI2GetBuffersWithFormat(Display * dpy, XID drawable,
+                         int *width, int *height,
+                         unsigned int *attachments,
+                         int count, int *outCount);
+
+extern void
+DRI2CopyRegion(Display * dpy, XID drawable,
+               XserverRegion region,
+               uint32_t dest, uint32_t src);
+
+extern uint64_t
+DRI2SwapBuffers(Display *dpy, XID drawable,
+		uint64_t target_msc, uint64_t divisor, uint64_t remainder);
+
+extern Bool
+DRI2GetMSC(Display *dpy, XID drawable, uint64_t *ust, uint64_t *msc, uint64_t *sbc);
+
+extern Bool
+DRI2WaitMSC(Display *dpy, XID drawable, uint64_t target_msc, uint64_t divisor,
+	    uint64_t remainder, uint64_t *ust, uint64_t *msc, uint64_t *sbc);
+
+extern Bool
+DRI2WaitSBC(Display *dpy, XID drawable, uint64_t target_sbc, uint64_t *ust,
+	    uint64_t *msc, uint64_t *sbc);
+
+extern void
+DRI2SwapInterval(Display *dpy, XID drawable, int interval);
+
+#endif
commit a505015a254d6c6e24f0542bc141cde873dc6f34
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 25 22:50:47 2012 +0100

    sna: Force DPMS to be on following a modeset
    
    Similarly to UXA, this papers over inconsistent behaviour in the kernel
    in handling the DPMS upon a modeswitch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0896af4..4a63cff 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -413,6 +413,29 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 	return mode.mode_valid && fb_id(sna_crtc->bo) == mode.fb_id;
 }
 
+static void
+sna_crtc_force_outputs_on(xf86CrtcPtr crtc)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
+	int i;
+
+	/* DPMS handling by the kernel is inconsistent, so after setting a
+	 * mode on an output presume that we intend for it to be on, or that
+	 * the kernel will force it on.
+	 *
+	 * So force DPMS to be on for all connected outputs, and restore
+	 * the backlight.
+	 */
+	for (i = 0; i < xf86_config->num_output; i++) {
+		xf86OutputPtr output = xf86_config->output[i];
+
+		if (output->crtc != crtc)
+			continue;
+
+		output->funcs->dpms(output, DPMSModeOn);
+	}
+}
+
 static Bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
@@ -478,14 +501,14 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	if (ret) {
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(errno));
-		ret = FALSE;
-	} else
-		ret = TRUE;
+		return FALSE;
+	}
 
 	if (crtc->scrn->pScreen)
 		xf86_reload_cursors(crtc->scrn->pScreen);
 
-	return ret;
+	sna_crtc_force_outputs_on(crtc);
+	return TRUE;
 }
 
 static bool sna_mode_enable_shadow(struct sna *sna)
commit b7a8c94cdb9cf42a31f8ce128d70e23458ba2042
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 25 20:49:59 2012 +0100

    sna: remove the assert(0)s along error paths
    
    These were there as a debugging aid to see if we ever hit unreachable
    code paths - mainly along corruption inducing GPU wedged recovery paths.
    They are superfluous and just scare the reader.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5b58e0e..c79903b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -217,7 +217,6 @@ retry_gtt:
 		if (kgem_expire_cache(kgem))
 			goto retry_gtt;
 
-		assert(0);
 		return NULL;
 	}
 
@@ -230,7 +229,6 @@ retry_mmap:
 		if (__kgem_throttle_retire(kgem, 0))
 			goto retry_mmap;
 
-		assert(0);
 		ptr = NULL;
 	}
 
@@ -295,7 +293,6 @@ static int gem_read(int fd, uint32_t handle, const void *dst,
 	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 	if (ret) {
 		DBG(("%s: failed, errno=%d\n", __FUNCTION__, errno));
-		assert(0);
 		return ret;
 	}
 
@@ -3523,14 +3520,12 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 #else
 static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 {
-	assert(0);
 	return 0;
 }
 struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				void *ptr, uint32_t size,
 				bool read_only)
 {
-	assert(0);
 	return 0;
 }
 #endif
commit 15c0ee445f603033c82f357fedfc7737d198d7b3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 24 19:28:03 2012 +0100

    sna/gen5: Tweak thread allocations
    
    Bump the allotted number of threads to their max. Using more threads than
    cores helps hide the stalls due to sampler fetch, math functions and urb
    write. Specifying too many threads seems to not incur a performance
    regression, suggesting that the hardware scheduler is sane enough not to
    overpopulate the EU.
    
    A small but significant boost, peak x11perf -aa10text on an i3-330m is
    raised from 1.93Mglyphs/s to 2.35Mglyphs/s.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 27ba04d..0fd51d0 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -65,7 +65,7 @@
 #define URB_CS_ENTRIES	      0
 
 #define URB_VS_ENTRY_SIZE     1
-#define URB_VS_ENTRIES	      128 /* minimum of 8 */
+#define URB_VS_ENTRIES	      256 /* minimum of 8 */
 
 #define URB_GS_ENTRY_SIZE     0
 #define URB_GS_ENTRIES	      0
@@ -74,7 +74,7 @@
 #define URB_CLIP_ENTRIES      0
 
 #define URB_SF_ENTRY_SIZE     2
-#define URB_SF_ENTRIES	      32
+#define URB_SF_ENTRIES	      64
 
 /*
  * this program computes dA/dx and dA/dy for the texture coordinates along
@@ -82,10 +82,10 @@
  */
 
 #define SF_KERNEL_NUM_GRF  16
-#define SF_MAX_THREADS	   2
+#define SF_MAX_THREADS	   48
 
 #define PS_KERNEL_NUM_GRF   32
-#define PS_MAX_THREADS	    48
+#define PS_MAX_THREADS	    72
 
 static const uint32_t sf_kernel[][4] = {
 #include "exa_sf.g5b"
commit fa10005ce31483827547b7f71eae066899f0026c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 23 17:28:00 2012 +0100

    sna/dri: Perform an exchange for a composited windowed SwapBuffers
    
    If the front buffer is not attached to the scanout and has not been
    reparented, we can simply exchange the underlying bo between the
    front/back attachments and inform the compositor of the damage.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index a323b4e..3c5722c 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -60,6 +60,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 enum frame_event_type {
 	DRI2_SWAP,
 	DRI2_SWAP_THROTTLE,
+	DRI2_XCHG_THROTTLE,
 	DRI2_ASYNC_FLIP,
 	DRI2_FLIP,
 	DRI2_FLIP_THROTTLE,
@@ -1075,6 +1076,45 @@ can_flip(struct sna * sna,
 	return TRUE;
 }
 
+static Bool
+can_exchange(struct sna * sna,
+	     DrawablePtr draw,
+	     DRI2BufferPtr front,
+	     DRI2BufferPtr back)
+{
+	WindowPtr win = (WindowPtr)draw;
+	PixmapPtr pixmap;
+
+	if (draw->type == DRAWABLE_PIXMAP)
+		return TRUE;
+
+	if (front->format != back->format) {
+		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
+		     __FUNCTION__, front->format, back->format));
+		return FALSE;
+	}
+
+	pixmap = get_window_pixmap(win);
+	if (pixmap == sna->front) {
+		DBG(("%s: no, window is attached to the front buffer\n",
+		     __FUNCTION__));
+		return FALSE;
+	}
+
+	if (pixmap->drawable.width != win->drawable.width ||
+	    pixmap->drawable.height != win->drawable.height) {
+		DBG(("%s: no, window has been reparented, window size %dx%d, parent %dx%d\n",
+		     __FUNCTION__,
+		     win->drawable.width,
+		     win->drawable.height,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height));
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
 inline static uint32_t pipe_select(int pipe)
 {
 	/* The third pipe was introduced with IvyBridge long after
@@ -1090,6 +1130,75 @@ inline static uint32_t pipe_select(int pipe)
 		return 0;
 }
 
+static void
+sna_dri_exchange_buffers(DrawablePtr draw,
+			 DRI2BufferPtr front,
+			 DRI2BufferPtr back)
+{
+	struct kgem_bo *back_bo, *front_bo;
+	PixmapPtr pixmap;
+	int tmp;
+
+	pixmap = get_drawable_pixmap(draw);
+
+	back_bo = get_private(back)->bo;
+	front_bo = get_private(front)->bo;
+
+	DBG(("%s: exchange front=%d/%d and back=%d/%d\n",
+	     __FUNCTION__,
+	     front_bo->handle, front->name,
+	     back_bo->handle, back->name));
+
+	set_bo(pixmap, back_bo);
+
+	get_private(front)->bo = back_bo;
+	get_private(back)->bo = front_bo;
+
+	tmp = front->name;
+	front->name = back->name;
+	back->name = tmp;
+}
+
+static void chain_swap(struct sna *sna,
+		       DrawablePtr draw,
+		       struct drm_event_vblank *event,
+		       struct sna_dri_frame_event *chain)
+{
+	drmVBlank vbl;
+	int type;
+
+	/* In theory, it shouldn't be possible for cross-chaining to occur! */
+	if (chain->type == DRI2_XCHG_THROTTLE) {
+		DBG(("%s: performing chained exchange\n", __FUNCTION__));
+		sna_dri_exchange_buffers(draw, chain->front, chain->back);
+		type = DRI2_EXCHANGE_COMPLETE;
+	} else {
+		DBG(("%s: emitting chained vsync'ed blit\n", __FUNCTION__));
+
+		chain->bo = sna_dri_copy_to_front(sna, draw, NULL,
+						  get_private(chain->front)->bo,
+						  get_private(chain->back)->bo,
+						 true);
+
+		type = DRI2_BLIT_COMPLETE;
+	}
+
+	DRI2SwapComplete(chain->client, draw,
+			 event->sequence, event->tv_sec, event->tv_usec,
+			 type, chain->client ? chain->event_complete : NULL, chain->event_data);
+
+	VG_CLEAR(vbl);
+	vbl.request.type =
+		DRM_VBLANK_RELATIVE |
+		DRM_VBLANK_NEXTONMISS |
+		DRM_VBLANK_EVENT |
+		pipe_select(chain->pipe);
+	vbl.request.sequence = 0;
+	vbl.request.signal = (unsigned long)chain;
+	if (sna_wait_vblank(sna, &vbl))
+		sna_dri_frame_event_info_free(sna, chain);
+}
+
 void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 {
 	struct sna_dri_frame_event *info = (void *)(uintptr_t)event->user_data;
@@ -1153,36 +1262,11 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 
 		if (info->chain) {
 			struct sna_dri_frame_event *chain = info->chain;
-			drmVBlank vbl;
-
-			DBG(("%s: emitting chained vsync'ed blit\n",
-			     __FUNCTION__));
 
 			assert(get_private(info->front)->chain == info);
 			get_private(info->front)->chain = chain;
 
-			chain->bo = sna_dri_copy_to_front(sna, draw, NULL,
-							  get_private(chain->front)->bo,
-							  get_private(chain->back)->bo,
-							  true);
-
-			DRI2SwapComplete(chain->client,
-					 draw, event->sequence,
-					 event->tv_sec, event->tv_usec,
-					 DRI2_BLIT_COMPLETE,
-					 chain->client ? chain->event_complete : NULL,
-					 chain->event_data);
-
-			VG_CLEAR(vbl);
-			vbl.request.type =
-				DRM_VBLANK_RELATIVE |
-				DRM_VBLANK_NEXTONMISS |
-				DRM_VBLANK_EVENT |
-				pipe_select(chain->pipe);
-			vbl.request.sequence = 0;
-			vbl.request.signal = (unsigned long)chain;
-			if (sna_wait_vblank(sna, &vbl))
-				sna_dri_frame_event_info_free(sna, chain);
+			chain_swap(sna, draw, event, chain);
 
 			info->chain = NULL;
 		} else if (get_private(info->front)->chain == info) {
@@ -1200,6 +1284,24 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 		}
 		break;
 
+	case DRI2_XCHG_THROTTLE:
+		DBG(("%s: xchg throttle\n", __FUNCTION__));
+
+		if (info->chain) {
+			struct sna_dri_frame_event *chain = info->chain;
+
+			assert(get_private(info->front)->chain == info);
+			get_private(info->front)->chain = chain;
+
+			chain_swap(sna, draw, event, chain);
+
+			info->chain = NULL;
+		} else {
+			DBG(("%s: chain complete\n", __FUNCTION__));
+			get_private(info->front)->chain = NULL;
+		}
+		break;
+
 	case DRI2_WAITMSC:
 		if (info->client)
 			DRI2WaitMSCComplete(info->client, draw,
@@ -1601,6 +1703,108 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	return TRUE;
 }
 
+static void
+sna_dri_immediate_xchg(struct sna *sna,
+		       DrawablePtr draw,
+		       struct sna_dri_frame_event *info)
+{
+	struct sna_dri_private *priv = get_private(info->front);
+	drmVBlank vbl;
+
+	DBG(("%s: emitting immediate exchange, throttling client\n", __FUNCTION__));
+
+	if ((sna->flags & SNA_NO_WAIT) == 0) {
+		info->type = DRI2_XCHG_THROTTLE;
+		if (priv->chain == NULL) {
+			DBG(("%s: no pending xchg, starting chain\n",
+			     __FUNCTION__));
+
+			sna_dri_exchange_buffers(draw, info->front, info->back);
+			DRI2SwapComplete(info->client, draw, 0, 0, 0,
+					 DRI2_EXCHANGE_COMPLETE,
+					 info->event_complete,
+					 info->event_data);
+			vbl.request.type =
+				DRM_VBLANK_RELATIVE |
+				DRM_VBLANK_NEXTONMISS |
+				DRM_VBLANK_EVENT |
+				pipe_select(info->pipe);
+			vbl.request.sequence = 0;
+			vbl.request.signal = (unsigned long)info;
+			if (sna_wait_vblank(sna, &vbl) == 0)
+				priv->chain = info;
+			else
+				sna_dri_frame_event_info_free(sna, info);
+		} else {
+			DBG(("%s: attaching to vsync chain\n",
+			     __FUNCTION__));
+			assert(priv->chain->chain == NULL);
+			priv->chain->chain = info;
+		}
+	} else {
+		DRI2SwapComplete(info->client, draw, 0, 0, 0,
+				 DRI2_EXCHANGE_COMPLETE,
+				 info->event_complete,
+				 info->event_data);
+		sna_dri_frame_event_info_free(sna, info);
+	}
+}
+
+static void
+sna_dri_immediate_blit(struct sna *sna,
+		       DrawablePtr draw,
+		       struct sna_dri_frame_event *info)
+{
+	struct sna_dri_private *priv = get_private(info->front);
+	drmVBlank vbl;
+
+	DBG(("%s: emitting immediate blit, throttling client\n", __FUNCTION__));
+
+	if ((sna->flags & SNA_NO_WAIT) == 0) {
+		info->type = DRI2_SWAP_THROTTLE;
+		if (priv->chain == NULL) {
+			DBG(("%s: no pending blit, starting chain\n",
+			     __FUNCTION__));
+
+			info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+							 get_private(info->front)->bo,
+							 get_private(info->back)->bo,
+							 true);
+			DRI2SwapComplete(info->client, draw, 0, 0, 0,
+					 DRI2_BLIT_COMPLETE,
+					 info->event_complete,
+					 info->event_data);
+
+			vbl.request.type =
+				DRM_VBLANK_RELATIVE |
+				DRM_VBLANK_NEXTONMISS |
+				DRM_VBLANK_EVENT |
+				pipe_select(info->pipe);
+			vbl.request.sequence = 0;
+			vbl.request.signal = (unsigned long)info;
+			if (sna_wait_vblank(sna, &vbl) == 0)
+				priv->chain = info;
+			else
+				sna_dri_frame_event_info_free(sna, info);
+		} else {
+			DBG(("%s: attaching to vsync chain\n",
+			     __FUNCTION__));
+			assert(priv->chain->chain == NULL);
+			priv->chain->chain = info;
+		}
+	} else {
+		info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+						 get_private(info->front)->bo,
+						 get_private(info->back)->bo,
+						 true);
+		DRI2SwapComplete(info->client, draw, 0, 0, 0,
+				 DRI2_BLIT_COMPLETE,
+				 info->event_complete,
+				 info->event_data);
+		sna_dri_frame_event_info_free(sna, info);
+	}
+}
+
 /*
  * ScheduleSwap is responsible for requesting a DRM vblank event for the
  * appropriate frame.
@@ -1654,6 +1858,15 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	/* Drawable not displayed... just complete the swap */
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1) {
+		if (can_exchange(sna, draw, front, back)) {
+			DBG(("%s: unattached, exchange pixmaps\n", __FUNCTION__));
+			sna_dri_exchange_buffers(draw, front, back);
+
+			DRI2SwapComplete(client, draw, 0, 0, 0,
+					 DRI2_EXCHANGE_COMPLETE, func, data);
+			return TRUE;
+		}
+
 		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
 		goto blit_fallback;
 	}
@@ -1690,51 +1903,10 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 	info->type = swap_type;
 	if (divisor == 0) {
-		DBG(("%s: emitting immediate vsync'ed blit, throttling client\n",
-		     __FUNCTION__));
-
-		if ((sna->flags & SNA_NO_WAIT) == 0) {
-			struct sna_dri_private *priv = get_private(front);
-
-			info->type = DRI2_SWAP_THROTTLE;
-			if (priv->chain == NULL) {
-				DBG(("%s: no pending blit, starting chain\n",
-				     __FUNCTION__));
-
-				info->bo = sna_dri_copy_to_front(sna, draw, NULL,
-								 get_private(front)->bo,
-								 get_private(back)->bo,
-								 true);
-
-				DRI2SwapComplete(client, draw, 0, 0, 0,
-						 DRI2_BLIT_COMPLETE, func, data);
-				vbl.request.type =
-					DRM_VBLANK_RELATIVE |
-					DRM_VBLANK_NEXTONMISS |
-					DRM_VBLANK_EVENT |
-					pipe_select(pipe);
-				vbl.request.sequence = 0;
-				vbl.request.signal = (unsigned long)info;
-				if (sna_wait_vblank(sna, &vbl) == 0) {
-					priv->chain = info;
-					return TRUE;
-				}
-			} else {
-				DBG(("%s: attaching to vsync chain\n",
-				     __FUNCTION__));
-				assert(priv->chain->chain == NULL);
-				priv->chain->chain = info;
-				return TRUE;
-			}
-		} else {
-			info->bo = sna_dri_copy_to_front(sna, draw, NULL,
-							 get_private(front)->bo,
-							 get_private(back)->bo,
-							 true);
-		}
-
-		sna_dri_frame_event_info_free(sna, info);
-		DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
+		if (can_exchange(sna, draw, info->front, info->back))
+			sna_dri_immediate_xchg(sna, draw, info);
+		else
+			sna_dri_immediate_blit(sna, draw, info);
 		return TRUE;
 	}
 
commit 53d735ddb16b0204662b8584aa22998ba53deec1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 23 10:16:44 2012 +0100

    sna/dri: Queue windowed swaps
    
    Implement "triple-buffering" for windowed SwapBuffers by allowing the
    client to submit one extra frame before throttling. That is, we emit the
    vsync'ed blit and immediately unblock the client so that it renders to
    the GPU (which is guaranteed to be executed after the blit so that its
    Front/Back buffers are still correct) and requests another SwapBuffers.
    The subsequent swapbuffers are appended to the vsync chain with the
    blit/unblock then executed on the vblank following the original blit.
    That is, both the client and xserver render concurrently.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 7cf1d1c..a323b4e 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -66,12 +66,6 @@ enum frame_event_type {
 	DRI2_WAITMSC,
 };
 
-struct sna_dri_private {
-	int refcnt;
-	PixmapPtr pixmap;
-	struct kgem_bo *bo;
-};
-
 struct sna_dri_frame_event {
 	XID drawable_id;
 	ClientPtr client;
@@ -90,6 +84,8 @@ struct sna_dri_frame_event {
 	DRI2BufferPtr back;
 	struct kgem_bo *bo;
 
+	struct sna_dri_frame_event *chain;
+
 	unsigned int fe_frame;
 	unsigned int fe_tv_sec;
 	unsigned int fe_tv_usec;
@@ -103,8 +99,20 @@ struct sna_dri_frame_event {
 	int off_delay;
 };
 
+struct sna_dri_private {
+	int refcnt;
+	PixmapPtr pixmap;
+	int width, height;
+	struct kgem_bo *bo;
+	struct sna_dri_frame_event *chain;
+};
+
 static DevPrivateKeyRec sna_client_key;
 
+static RESTYPE frame_event_client_type;
+static RESTYPE frame_event_drawable_type;
+static RESTYPE dri_drawable_type;
+
 static inline struct sna_dri_frame_event *
 to_frame_event(uintptr_t  data)
 {
@@ -204,6 +212,25 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
 		pixmap = get_drawable_pixmap(drawable);
+
+		buffer = NULL;
+		dixLookupResourceByType((void **)&buffer, drawable->id,
+					dri_drawable_type, NULL, DixWriteAccess);
+		if (buffer) {
+			private = get_private(buffer);
+			if (private->pixmap == pixmap &&
+			    private->width  == pixmap->drawable.width &&
+			    private->height == pixmap->drawable.height)  {
+				DBG(("%s: reusing front buffer attachment\n",
+				     __FUNCTION__));
+				private->refcnt++;
+				return buffer;
+			}
+			FreeResourceByType(drawable->id,
+					   dri_drawable_type,
+					   TRUE);
+		}
+
 		bo = sna_pixmap_set_dri(sna, pixmap);
 		if (bo == NULL)
 			return NULL;
@@ -292,6 +319,10 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	buffer->name = kgem_bo_flink(&sna->kgem, bo);
 	private->refcnt = 1;
 	private->pixmap = pixmap;
+	if (pixmap) {
+		private->width  = pixmap->drawable.width;
+		private->height = pixmap->drawable.height;
+	}
 	private->bo = bo;
 
 	if (buffer->name == 0)
@@ -300,6 +331,9 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	if (pixmap)
 		pixmap->refcnt++;
 
+	if (attachment == DRI2BufferFrontLeft)
+		(void)AddResource(drawable->id, dri_drawable_type, buffer);
+
 	return buffer;
 
 err:
@@ -727,8 +761,6 @@ sna_dri_get_pipe(DrawablePtr pDraw)
 	return pipe;
 }
 
-static RESTYPE frame_event_client_type, frame_event_drawable_type;
-
 static struct list *
 get_resource(XID id, RESTYPE type)
 {
@@ -806,6 +838,13 @@ sna_dri_frame_event_drawable_gone(void *data, XID id)
 	return Success;
 }
 
+static int
+sna_dri_drawable_gone(void *data, XID id)
+{
+	DBG(("%s(%ld)\n", __FUNCTION__, (long)id));
+	return Success;
+}
+
 static Bool
 sna_dri_register_frame_event_resource_types(void)
 {
@@ -815,12 +854,26 @@ sna_dri_register_frame_event_resource_types(void)
 	if (!frame_event_client_type)
 		return FALSE;
 
+	DBG(("%s: frame_event_client_type=%d\n",
+	     __FUNCTION__, frame_event_client_type));
+
 	frame_event_drawable_type =
 		CreateNewResourceType(sna_dri_frame_event_drawable_gone,
 				      "Frame Event Drawable");
 	if (!frame_event_drawable_type)
 		return FALSE;
 
+	DBG(("%s: frame_event_drawable_type=%d\n",
+	     __FUNCTION__, frame_event_drawable_type));
+
+	dri_drawable_type =
+		CreateNewResourceType(sna_dri_drawable_gone,
+				      "DRI2 Drawable");
+	if (!dri_drawable_type)
+		return FALSE;
+
+	DBG(("%s: dri_drawable_type=%d\n", __FUNCTION__, dri_drawable_type));
+
 	return TRUE;
 }
 
@@ -1083,6 +1136,9 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 			if (kgem_bo_is_busy(info->bo)) {
 				drmVBlank vbl;
 
+				DBG(("%s: vsync'ed blit is still busy, postponing\n",
+				     __FUNCTION__));
+
 				VG_CLEAR(vbl);
 				vbl.request.type =
 					DRM_VBLANK_RELATIVE |
@@ -1095,12 +1151,53 @@ void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 			}
 		}
 
-		DRI2SwapComplete(info->client,
-				 draw, event->sequence,
-				 event->tv_sec, event->tv_usec,
-				 DRI2_BLIT_COMPLETE,
-				 info->client ? info->event_complete : NULL,
-				 info->event_data);
+		if (info->chain) {
+			struct sna_dri_frame_event *chain = info->chain;
+			drmVBlank vbl;
+
+			DBG(("%s: emitting chained vsync'ed blit\n",
+			     __FUNCTION__));
+
+			assert(get_private(info->front)->chain == info);
+			get_private(info->front)->chain = chain;
+
+			chain->bo = sna_dri_copy_to_front(sna, draw, NULL,
+							  get_private(chain->front)->bo,
+							  get_private(chain->back)->bo,
+							  true);
+
+			DRI2SwapComplete(chain->client,
+					 draw, event->sequence,
+					 event->tv_sec, event->tv_usec,
+					 DRI2_BLIT_COMPLETE,
+					 chain->client ? chain->event_complete : NULL,
+					 chain->event_data);
+
+			VG_CLEAR(vbl);
+			vbl.request.type =
+				DRM_VBLANK_RELATIVE |
+				DRM_VBLANK_NEXTONMISS |
+				DRM_VBLANK_EVENT |
+				pipe_select(chain->pipe);
+			vbl.request.sequence = 0;
+			vbl.request.signal = (unsigned long)chain;
+			if (sna_wait_vblank(sna, &vbl))
+				sna_dri_frame_event_info_free(sna, chain);
+
+			info->chain = NULL;
+		} else if (get_private(info->front)->chain == info) {
+			DBG(("%s: chain complete\n", __FUNCTION__));
+			get_private(info->front)->chain = NULL;
+		} else {
+			DBG(("%s: deferred blit complete, unblock client\n",
+			     __FUNCTION__));
+			DRI2SwapComplete(info->client,
+					 draw, event->sequence,
+					 event->tv_sec, event->tv_usec,
+					 DRI2_BLIT_COMPLETE,
+					 info->client ? info->event_complete : NULL,
+					 info->event_data);
+		}
 		break;
 
 	case DRI2_WAITMSC:
@@ -1596,21 +1693,44 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		DBG(("%s: emitting immediate vsync'ed blit, throttling client\n",
 		     __FUNCTION__));
 
-		info->bo = sna_dri_copy_to_front(sna, draw, NULL,
-						 get_private(front)->bo,
-						 get_private(back)->bo,
-						 true);
 		if ((sna->flags & SNA_NO_WAIT) == 0) {
+			struct sna_dri_private *priv = get_private(front);
+
 			info->type = DRI2_SWAP_THROTTLE;
-			vbl.request.type =
-				DRM_VBLANK_RELATIVE |
-				DRM_VBLANK_NEXTONMISS |
-				DRM_VBLANK_EVENT |
-				pipe_select(pipe);
-			vbl.request.sequence = 0;
-			vbl.request.signal = (unsigned long)info;
-			if (sna_wait_vblank(sna, &vbl) == 0)
+			if (priv->chain == NULL) {
+				DBG(("%s: no pending blit, starting chain\n",
+				     __FUNCTION__));
+
+				info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+								 get_private(front)->bo,
+								 get_private(back)->bo,
+								 true);
+
+				DRI2SwapComplete(client, draw, 0, 0, 0,
+						 DRI2_BLIT_COMPLETE, func, data);
+				vbl.request.type =
+					DRM_VBLANK_RELATIVE |
+					DRM_VBLANK_NEXTONMISS |
+					DRM_VBLANK_EVENT |
+					pipe_select(pipe);
+				vbl.request.sequence = 0;
+				vbl.request.signal = (unsigned long)info;
+				if (sna_wait_vblank(sna, &vbl) == 0) {
+					priv->chain = info;
+					return TRUE;
+				}
+			} else {
+				DBG(("%s: attaching to vsync chain\n",
+				     __FUNCTION__));
+				assert(priv->chain->chain == NULL);
+				priv->chain->chain = info;
 				return TRUE;
+			}
+		} else {
+			info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+							 get_private(front)->bo,
+							 get_private(back)->bo,
+							 true);
 		}
 
 		sna_dri_frame_event_info_free(sna, info);
commit 1e9319d5f56583be99f573f208cebb0ee3b5cc26
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 22 11:22:16 2012 +0100

    sna: extend RandR to support super sized monitor configurations
    
    With the introduction of the third pipe on IvyBridge it is possible to
    encounter situations where the combination of the three monitors exceeds
    the limits of the scanout engine and so prevent them being used at their
    native resolutions. (It is conceivable to hit similar issues on earlier
    generations, especially gen2/3.) One workaround, this patch, is to extend
    the RandR shadow support to break the extended framebuffer into per-crtc
    pixmaps.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index 78575a6..d8455f9 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -22,6 +22,7 @@ const OptionInfoRec intel_options[] = {
 	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	1},
 	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	0},
 	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_TEAR_FREE,	"TearFree",	OPTV_BOOLEAN,	{0},	1},
 #endif
 #ifdef USE_UXA
 	{OPTION_FALLBACKDEBUG,	"FallbackDebug",OPTV_BOOLEAN,	{0},	0},
diff --git a/src/intel_options.h b/src/intel_options.h
index 05a2ad1..c3e4999 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -28,6 +28,7 @@ enum intel_options {
 	OPTION_THROTTLE,
 	OPTION_ZAPHOD,
 	OPTION_DELAYED_FLUSH,
+	OPTION_TEAR_FREE,
 #endif
 #ifdef USE_UXA
 	OPTION_FALLBACKDEBUG,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9fe3661..5b58e0e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1990,7 +1990,6 @@ void _kgem_submit(struct kgem *kgem)
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
 
-	kgem->flush_now = kgem->scanout;
 	kgem_reset(kgem);
 
 	assert(kgem->next_request != NULL);
@@ -2486,14 +2485,30 @@ done:
 	return tiling;
 }
 
+static int bits_per_pixel(int depth)
+{
+	switch (depth) {
+	case 8: return 8;
+	case 15:
+	case 16: return 16;
+	case 24:
+	case 30:
+	case 32: return 32;
+	default: return 0;
+	}
+}
+
 unsigned kgem_can_create_2d(struct kgem *kgem,
 			    int width, int height, int depth)
 {
-	int bpp = BitsPerPixel(depth);
 	uint32_t pitch, size;
 	unsigned flags = 0;
+	int bpp;
+
+	DBG(("%s: %dx%d @ %d\n", __FUNCTION__, width, height, depth));
 
-	if (depth < 8) {
+	bpp = bits_per_pixel(depth);
+	if (bpp == 0) {
 		DBG(("%s: unhandled depth %d\n", __FUNCTION__, depth));
 		return 0;
 	}
@@ -2509,6 +2524,8 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 				 I915_TILING_NONE, &pitch);
 	if (size > 0 && size <= kgem->max_cpu_size)
 		flags |= KGEM_CAN_CREATE_CPU | KGEM_CAN_CREATE_GPU;
+	if (size > 0 && size <= kgem->aperture_mappable/4)
+		flags |= KGEM_CAN_CREATE_GTT;
 	if (size > kgem->large_object_size)
 		flags |= KGEM_CAN_CREATE_LARGE;
 	if (size > kgem->max_object_size) {
@@ -2524,6 +2541,8 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 				 &pitch);
 	if (size > 0 && size <= kgem->max_gpu_size)
 		flags |= KGEM_CAN_CREATE_GPU;
+	if (size > 0 && size <= kgem->aperture_mappable/4)
+		flags |= KGEM_CAN_CREATE_GTT;
 	if (size > kgem->large_object_size)
 		flags |= KGEM_CAN_CREATE_LARGE;
 	if (size > kgem->max_object_size) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index c154be5..2d8def8 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -150,7 +150,6 @@ struct kgem {
 	uint32_t need_retire:1;
 	uint32_t need_throttle:1;
 	uint32_t scanout:1;
-	uint32_t flush_now:1;
 	uint32_t busy:1;
 
 	uint32_t has_vmap :1;
@@ -218,6 +217,7 @@ unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth)
 #define KGEM_CAN_CREATE_GPU	0x1
 #define KGEM_CAN_CREATE_CPU	0x2
 #define KGEM_CAN_CREATE_LARGE	0x4
+#define KGEM_CAN_CREATE_GTT	0x8
 
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index ee8273c..2e8925b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -46,6 +46,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "compiler.h"
 
 #include <xf86Crtc.h>
+#include <xf86str.h>
 #include <windowstr.h>
 #include <glyphstr.h>
 #include <picturestr.h>
@@ -58,6 +59,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "../compat-api.h"
 #define _XF86DRI_SERVER_
+#include <drm.h>
 #include <dri2.h>
 #include <i915_drm.h>
 
@@ -209,6 +211,7 @@ struct sna {
 #define SNA_NO_DELAYED_FLUSH	0x2
 #define SNA_NO_WAIT		0x4
 #define SNA_NO_FLIP		0x8
+#define SNA_TEAR_FREE		0x10
 
 	unsigned watch_flush;
 	unsigned flush;
@@ -226,14 +229,16 @@ struct sna {
 	struct list active_pixmaps;
 	struct list inactive_clock[2];
 
-	PixmapPtr front, shadow;
+	PixmapPtr front;
 	PixmapPtr freed_pixmap;
 
 	struct sna_mode {
-		uint32_t fb_id;
-		uint32_t fb_pixmap;
-		drmModeResPtr mode_res;
-		int cpp;
+		drmModeResPtr kmode;
+
+		int shadow_active;
+		DamagePtr shadow_damage;
+		struct kgem_bo *shadow;
+		int shadow_flip;
 
 		struct list outputs;
 		struct list crtcs;
@@ -256,6 +261,7 @@ struct sna {
 	ScreenBlockHandlerProcPtr BlockHandler;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
 	CloseScreenProcPtr CloseScreen;
+	xf86ModeSetProc *ModeSet;
 
 	PicturePtr clear;
 	struct {
@@ -302,9 +308,10 @@ struct sna {
 
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 void sna_mode_adjust_frame(struct sna *sna, int x, int y);
-extern void sna_mode_remove_fb(struct sna *sna);
 extern void sna_mode_update(struct sna *sna);
 extern void sna_mode_disable_unused(struct sna *sna);
+extern void sna_mode_wakeup(struct sna *sna);
+extern void sna_mode_redisplay(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
 extern int sna_crtc_id(xf86CrtcPtr crtc);
@@ -356,17 +363,17 @@ to_sna_from_kgem(struct kgem *kgem)
 
 extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 				     const BoxRec *box,
-				     xf86CrtcPtr desired,
-				     BoxPtr crtc_box_ret);
+				     xf86CrtcPtr desired);
 
 extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
 
 Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
-void sna_dri_wakeup(struct sna *sna);
+void sna_dri_page_flip_handler(struct sna *sna, struct drm_event_vblank *event);
+void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event);
 void sna_dri_close(struct sna *sna, ScreenPtr pScreen);
 
-extern Bool sna_crtc_on(xf86CrtcPtr crtc);
+extern bool sna_crtc_on(xf86CrtcPtr crtc);
 int sna_crtc_to_pipe(xf86CrtcPtr crtc);
 int sna_crtc_to_plane(xf86CrtcPtr crtc);
 
@@ -408,10 +415,12 @@ get_drawable_dy(DrawablePtr drawable)
 	return 0;
 }
 
-static inline Bool pixmap_is_scanout(PixmapPtr pixmap)
+bool sna_pixmap_attach_to_bo(PixmapPtr pixmap, struct kgem_bo *bo);
+static inline bool sna_pixmap_is_scanout(struct sna *sna, PixmapPtr pixmap)
 {
-	ScreenPtr screen = pixmap->drawable.pScreen;
-	return pixmap == screen->GetScreenPixmap(screen);
+	return (pixmap == sna->front &&
+		!sna->mode.shadow_active &&
+		(sna->flags & SNA_NO_WAIT) == 0);
 }
 
 PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
@@ -429,6 +438,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_INPLACE_HINT 0x4
 #define MOVE_ASYNC_HINT 0x8
 #define MOVE_SOURCE_HINT 0x10
+#define __MOVE_FORCE 0x20
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5b0b33d..0f52a27 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -645,7 +645,7 @@ _sna_pixmap_reset(PixmapPtr pixmap)
 	return _sna_pixmap_init(priv, pixmap);
 }
 
-static struct sna_pixmap *sna_pixmap_attach(struct sna *sna, PixmapPtr pixmap)
+static struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
 
@@ -657,6 +657,22 @@ static struct sna_pixmap *sna_pixmap_attach(struct sna *sna, PixmapPtr pixmap)
 	return _sna_pixmap_init(priv, pixmap);
 }
 
+bool sna_pixmap_attach_to_bo(PixmapPtr pixmap, struct kgem_bo *bo)
+{
+	struct sna_pixmap *priv;
+
+	priv = sna_pixmap_attach(pixmap);
+	if (!priv)
+		return false;
+
+	priv->gpu_bo = kgem_bo_reference(bo);
+	sna_damage_all(&priv->gpu_damage,
+		       pixmap->drawable.width,
+		       pixmap->drawable.height);
+
+	return true;
+}
+
 static inline PixmapPtr
 create_pixmap(struct sna *sna, ScreenPtr screen,
 	      int width, int height, int depth,
@@ -724,7 +740,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 		pixmap->drawable.depth = depth;
 		pixmap->drawable.bitsPerPixel = bpp;
 
-		priv = sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(pixmap);
 		if (!priv) {
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
@@ -816,7 +832,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 		pixmap->drawable.depth = depth;
 		pixmap->drawable.bitsPerPixel = bpp;
 
-		priv = sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(pixmap);
 		if (!priv) {
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
@@ -907,7 +923,7 @@ force_create:
 		if (pixmap == NullPixmap)
 			return NullPixmap;
 
-		sna_pixmap_attach(sna, pixmap);
+		sna_pixmap_attach(pixmap);
 	} else {
 		struct sna_pixmap *priv;
 
@@ -923,7 +939,7 @@ force_create:
 		pixmap->devKind = pad;
 		pixmap->devPrivate.ptr = NULL;
 
-		priv = sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(pixmap);
 		if (priv == NULL) {
 			free(pixmap);
 			goto fallback;
@@ -2508,7 +2524,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		}
 	}
 
-	if (!sna_pixmap_move_to_gpu(pixmap, flags))
+	if (!sna_pixmap_move_to_gpu(pixmap, flags | __MOVE_FORCE))
 		return NULL;
 
 	/* For large bo, try to keep only a single copy around */
@@ -2537,6 +2553,9 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	DBG(("%s(pixmap=%ld, usage=%d)\n",
 	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
 
+	if ((flags & __MOVE_FORCE) == 0 && wedged(sna))
+		return NULL;
+
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
 		DBG(("%s: not attached\n", __FUNCTION__));
@@ -12277,7 +12296,6 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	}
 
 	kgem_submit(&sna->kgem);
-	sna->kgem.flush_now = 0;
 
 	kgem_sync(&sna->kgem);
 
@@ -12286,8 +12304,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 {
-	PixmapPtr front = sna->shadow ? sna->shadow : sna->front;
-	struct sna_pixmap *priv = sna_pixmap(front);
+	struct sna_pixmap *priv = sna_pixmap(sna->front);
 	return priv && priv->gpu_bo ? priv : NULL;
 }
 
@@ -12298,12 +12315,48 @@ static void sna_accel_disarm_timer(struct sna *sna, int id)
 	sna->timer_ready &= ~(1<<id);
 }
 
+static bool has_shadow(struct sna *sna)
+{
+	DamagePtr damage = sna->mode.shadow_damage;
+
+	if (!(damage && RegionNotEmpty(DamageRegion(damage))))
+		return false;
+
+	DBG(("%s: has pending damage\n", __FUNCTION__));
+	if ((sna->flags & SNA_TEAR_FREE) == 0)
+		return true;
+
+	DBG(("%s: outstanding flips: %d\n",
+	     __FUNCTION__, sna->mode.shadow_flip));
+	return !sna->mode.shadow_flip;
+}
+
+static bool need_flush(struct sna *sna, struct sna_pixmap *scanout)
+{
+	DBG(("%s: scanout=%d shadow?=%d || (cpu?=%d || gpu?=%d) && !busy=%d)\n",
+	     __FUNCTION__,
+	     scanout && scanout->gpu_bo ? scanout->gpu_bo->handle : 0,
+	     has_shadow(sna),
+	     scanout && scanout->cpu_damage != NULL,
+	     scanout && scanout->gpu_bo && scanout->gpu_bo->exec != NULL,
+	     scanout && scanout->gpu_bo && __kgem_flush(&sna->kgem, scanout->gpu_bo)));
+
+	if (has_shadow(sna))
+		return true;
+
+	if (!scanout)
+		return false;
+
+	return (scanout->cpu_damage || scanout->gpu_bo->exec) &&
+		!__kgem_flush(&sna->kgem, scanout->gpu_bo);
+}
+
 static bool sna_accel_do_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv;
 
 	priv = sna_accel_scanout(sna);
-	if (priv == NULL || priv->gpu_bo == NULL) {
+	if (priv == NULL && !sna->mode.shadow_active) {
 		DBG(("%s -- no scanout attached\n", __FUNCTION__));
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 		return false;
@@ -12313,27 +12366,19 @@ static bool sna_accel_do_flush(struct sna *sna)
 		return true;
 
 	if (sna->timer_active & (1<<(FLUSH_TIMER))) {
-		if (sna->kgem.flush_now) {
-			sna->kgem.flush_now = 0;
-			if (priv->gpu_bo->exec) {
-				DBG(("%s -- forcing flush\n", __FUNCTION__));
-				sna->timer_ready |= 1 << FLUSH_TIMER;
-			}
-		}
-
+		DBG(("%s: flush timer active\n", __FUNCTION__));
 		if (sna->timer_ready & (1<<(FLUSH_TIMER))) {
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval;
-			return priv->cpu_damage || !__kgem_flush(&sna->kgem, priv->gpu_bo);
+			return true;
 		}
 	} else {
-		if (priv->cpu_damage == NULL &&
-		    !__kgem_flush(&sna->kgem, priv->gpu_bo)) {
+		if (!need_flush(sna, priv)) {
 			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
-			sna->timer_ready  |= 1 << FLUSH_TIMER;
+			sna->timer_ready |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval / 2;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
@@ -12447,24 +12492,24 @@ static void sna_accel_flush(struct sna *sna)
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
 	bool busy;
 
-	assert(priv != NULL);
 	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
 	     __FUNCTION__, (long)sna->time,
-	     priv->cpu_damage,
-	     priv->gpu_bo->exec != NULL,
+	     priv ? priv->cpu_damage : NULL,
+	     priv && priv->gpu_bo->exec != NULL,
 	     sna->kgem.nbatch,
 	     sna->kgem.busy));
 
-	busy = priv->cpu_damage || priv->gpu_bo->rq;
+	busy = need_flush(sna, priv);
 	if (!sna->kgem.busy && !busy)
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 	sna->kgem.busy = busy;
 
-	if (priv->cpu_damage)
-		sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ);
+	if (priv) {
+		sna_pixmap_force_to_gpu(priv->pixmap, MOVE_READ);
+		kgem_bo_flush(&sna->kgem, priv->gpu_bo);
+	}
 
-	kgem_bo_flush(&sna->kgem, priv->gpu_bo);
-	sna->kgem.flush_now = 0;
+	sna_mode_redisplay(sna);
 }
 
 static void sna_accel_throttle(struct sna *sna)
@@ -12811,10 +12856,9 @@ void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 	DBG(("%s\n", __FUNCTION__));
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
-	if (!sna->kgem.need_retire) {
+	if (!sna->mode.shadow_active && !sna->kgem.need_retire) {
 		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
 		kgem_submit(&sna->kgem);
-		sna->kgem.flush_now = 0;
 	}
 	if (sna->kgem.need_purge)
 		kgem_purge_cache(&sna->kgem);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index de834ae..0896af4 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -43,6 +43,9 @@
 #include <xf86drm.h>
 #include <xf86DDC.h> /* for xf86InterpretEDID */
 
+#include <fb.h>
+#include <fbpict.h>
+
 #include "sna.h"
 #include "sna_reg.h"
 
@@ -61,13 +64,12 @@
 
 struct sna_crtc {
 	struct drm_mode_modeinfo kmode;
-	PixmapPtr shadow;
-	uint32_t shadow_fb_id;
+	struct kgem_bo *bo;
 	uint32_t cursor;
+	bool shadow;
 	uint8_t id;
 	uint8_t pipe;
 	uint8_t plane;
-	uint8_t active;
 	struct list link;
 };
 
@@ -126,10 +128,9 @@ static const char *backlight_interfaces[] = {
 /* Enough for 10 digits of backlight + '\n' + '\0' */
 #define BACKLIGHT_VALUE_LEN 12
 
-static inline int
-crtc_id(struct sna_crtc *crtc)
+static inline uint32_t fb_id(struct kgem_bo *bo)
 {
-	return crtc->id;
+	return bo->delta;
 }
 
 int sna_crtc_id(xf86CrtcPtr crtc)
@@ -137,9 +138,9 @@ int sna_crtc_id(xf86CrtcPtr crtc)
 	return to_sna_crtc(crtc)->id;
 }
 
-int sna_crtc_on(xf86CrtcPtr crtc)
+bool sna_crtc_on(xf86CrtcPtr crtc)
 {
-	return to_sna_crtc(crtc)->active;
+	return to_sna_crtc(crtc)->bo != NULL;
 }
 
 int sna_crtc_to_pipe(xf86CrtcPtr crtc)
@@ -159,7 +160,6 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 {
 	ScrnInfoPtr scrn = sna->scrn;
 	struct drm_mode_fb_cmd arg;
-	int ret;
 
 	assert(bo->proxy == NULL);
 	if (bo->delta) {
@@ -181,11 +181,11 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	arg.depth = scrn->depth;
 	arg.handle = bo->handle;
 
-	if ((ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_ADDFB, &arg))) {
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_ADDFB, &arg)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d: %d\n",
 			   __FUNCTION__, width, height,
-			   scrn->depth, scrn->bitsPerPixel, bo->pitch, ret);
+			   scrn->depth, scrn->bitsPerPixel, bo->pitch, errno);
 		return 0;
 	}
 
@@ -397,16 +397,20 @@ mode_to_kmode(struct drm_mode_modeinfo *kmode, DisplayModePtr mode)
 
 bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 {
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_crtc mode;
 
+	if (!sna_crtc->bo)
+		return false;
+
 	VG_CLEAR(mode);
-	mode.crtc_id = to_sna_crtc(crtc)->id;
+	mode.crtc_id = sna_crtc->id;
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
 		return false;
 
 	DBG(("%s: crtc=%d, mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
-	     mode.crtc_id, mode.mode_valid, sna->mode.fb_id == mode.fb_id));
-	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
+	     mode.crtc_id, mode.mode_valid, fb_id(sna_crtc->bo) == mode.fb_id));
+	return mode.mode_valid && fb_id(sna_crtc->bo) == mode.fb_id;
 }
 
 static Bool
@@ -415,11 +419,9 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
-	struct sna_mode *mode = &sna->mode;
 	struct drm_mode_crtc arg;
 	uint32_t output_ids[16];
 	int output_count = 0;
-	int fb_id, x, y;
 	int i, ret = FALSE;
 
 	DBG(("%s\n", __FUNCTION__));
@@ -439,22 +441,23 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		output_count++;
 	}
 
-	if (!xf86CrtcRotate(crtc)) {
-		DBG(("%s: failed to rotate crtc\n", __FUNCTION__));
-		return FALSE;
-	}
-
 	crtc->funcs->gamma_set(crtc, crtc->gamma_red, crtc->gamma_green,
 			       crtc->gamma_blue, crtc->gamma_size);
 
-	x = crtc->x;
-	y = crtc->y;
-	fb_id = mode->fb_id;
-	if (sna_crtc->shadow_fb_id) {
-		fb_id = sna_crtc->shadow_fb_id;
-		x = 0;
-		y = 0;
+	VG_CLEAR(arg);
+	arg.crtc_id = sna_crtc->id;
+	arg.fb_id = fb_id(sna_crtc->bo);
+	if (sna_crtc->shadow) {
+		arg.x = 0;
+		arg.y = 0;
+	} else {
+		arg.x = crtc->x;
+		arg.y = crtc->y;
 	}
+	arg.set_connectors_ptr = (uintptr_t)output_ids;
+	arg.count_connectors = output_count;
+	arg.mode = sna_crtc->kmode;
+	arg.mode_valid = 1;
 
 	xf86DrvMsg(crtc->scrn->scrnIndex, X_INFO,
 		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
@@ -467,22 +470,14 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     sna_crtc->kmode.hdisplay,
 	     sna_crtc->kmode.vdisplay,
 	     sna_crtc->kmode.clock,
-	     fb_id, sna_crtc->shadow_fb_id ? " [shadow]" : "",
+	     arg.fb_id,
+	     sna_crtc->shadow ? " [shadow]" : "",
 	     output_count));
 
-	VG_CLEAR(arg);
-	arg.x = x;
-	arg.y = y;
-	arg.crtc_id = sna_crtc->id;
-	arg.fb_id = fb_id;
-	arg.set_connectors_ptr = (uintptr_t)output_ids;
-	arg.count_connectors = output_count;
-	arg.mode = sna_crtc->kmode;
-	arg.mode_valid = 1;
 	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
 	if (ret) {
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
-			   "failed to set mode: %s\n", strerror(-ret));
+			   "failed to set mode: %s\n", strerror(errno));
 		ret = FALSE;
 	} else
 		ret = TRUE;
@@ -493,6 +488,79 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	return ret;
 }
 
+static bool sna_mode_enable_shadow(struct sna *sna)
+{
+	ScreenPtr screen = sna->scrn->pScreen;
+
+	DBG(("%s\n", __FUNCTION__));
+	assert(sna->mode.shadow == NULL);
+	assert(sna->mode.shadow_damage == NULL);
+	assert(sna->mode.shadow_active == 0);
+
+	sna->mode.shadow_damage = DamageCreate(NULL, NULL,
+					       DamageReportNone, TRUE,
+					       screen, screen);
+	if (!sna->mode.shadow_damage)
+		return false;
+
+	DamageRegister(&sna->front->drawable, sna->mode.shadow_damage);
+	return true;
+}
+
+static void sna_mode_disable_shadow(struct sna *sna)
+{
+	if (!sna->mode.shadow_damage)
+		return;
+
+	DBG(("%s\n", __FUNCTION__));
+
+	DamageUnregister(&sna->front->drawable, sna->mode.shadow_damage);
+	DamageDestroy(sna->mode.shadow_damage);
+	sna->mode.shadow_damage = NULL;
+
+	if (sna->mode.shadow) {
+		kgem_bo_destroy(&sna->kgem, sna->mode.shadow);
+		sna->mode.shadow = NULL;
+	}
+
+	sna->mode.shadow_active = 0;
+}
+
+static bool sna_crtc_enable_shadow(struct sna *sna, struct sna_crtc *crtc)
+{
+	if (crtc->shadow) {
+		assert(sna->mode.shadow_damage && sna->mode.shadow_active);
+		return true;
+	}
+
+	DBG(("%s: enabling for crtc %d\n", __FUNCTION__, crtc->id));
+
+	if (!sna->mode.shadow_active) {
+		if (!sna_mode_enable_shadow(sna))
+			return false;
+		assert(sna->mode.shadow_damage);
+		assert(sna->mode.shadow == NULL);
+	}
+
+	crtc->shadow = true;
+	sna->mode.shadow_active++;
+	return true;
+}
+
+static void sna_crtc_disable_shadow(struct sna *sna, struct sna_crtc *crtc)
+{
+	if (!crtc->shadow)
+		return;
+
+	DBG(("%s: disabling for crtc %d\n", __FUNCTION__, crtc->id));
+	assert(sna->mode.shadow_active > 0);
+
+	if (!--sna->mode.shadow_active)
+		sna_mode_disable_shadow(sna);
+
+	crtc->shadow = false;
+}
+
 static void
 sna_crtc_disable(xf86CrtcPtr crtc)
 {
@@ -507,7 +575,13 @@ sna_crtc_disable(xf86CrtcPtr crtc)
 	arg.fb_id = 0;
 	arg.mode_valid = 0;
 	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
-	sna_crtc->active = false;
+
+	sna_crtc_disable_shadow(sna, sna_crtc);
+
+	if (sna_crtc->bo) {
+		kgem_bo_destroy(&sna->kgem, sna_crtc->bo);
+		sna_crtc->bo = NULL;
+	}
 }
 
 static void
@@ -675,70 +749,267 @@ static void update_flush_interval(struct sna *sna)
 	       max_vrefresh, sna->vblank_interval));
 }
 
-static Bool
-sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
-			Rotation rotation, int x, int y)
+static bool use_shadow(struct sna *sna, xf86CrtcPtr crtc)
 {
+	RRTransformPtr transform;
+	PictTransform crtc_to_fb;
+	struct pict_f_transform f_crtc_to_fb, f_fb_to_crtc;
+	BoxRec b;
+
+	assert(sna->scrn->virtualX && sna->scrn->virtualY);
+
+	if (sna->scrn->virtualX > sna->mode.kmode->max_width ||
+	    sna->scrn->virtualY > sna->mode.kmode->max_height) {
+		DBG(("%s: framebuffer too large (%dx%d) > (%dx%d)\n",
+		    __FUNCTION__,
+		    sna->scrn->virtualX, sna->scrn->virtualY,
+		    sna->mode.kmode->max_width,
+		    sna->mode.kmode->max_height));
+		return true;
+	}
+
+	transform = NULL;
+	if (crtc->transformPresent)
+		transform = &crtc->transform;
+	if (RRTransformCompute(crtc->x, crtc->y,
+			       crtc->mode.HDisplay, crtc->mode.VDisplay,
+			       crtc->rotation, transform,
+			       &crtc_to_fb,
+			       &f_crtc_to_fb,
+			       &f_fb_to_crtc)) {
+		DBG(("%s: RandR transform present\n", __FUNCTION__));
+		return true;
+	}
+
+	/* And finally check that it is entirely visible */
+	b.x1 = b.y1 = 0;
+	b.x2 = crtc->mode.HDisplay;
+	b.y2 = crtc->mode.VDisplay;
+	pixman_f_transform_bounds(&f_crtc_to_fb, &b);
+	DBG(("%s? bounds (%d, %d), (%d, %d), framebuffer %dx%d\n",
+	     __FUNCTION__, b.x1, b.y1, b.x2, b.y2,
+		 sna->scrn->virtualX, sna->scrn->virtualY));
+
+	if  (b.x1 < 0 || b.y1 < 0 ||
+	     b.x2 > sna->scrn->virtualX ||
+	     b.y2 > sna->scrn->virtualY) {
+		DBG(("%s: scanout is partly outside the framebuffer\n",
+		     __FUNCTION__));
+		return true;
+	}
+
+	return false;
+}
+
+static struct kgem_bo *sna_crtc_attach(xf86CrtcPtr crtc)
+{
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	ScrnInfoPtr scrn = crtc->scrn;
 	struct sna *sna = to_sna(scrn);
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	struct sna_mode *sna_mode = &sna->mode;
-	int saved_x, saved_y;
-	Rotation saved_rotation;
-	DisplayModeRec saved_mode;
+	struct kgem_bo *bo;
 
-	DBG(("%s(rotation=%d, x=%d, y=%d, mode=%dx%d@%d)\n",
-	     __FUNCTION__, rotation, x, y,
-	     mode->HDisplay, mode->VDisplay, mode->Clock));
+	if (use_shadow(sna, crtc)) {
+		if (!sna_crtc_enable_shadow(sna, sna_crtc))
+			return NULL;
 
-	DBG(("%s: current fb pixmap = %d, front is %lu\n",
-	     __FUNCTION__,
-	     sna_mode->fb_pixmap,
-	     sna->front->drawable.serialNumber));
+		DBG(("%s: attaching to per-crtc pixmap %dx%d\n",
+		     __FUNCTION__, crtc->mode.HDisplay, crtc->mode.VDisplay));
 
-	if (sna_mode->fb_pixmap != sna->front->drawable.serialNumber) {
-		kgem_submit(&sna->kgem);
-		sna_mode_remove_fb(sna);
+		bo = kgem_create_2d(&sna->kgem,
+				    crtc->mode.HDisplay, crtc->mode.VDisplay,
+				    scrn->bitsPerPixel,
+				    I915_TILING_X, CREATE_SCANOUT);
+		if (bo == NULL)
+			return NULL;
+
+		if (!get_fb(sna, bo, crtc->mode.HDisplay, crtc->mode.VDisplay)) {
+			kgem_bo_destroy(&sna->kgem, bo);
+			return NULL;
+		}
+
+		return bo;
+	} else if (sna->flags & SNA_TEAR_FREE) {
+		DBG(("%s: tear-free updates requested\n", __FUNCTION__));
+
+		if (!sna_crtc_enable_shadow(sna, sna_crtc))
+			return NULL;
+
+		DBG(("%s: attaching to single shadow pixmap\n", __FUNCTION__));
+		if (sna->mode.shadow == NULL) {
+			bo = kgem_create_2d(&sna->kgem,
+					    sna->scrn->virtualX,
+					    sna->scrn->virtualY,
+					    scrn->bitsPerPixel,
+					    I915_TILING_X,
+					    CREATE_SCANOUT);
+			if (bo == NULL)
+				return NULL;
+
+			if (!get_fb(sna, bo,
+				    sna->scrn->virtualX,
+				    sna->scrn->virtualY)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				return NULL;
+			}
+
+			sna->mode.shadow = bo;
+		}
+
+		return kgem_bo_reference(sna->mode.shadow);
+	} else {
+		DBG(("%s: attaching to framebuffer\n", __FUNCTION__));
+		sna_crtc_disable_shadow(sna, sna_crtc);
+		bo = sna_pixmap_pin(sna->front);
+		if (bo == NULL || !get_fb(sna, bo, scrn->virtualX, scrn->virtualY))
+			return NULL;
+
+		return kgem_bo_reference(bo);
 	}
+}
 
-	if (sna_mode->fb_id == 0) {
-		struct kgem_bo *bo = sna_pixmap_pin(sna->front);
-		if (!bo)
-			return FALSE;
+static void sna_crtc_randr(xf86CrtcPtr crtc)
+{
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	struct pict_f_transform f_crtc_to_fb, f_fb_to_crtc;
+	PictTransform crtc_to_fb;
+	PictFilterPtr filter;
+	xFixed *params;
+	int nparams;
+	RRTransformPtr transform;
+
+	transform = NULL;
+	if (crtc->transformPresent)
+		transform = &crtc->transform;
+
+	RRTransformCompute(crtc->x, crtc->y,
+			   crtc->mode.HDisplay, crtc->mode.VDisplay,
+			   crtc->rotation, transform,
+			   &crtc_to_fb,
+			   &f_crtc_to_fb,
+			   &f_fb_to_crtc);
+
+	filter = NULL;
+	params = NULL;
+	nparams = 0;
+	if (sna_crtc->shadow) {
+#ifdef RANDR_12_INTERFACE
+		if (transform) {
+			if (transform->nparams) {
+				params = malloc(transform->nparams * sizeof(xFixed));
+				if (params) {
+					memcpy(params, transform->params,
+					       transform->nparams * sizeof(xFixed));
+					nparams = transform->nparams;
+					filter = transform->filter;
+				}
+			} else
+				filter = transform->filter;
+		}
+#endif
+		crtc->transform_in_use = TRUE;
+	} else
+		crtc->transform_in_use = FALSE;
 
-		/* XXX recreate the fb in case the size has changed? */
-		sna_mode->fb_id = get_fb(sna, bo,
-					 scrn->virtualX, scrn->virtualY);
-		if (sna_mode->fb_id == 0)
-			return FALSE;
+	crtc->crtc_to_framebuffer = crtc_to_fb;
+	crtc->f_crtc_to_framebuffer = f_crtc_to_fb;
+	crtc->f_framebuffer_to_crtc = f_fb_to_crtc;
 
-		DBG(("%s: handle %d attached to fb %d\n",
-		     __FUNCTION__, bo->handle, sna_mode->fb_id));
+	free(crtc->params);
+	crtc->params  = params;
+	crtc->nparams = nparams;
 
-		sna_mode->fb_pixmap = sna->front->drawable.serialNumber;
+	crtc->filter = filter;
+	if (filter) {
+		crtc->filter_width  = filter->width;
+		crtc->filter_height = filter->height;
+	} else {
+		crtc->filter_width  = 0;
+		crtc->filter_height = 0;
 	}
 
-	saved_mode = crtc->mode;
-	saved_x = crtc->x;
-	saved_y = crtc->y;
-	saved_rotation = crtc->rotation;
+	crtc->bounds.x1 = 0;
+	crtc->bounds.x2 = crtc->mode.HDisplay;
+	crtc->bounds.y1 = 0;
+	crtc->bounds.y2 = crtc->mode.VDisplay;
+	pixman_f_transform_bounds(&f_crtc_to_fb, &crtc->bounds);
+
+	DBG(("%s: transform? %d, bounds (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, crtc->transform_in_use,
+	     crtc->bounds.x1, crtc->bounds.y1,
+	     crtc->bounds.x2, crtc->bounds.y2));
+}
+
+static void
+sna_crtc_damage(xf86CrtcPtr crtc)
+{
+	ScreenPtr screen = crtc->scrn->pScreen;
+	struct sna *sna = to_sna(crtc->scrn);
+	RegionRec region, *damage;
+
+	region.extents = crtc->bounds;
+	region.data = NULL;
 
-	crtc->mode = *mode;
-	crtc->x = x;
-	crtc->y = y;
-	crtc->rotation = rotation;
+	if (region.extents.x1 < 0)
+		region.extents.x1 = 0;
+	if (region.extents.y1 < 0)
+		region.extents.y1 = 0;
+	if (region.extents.x2 > screen->width)
+		region.extents.x2 = screen->width;
+	if (region.extents.y2 > screen->height)
+		region.extents.y2 = screen->height;
 
+	DBG(("%s: marking crtc %d as completely damaged (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, to_sna_crtc(crtc)->id,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	assert(sna->mode.shadow_damage && sna->mode.shadow_active);
+	damage = DamageRegion(sna->mode.shadow_damage);
+	RegionUnion(damage, damage, &region);
+}
+
+static Bool
+sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
+			Rotation rotation, int x, int y)
+{
+	ScrnInfoPtr scrn = crtc->scrn;
+	struct sna *sna = to_sna(scrn);
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	struct kgem_bo *saved_bo, *bo;
+	struct drm_mode_modeinfo saved_kmode;
+
+	DBG(("%s(crtc=%d [pipe=%d] rotation=%d, x=%d, y=%d, mode=%dx%d@%d)\n",
+	     __FUNCTION__, sna_crtc->id, sna_crtc->pipe, rotation, x, y,
+	     mode->HDisplay, mode->VDisplay, mode->Clock));
+
+	assert(mode->HDisplay <= sna->mode.kmode->max_width &&
+	       mode->VDisplay <= sna->mode.kmode->max_height);
+
+	/* Attach per-crtc pixmap or direct */
+	bo = sna_crtc_attach(crtc);
+	if (bo == NULL)
+		return FALSE;
+
+	saved_kmode = sna_crtc->kmode;
+	saved_bo = sna_crtc->bo;
+	sna_crtc->bo = bo;
 	mode_to_kmode(&sna_crtc->kmode, mode);
+
 	if (!sna_crtc_apply(crtc)) {
-		crtc->x = saved_x;
-		crtc->y = saved_y;
-		crtc->rotation = saved_rotation;
-		crtc->mode = saved_mode;
+		sna_crtc->bo = saved_bo;
+		sna_crtc->kmode = saved_kmode;
+		kgem_bo_destroy(&sna->kgem, bo);
 		return FALSE;
 	}
-	sna_mode_update(sna);
+	if (saved_bo)
+		kgem_bo_destroy(&sna->kgem, saved_bo);
 
 	update_flush_interval(sna);
+
+	sna_crtc_randr(crtc);
+	if (sna_crtc->shadow)
+		sna_crtc_damage(crtc);
+
 	return TRUE;
 }
 
@@ -748,8 +1019,18 @@ void sna_mode_adjust_frame(struct sna *sna, int x, int y)
 	xf86OutputPtr output = config->output[config->compat_output];
 	xf86CrtcPtr crtc = output->crtc;
 
-	if (crtc && crtc->enabled)
-		sna_crtc_set_mode_major(crtc, &crtc->mode, crtc->rotation, x, y);
+	if (crtc && crtc->enabled) {
+		int saved_x = crtc->x;
+		int saved_y = crtc->y;
+
+		crtc->x = x;
+		crtc->y = y;
+		if (!sna_crtc_set_mode_major(crtc, &crtc->mode,
+					     crtc->rotation, x, y)) {
+			crtc->x = saved_x;
+			crtc->y = saved_y;
+		}
+	}
 }
 
 static void
@@ -831,65 +1112,6 @@ sna_crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 *image)
 	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 }
 
-static void *
-sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
-{
-	ScrnInfoPtr scrn = crtc->scrn;
-	struct sna *sna = to_sna(scrn);
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-	PixmapPtr shadow;
-	struct kgem_bo *bo;
-
-	DBG(("%s(%d, %d)\n", __FUNCTION__, width, height));
-
-	shadow = scrn->pScreen->CreatePixmap(scrn->pScreen,
-					     width, height, scrn->depth,
-					     SNA_CREATE_FB);
-	if (!shadow)
-		return NULL;
-
-	bo = sna_pixmap_pin(shadow);
-	if (!bo) {
-		scrn->pScreen->DestroyPixmap(shadow);
-		return NULL;
-	}
-
-	sna_crtc->shadow_fb_id = get_fb(sna, bo, width, height);
-	if (sna_crtc->shadow_fb_id == 0) {
-		scrn->pScreen->DestroyPixmap(shadow);
-		return NULL;
-	}
-
-	DBG(("%s: attached handle %d to fb %d\n",
-	     __FUNCTION__, bo->handle, sna_crtc->shadow_fb_id));
-	return sna_crtc->shadow = shadow;
-}
-
-static PixmapPtr
-sna_crtc_shadow_create(xf86CrtcPtr crtc, void *data, int width, int height)
-{
-	return data;
-}
-
-static void
-sna_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr pixmap, void *data)
-{
-	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-
-	/* We may have not called shadow_create() on the data yet and
-	 * be cleaning up a NULL shadow_pixmap.
-	 */
-	pixmap = data;
-
-	DBG(("%s(fb=%d, handle=%d)\n", __FUNCTION__,
-	     sna_crtc->shadow_fb_id, sna_pixmap_get_bo(pixmap)->handle));
-
-	sna_crtc->shadow_fb_id = 0;
-
-	pixmap->drawable.pScreen->DestroyPixmap(pixmap);
-	sna_crtc->shadow = NULL;
-}
-
 static void
 sna_crtc_gamma_set(xf86CrtcPtr crtc,
 		       CARD16 *red, CARD16 *green, CARD16 *blue, int size)
@@ -924,9 +1146,6 @@ static const xf86CrtcFuncsRec sna_crtc_funcs = {
 	.show_cursor = sna_crtc_show_cursor,
 	.hide_cursor = sna_crtc_hide_cursor,
 	.load_cursor_argb = sna_crtc_load_cursor_argb,
-	.shadow_create = sna_crtc_shadow_create,
-	.shadow_allocate = sna_crtc_shadow_allocate,
-	.shadow_destroy = sna_crtc_shadow_destroy,
 	.gamma_set = sna_crtc_gamma_set,
 	.destroy = sna_crtc_destroy,
 };
@@ -986,7 +1205,7 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (sna_crtc == NULL)
 		return;
 
-	sna_crtc->id = mode->mode_res->crtcs[num];
+	sna_crtc->id = mode->kmode->crtcs[num];
 
 	VG_CLEAR(get_pipe);
 	get_pipe.pipe = 0;
@@ -1061,17 +1280,29 @@ sna_output_detect(xf86OutputPtr output)
 }
 
 static Bool
-sna_output_mode_valid(xf86OutputPtr output, DisplayModePtr pModes)
+sna_output_mode_valid(xf86OutputPtr output, DisplayModePtr mode)
 {
 	struct sna_output *sna_output = output->driver_private;
+	struct sna *sna = to_sna(output->scrn);
+
+	if (mode->HDisplay > sna->mode.kmode->max_width)
+		return MODE_VIRTUAL_X;
+	if (mode->VDisplay > sna->mode.kmode->max_height)
+		return MODE_VIRTUAL_Y;
+
+	/* Check that we can successfully pin this into the global GTT */
+	if ((kgem_can_create_2d(&sna->kgem,
+				mode->HDisplay, mode->VDisplay,
+				sna->scrn->bitsPerPixel) & KGEM_CAN_CREATE_GTT) == 0)
+		return MODE_MEM_VIRT;
 
 	/*
 	 * If the connector type is a panel, we will use the panel limit to
 	 * verfiy whether the mode is valid.
 	 */
 	if (sna_output->has_panel_limits) {
-		if (pModes->HDisplay > sna_output->panel_hdisplay ||
-		    pModes->VDisplay > sna_output->panel_vdisplay)
+		if (mode->HDisplay > sna_output->panel_hdisplay ||
+		    mode->VDisplay > sna_output->panel_vdisplay)
 			return MODE_PANEL;
 	}
 
@@ -1684,7 +1915,7 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	char name[32];
 
 	koutput = drmModeGetConnector(sna->kgem.fd,
-				      mode->mode_res->connectors[num]);
+				      mode->kmode->connectors[num]);
 	if (!koutput)
 		return;
 
@@ -1713,7 +1944,7 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (!sna_output)
 		goto cleanup_output;
 
-	sna_output->id = mode->mode_res->connectors[num];
+	sna_output->id = mode->kmode->connectors[num];
 	sna_output->mode_output = koutput;
 
 	output->mm_width = koutput->mmWidth;
@@ -1773,12 +2004,9 @@ sna_redirect_screen_pixmap(ScrnInfoPtr scrn, PixmapPtr old, PixmapPtr new)
 static Bool
 sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 {
-	struct sna *sna = to_sna(scrn);
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
-	struct sna_mode *mode = &sna->mode;
-	PixmapPtr old_front;
-	uint32_t old_fb_id;
-	struct kgem_bo *bo;
+	struct sna *sna = to_sna(scrn);
+	PixmapPtr old_front, new_front;
 	int i;
 
 	DBG(("%s (%d, %d) -> (%d, %d)\n",
@@ -1791,32 +2019,27 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 
 	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
 	assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
+	DBG(("%s: creating new framebuffer %dx%d\n",
+	     __FUNCTION__, width, height));
 
-	kgem_submit(&sna->kgem);
-
-	old_fb_id = mode->fb_id;
 	old_front = sna->front;
-
-	sna->front = scrn->pScreen->CreatePixmap(scrn->pScreen,
+	new_front = scrn->pScreen->CreatePixmap(scrn->pScreen,
 						 width, height,
 						 scrn->depth,
 						 SNA_CREATE_FB);
-	if (!sna->front)
-		goto fail;
-
-	bo = sna_pixmap_pin(sna->front);
-	if (!bo)
-		goto fail;
-
-	assert(bo->delta == 0);
+	if (!new_front)
+		return FALSE;
 
-	mode->fb_id = get_fb(sna, bo, width, height);
-	if (mode->fb_id == 0)
-		goto fail;
+	for (i = 0; i < xf86_config->num_crtc; i++)
+		sna_crtc_disable_shadow(sna, to_sna_crtc(xf86_config->crtc[i]));
+	assert(sna->mode.shadow_active == 0);
+	assert(sna->mode.shadow_damage == NULL);
+	assert(sna->mode.shadow == NULL);
 
-	DBG(("%s: handle %d, pixmap serial %lu attached to fb %d\n",
-	     __FUNCTION__, bo->handle,
-	     sna->front->drawable.serialNumber, mode->fb_id));
+	sna->front = new_front;
+	scrn->virtualX = width;
+	scrn->virtualY = height;
+	scrn->displayWidth = width;
 
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
@@ -1824,18 +2047,12 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		if (!crtc->enabled)
 			continue;
 
-		if (!sna_crtc_apply(crtc))
-			goto fail;
+		if (!sna_crtc_set_mode_major(crtc,
+					     &crtc->mode, crtc->rotation,
+					     crtc->x, crtc->y))
+			sna_crtc_disable(crtc);
 	}
-	sna_mode_update(sna);
 
-	kgem_bo_retire(&sna->kgem, bo);
-
-	scrn->virtualX = width;
-	scrn->virtualY = height;
-	scrn->displayWidth = bo->pitch / sna->mode.cpp;
-
-	sna->mode.fb_pixmap = sna->front->drawable.serialNumber;
 	sna_redirect_screen_pixmap(scrn, old_front, sna->front);
 	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
 	assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
@@ -1843,20 +2060,14 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	scrn->pScreen->DestroyPixmap(old_front);
 
 	return TRUE;
-
-fail:
-	DBG(("%s: restoring original front pixmap and fb\n", __FUNCTION__));
-	mode->fb_id = old_fb_id;
-
-	if (sna->front)
-		scrn->pScreen->DestroyPixmap(sna->front);
-	sna->front = old_front;
-	return FALSE;
 }
 
-static int do_page_flip(struct sna *sna, void *data, int ref_crtc_hw_id)
+static int do_page_flip(struct sna *sna, struct kgem_bo *bo,
+			void *data, int ref_crtc_hw_id)
 {
 	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	int width = sna->scrn->virtualX;
+	int height = sna->scrn->virtualY;
 	int count = 0;
 	int i;
 
@@ -1871,36 +2082,40 @@ static int do_page_flip(struct sna *sna, void *data, int ref_crtc_hw_id)
 	 */
 	for (i = 0; i < config->num_crtc; i++) {
 		struct sna_crtc *crtc = config->crtc[i]->driver_private;
-		uintptr_t evdata;
+		struct drm_mode_crtc_page_flip arg;
 
-		DBG(("%s: crtc %d active? %d\n",__FUNCTION__, i,crtc->active));
-		if (!crtc->active)
+		DBG(("%s: crtc %d active? %d\n",
+		     __FUNCTION__, i, crtc->bo != NULL));
+		if (crtc->bo == NULL)
 			continue;
 
+		arg.crtc_id = crtc->id;
+		arg.fb_id = get_fb(sna, bo, width, height);
+		if (arg.fb_id == 0)
+			goto disable;
+
 		/* Only the reference crtc will finally deliver its page flip
 		 * completion event. All other crtc's events will be discarded.
 		 */
-		evdata = (uintptr_t)data;
-		evdata |= crtc->pipe == ref_crtc_hw_id;
+		arg.user_data = (uintptr_t)data;
+		arg.user_data |= crtc->pipe == ref_crtc_hw_id;
+		arg.flags = DRM_MODE_PAGE_FLIP_EVENT;
+		arg.reserved = 0;
 
 		DBG(("%s: crtc %d [ref? %d] --> fb %d\n",
 		     __FUNCTION__, crtc->id,
-		     crtc->pipe == ref_crtc_hw_id,
-		     sna->mode.fb_id));
-		if (drmModePageFlip(sna->kgem.fd,
-				    crtc->id,
-				    sna->mode.fb_id,
-				    DRM_MODE_PAGE_FLIP_EVENT,
-				    (void*)evdata)) {
-			int err = errno;
+		     crtc->pipe == ref_crtc_hw_id, arg.fb_id));
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_PAGE_FLIP, &arg)) {
 			DBG(("%s: flip [fb=%d] on crtc %d [%d] failed - %d\n",
-			     __FUNCTION__, sna->mode.fb_id,
-			     i, crtc->id, err));
-			xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-				   "flip queue failed: %s\n", strerror(err));
+			     __FUNCTION__, arg.fb_id, i, crtc->id, errno));
+disable:
+			sna_crtc_disable(config->crtc[i]);
 			continue;
 		}
 
+		kgem_bo_destroy(&sna->kgem, crtc->bo);
+		crtc->bo = kgem_bo_reference(bo);
+
 		count++;
 	}
 
@@ -1914,23 +2129,9 @@ sna_page_flip(struct sna *sna,
 	      int ref_crtc_hw_id,
 	      uint32_t *old_fb)
 {
-	ScrnInfoPtr scrn = sna->scrn;
-	struct sna_mode *mode = &sna->mode;
 	int count;
 
-	*old_fb = mode->fb_id;
-
-	/*
-	 * Create a new handle for the back buffer
-	 */
-	mode->fb_id = get_fb(sna, bo, scrn->virtualX, scrn->virtualY);
-	if (mode->fb_id == 0) {
-		mode->fb_id = *old_fb;
-		return 0;
-	}
-
-	DBG(("%s: handle %d attached to fb %d\n",
-	     __FUNCTION__, bo->handle, mode->fb_id));
+	DBG(("%s: handle %d attached\n", __FUNCTION__, bo->handle));
 
 	kgem_submit(&sna->kgem);
 
@@ -1943,10 +2144,8 @@ sna_page_flip(struct sna *sna,
 	 * Also, flips queued on disabled or incorrectly configured displays
 	 * may never complete; this is a configuration error.
 	 */
-	count = do_page_flip(sna, data, ref_crtc_hw_id);
+	count = do_page_flip(sna, bo, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
-	if (count == 0)
-		mode->fb_id = *old_fb;
 
 	return count;
 }
@@ -1955,6 +2154,15 @@ static const xf86CrtcConfigFuncsRec sna_crtc_config_funcs = {
 	sna_crtc_resize
 };
 
+static void set_size_range(struct sna *sna)
+{
+	/* We lie slightly as we expect no single monitor to exceed the
+	 * crtc limits, so if the mode exceeds the scanout restrictions,
+	 * we will quietly convert that to per-crtc pixmaps.
+	 */
+	xf86CrtcSetSizeRange(sna->scrn, 320, 200, INT16_MAX, INT16_MAX);
+}
+
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 {
 	struct sna_mode *mode = &sna->mode;
@@ -1965,21 +2173,19 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 
 	xf86CrtcConfigInit(scrn, &sna_crtc_config_funcs);
 
-	mode->mode_res = drmModeGetResources(sna->kgem.fd);
-	if (!mode->mode_res) {
+	mode->kmode = drmModeGetResources(sna->kgem.fd);
+	if (!mode->kmode) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "failed to get resources: %s\n", strerror(errno));
 		return FALSE;
 	}
 
-	xf86CrtcSetSizeRange(scrn,
-			     320, 200,
-			     mode->mode_res->max_width,
-			     mode->mode_res->max_height);
-	for (i = 0; i < mode->mode_res->count_crtcs; i++)
+	set_size_range(sna);
+
+	for (i = 0; i < mode->kmode->count_crtcs; i++)
 		sna_crtc_init(scrn, mode, i);
 
-	for (i = 0; i < mode->mode_res->count_connectors; i++)
+	for (i = 0; i < mode->kmode->count_connectors; i++)
 		sna_output_init(scrn, mode, i);
 
 	xf86InitialConfiguration(scrn, TRUE);
@@ -1988,18 +2194,6 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna)
 }
 
 void
-sna_mode_remove_fb(struct sna *sna)
-{
-	struct sna_mode *mode = &sna->mode;
-
-	DBG(("%s: deleting fb id %d for pixmap serial %d\n",
-	     __FUNCTION__, mode->fb_id,mode->fb_pixmap));
-
-	mode->fb_id = 0;
-	mode->fb_pixmap = 0;
-}
-
-void
 sna_mode_fini(struct sna *sna)
 {
 #if 0
@@ -2015,38 +2209,9 @@ sna_mode_fini(struct sna *sna)
 						   link)->output);
 	}
 #endif
-
-	sna_mode_remove_fb(sna);
-
-	/* mode->shadow_fb_id should have been destroyed already */
-}
-
-static void sna_crtc_box(xf86CrtcPtr crtc, BoxPtr crtc_box)
-{
-	if (crtc->enabled) {
-		crtc_box->x1 = crtc->x;
-		crtc_box->y1 = crtc->y;
-
-		switch (crtc->rotation & 0xf) {
-		default:
-			assert(0);
-		case RR_Rotate_0:
-		case RR_Rotate_180:
-			crtc_box->x2 = crtc->x + crtc->mode.HDisplay;
-			crtc_box->y2 = crtc->y + crtc->mode.VDisplay;
-			break;
-
-		case RR_Rotate_90:
-		case RR_Rotate_270:
-			crtc_box->x2 = crtc->x + crtc->mode.VDisplay;
-			crtc_box->y2 = crtc->y + crtc->mode.HDisplay;
-			break;
-		}
-	} else
-		crtc_box->x1 = crtc_box->x2 = crtc_box->y1 = crtc_box->y2 = 0;
 }
 
-static void sna_box_intersect(BoxPtr r, const BoxRec *a, const BoxRec *b)
+static bool sna_box_intersect(BoxPtr r, const BoxRec *a, const BoxRec *b)
 {
 	r->x1 = a->x1 > b->x1 ? a->x1 : b->x1;
 	r->x2 = a->x2 < b->x2 ? a->x2 : b->x2;
@@ -2057,8 +2222,7 @@ static void sna_box_intersect(BoxPtr r, const BoxRec *a, const BoxRec *b)
 	     a->x1, a->y1, a->x2, a->y2,
 	     b->x1, b->y1, b->x2, b->y2,
 	     r->x1, r->y1, r->x2, r->y2));
-	if (r->x1 >= r->x2 || r->y1 >= r->y2)
-		r->x1 = r->x2 = r->y1 = r->y2 = 0;
+	return r->x2 > r->x1 && r->y2 > r->y1;
 }
 
 static int sna_box_area(const BoxRec *box)
@@ -2074,13 +2238,11 @@ static int sna_box_area(const BoxRec *box)
 xf86CrtcPtr
 sna_covering_crtc(ScrnInfoPtr scrn,
 		  const BoxRec *box,
-		  xf86CrtcPtr desired,
-		  BoxPtr crtc_box_ret)
+		  xf86CrtcPtr desired)
 {
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
 	xf86CrtcPtr best_crtc;
 	int best_coverage, c;
-	BoxRec best_crtc_box;
 
 	/* If we do not own the VT, we do not own the CRTC either */
 	if (!scrn->vtSema)
@@ -2091,51 +2253,46 @@ sna_covering_crtc(ScrnInfoPtr scrn,
 
 	best_crtc = NULL;
 	best_coverage = 0;
-	best_crtc_box.x1 = 0;
-	best_crtc_box.x2 = 0;
-	best_crtc_box.y1 = 0;
-	best_crtc_box.y2 = 0;
 	for (c = 0; c < xf86_config->num_crtc; c++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[c];
-		BoxRec crtc_box, cover_box;
+		BoxRec cover_box;
 		int coverage;
 
 		/* If the CRTC is off, treat it as not covering */
-		if (!sna_crtc_on(crtc)) {
+		if (to_sna_crtc(crtc)->bo == NULL) {
 			DBG(("%s: crtc %d off, skipping\n", __FUNCTION__, c));
 			continue;
 		}
 
-		sna_crtc_box(crtc, &crtc_box);
 		DBG(("%s: crtc %d: (%d, %d), (%d, %d)\n",
 		     __FUNCTION__, c,
-		     crtc_box.x1, crtc_box.y1,
-		     crtc_box.x2, crtc_box.y2));
+		     crtc->bounds.x1, crtc->bounds.y1,
+		     crtc->bounds.x2, crtc->bounds.y2));
+
+		if (!sna_box_intersect(&cover_box, &crtc->bounds, box))
+			continue;
 
-		sna_box_intersect(&cover_box, &crtc_box, box);
 		DBG(("%s: box instersects (%d, %d), (%d, %d) of crtc %d\n",
 		     __FUNCTION__,
 		     cover_box.x1, cover_box.y1,
 		     cover_box.x2, cover_box.y2,
 		     c));
-		coverage = sna_box_area(&cover_box);
-		DBG(("%s: box covers %d of crtc %d\n",
-		     __FUNCTION__, coverage, c));
-		if (coverage && crtc == desired) {
+		if (crtc == desired) {
 			DBG(("%s: box is on desired crtc [%p]\n",
 			     __FUNCTION__, crtc));
-			*crtc_box_ret = crtc_box;
 			return crtc;
 		}
+
+		coverage = sna_box_area(&cover_box);
+		DBG(("%s: box covers %d of crtc %d\n",
+		     __FUNCTION__, coverage, c));
 		if (coverage > best_coverage) {
-			best_crtc_box = crtc_box;
 			best_crtc = crtc;
 			best_coverage = coverage;
 		}
 	}
 	DBG(("%s: best crtc = %p, coverage = %d\n",
 	     __FUNCTION__, best_crtc, best_coverage));
-	*crtc_box_ret = best_crtc_box;
 	return best_crtc;
 }
 
@@ -2235,40 +2392,32 @@ sna_wait_for_scanline(struct sna *sna,
 		      xf86CrtcPtr crtc,
 		      const BoxRec *clip)
 {
-	pixman_box16_t box, crtc_box;
 	Bool full_height;
 	int y1, y2, pipe;
 
 	assert(crtc);
 	assert(sna_crtc_on(crtc));
-	assert(pixmap_is_scanout(pixmap));
+	assert(pixmap == sna->front);
 
 	/* XXX WAIT_EVENT is still causing hangs on SNB */
 	if (sna->kgem.gen >= 60)
 		return false;
 
-	sna_crtc_box(crtc, &crtc_box);
-	if (crtc->transform_in_use) {
-		box = *clip;
-		pixman_f_transform_bounds(&crtc->f_framebuffer_to_crtc, &box);
-		clip = &box;
-	}
-
 	/*
 	 * Make sure we don't wait for a scanline that will
 	 * never occur
 	 */
-	y1 = clip->y1 - crtc_box.y1;
+	y1 = clip->y1 - crtc->bounds.y1;
 	if (y1 < 0)
 		y1 = 0;
-	y2 = clip->y2 - crtc_box.y1;
-	if (y2 > crtc_box.y2 - crtc_box.y1)
-		y2 = crtc_box.y2 - crtc_box.y1;
+	y2 = clip->y2 - crtc->bounds.y1;
+	if (y2 > crtc->bounds.y2 - crtc->bounds.y1)
+		y2 = crtc->bounds.y2 - crtc->bounds.y1;
 	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
 	if (y2 <= y1)
 		return false;
 
-	full_height = y1 == 0 && y2 == crtc_box.y2 - crtc_box.y1;
+	full_height = y1 == 0 && y2 == crtc->bounds.y2 - crtc->bounds.y1;
 
 	if (crtc->mode.Flags & V_INTERLACE) {
 		/* DSL count field lines */
@@ -2298,10 +2447,396 @@ void sna_mode_update(struct sna *sna)
 	/* Validate CRTC attachments */
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
+		if (!crtc->active || !sna_crtc_is_bound(sna, crtc))
+			sna_crtc_disable(crtc);
+	}
+}
+
+static void
+sna_crtc_redisplay__fallback(xf86CrtcPtr crtc, RegionPtr region)
+{
+	struct sna *sna = to_sna(crtc->scrn);
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	ScreenPtr screen = sna->scrn->pScreen;
+	PictFormatPtr format;
+	PicturePtr src, dst;
+	PixmapPtr pixmap;
+	BoxPtr b;
+	int n, error;
+	void *ptr;
+
+	DBG(("%s: compositing transformed damage boxes\n", __FUNCTION__));
+
+	ptr = kgem_bo_map__gtt(&sna->kgem, sna_crtc->bo);
+	if (ptr == NULL)
+		return;
+
+	pixmap = fbCreatePixmap(screen, 0, 0, sna->front->drawable.depth, 0);
+	if (pixmap == NullPixmap)
+		return;
+
+	if (!screen->ModifyPixmapHeader(pixmap,
+					crtc->mode.HDisplay,
+					crtc->mode.VDisplay,
+					sna->front->drawable.depth,
+					sna->front->drawable.bitsPerPixel,
+					sna_crtc->bo->pitch, ptr))
+		goto free_pixmap;
+
+	error = sna_render_format_for_depth(sna->front->drawable.depth);
+	format = PictureMatchFormat(screen,
+				    PIXMAN_FORMAT_DEPTH(error), error);
+	if (format == NULL) {
+		DBG(("%s: can't find format for depth=%d [%08x]\n",
+		     __FUNCTION__, sna->front->drawable.depth,
+		     (int)sna_render_format_for_depth(sna->front->drawable.depth)));
+		goto free_pixmap;
+	}
+
+	src = CreatePicture(None, &sna->front->drawable, format,
+			    0, NULL, serverClient, &error);
+	if (!src)
+		goto free_pixmap;
+
+	error = SetPictureTransform(src, &crtc->crtc_to_framebuffer);
+	if (error)
+		goto free_src;
+
+	if (crtc->filter)
+		SetPicturePictFilter(src, crtc->filter,
+				     crtc->params, crtc->nparams);
+
+	dst = CreatePicture(None, &pixmap->drawable, format,
+			    0, NULL, serverClient, &error);
+	if (!dst)
+		goto free_src;
+
+	kgem_bo_sync__gtt(&sna->kgem, sna_crtc->bo);
+	n = REGION_NUM_RECTS(region);
+	b = REGION_RECTS(region);
+	do {
+		BoxRec box;
+
+		box = *b++;
+		box.x1 -= crtc->filter_width >> 1;
+		box.x2 += crtc->filter_width >> 1;
+		box.y1 -= crtc->filter_height >> 1;
+		box.y2 += crtc->filter_height >> 1;
+		pixman_f_transform_bounds(&crtc->f_framebuffer_to_crtc, & box);
+
+		DBG(("%s: (%d, %d)x(%d, %d) -> (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     b[-1].x1, b[-1].y1, b[-1].x2-b[-1].x1, b[-1].y2-b[-1].y1,
+		     box.x1, box.y1, box.x2, box.y2));
+
+		fbComposite(PictOpSrc, src, NULL, dst,
+			    box.x1, box.y1,
+			    0, 0,
+			    box.x1, box.y1,
+			    box.x2 - box.x1, box.y2 - box.y1);
+	} while (--n);
+
+	FreePicture(dst, None);
+free_src:
+	FreePicture(src, None);
+free_pixmap:
+	screen->DestroyPixmap(pixmap);
+}
+
+static void
+sna_crtc_redisplay__composite(xf86CrtcPtr crtc, RegionPtr region)
+{
+	struct sna *sna = to_sna(crtc->scrn);
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	ScreenPtr screen = sna->scrn->pScreen;
+	struct sna_composite_op tmp;
+	PictFormatPtr format;
+	PicturePtr src, dst;
+	PixmapPtr pixmap;
+	BoxPtr b;
+	int n, error;
+
+	DBG(("%s: compositing transformed damage boxes\n", __FUNCTION__));
+
+	pixmap = sna_pixmap_create_unattached(screen,
+					      crtc->mode.HDisplay,
+					      crtc->mode.VDisplay,
+					      sna->front->drawable.depth);
+	if (pixmap == NullPixmap)
+		return;
+
+	if (!sna_pixmap_attach_to_bo(pixmap, sna_crtc->bo))
+		goto free_pixmap;
+
+	error = sna_render_format_for_depth(sna->front->drawable.depth);
+	format = PictureMatchFormat(screen,
+				    PIXMAN_FORMAT_DEPTH(error), error);
+	if (format == NULL) {
+		DBG(("%s: can't find format for depth=%d [%08x]\n",
+		     __FUNCTION__, sna->front->drawable.depth,
+		     (int)sna_render_format_for_depth(sna->front->drawable.depth)));
+		goto free_pixmap;
+	}
+
+	src = CreatePicture(None, &sna->front->drawable, format,
+			    0, NULL, serverClient, &error);
+	if (!src)
+		goto free_pixmap;
+
+	error = SetPictureTransform(src, &crtc->crtc_to_framebuffer);
+	if (error)
+		goto free_src;
+
+	if (crtc->filter)
+		SetPicturePictFilter(src, crtc->filter,
+				     crtc->params, crtc->nparams);
+
+	dst = CreatePicture(None, &pixmap->drawable, format,
+			    0, NULL, serverClient, &error);
+	if (!dst)
+		goto free_src;
+
+	if (!sna->render.composite(sna,
+				   PictOpSrc, src, NULL, dst,
+				   0, 0,
+				   0, 0,
+				   0, 0,
+				   0, 0,
+				   memset(&tmp, 0, sizeof(tmp)))) {
+		DBG(("%s: unsupported operation!\n", __FUNCTION__));
+		sna_crtc_redisplay__fallback(crtc, region);
+		goto free_dst;
+	}
+
+	n = REGION_NUM_RECTS(region);
+	b = REGION_RECTS(region);
+	do {
+		BoxRec box;
+
+		box = *b++;
+		box.x1 -= crtc->filter_width >> 1;
+		box.x2 += crtc->filter_width >> 1;
+		box.y1 -= crtc->filter_height >> 1;
+		box.y2 += crtc->filter_height >> 1;
+		pixman_f_transform_bounds(&crtc->f_framebuffer_to_crtc, & box);
+
+		DBG(("%s: (%d, %d)x(%d, %d) -> (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     b[-1].x1, b[-1].y1, b[-1].x2-b[-1].x1, b[-1].y2-b[-1].y1,
+		     box.x1, box.y1, box.x2, box.y2));
+
+		tmp.box(sna, &tmp, &box);
+	} while (--n);
+	tmp.done(sna, &tmp);
+
+free_dst:
+	FreePicture(dst, None);
+free_src:
+	FreePicture(src, None);
+free_pixmap:
+	screen->DestroyPixmap(pixmap);
+}
+
+static void
+sna_crtc_redisplay(xf86CrtcPtr crtc, RegionPtr region)
+{
+	struct sna *sna = to_sna(crtc->scrn);
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	int16_t tx, ty;
+
+	DBG(("%s: crtc %d [pipe=%d], damage (%d, %d), (%d, %d) x %d\n",
+	     __FUNCTION__, sna_crtc->id, sna_crtc->pipe,
+	     region->extents.x1, region->extents.y1,
+	     region->extents.x2, region->extents.y2,
+	     REGION_NUM_RECTS(region)));
+
+	assert(!wedged(sna));
+
+	if (crtc->filter == NULL &&
+	    sna_transform_is_integer_translation(&crtc->crtc_to_framebuffer,
+						 &tx, &ty)) {
+		PixmapRec tmp;
+
+		DBG(("%s: copy damage boxes\n", __FUNCTION__));
+
+		tmp.drawable.width = crtc->mode.HDisplay;
+		tmp.drawable.height = crtc->mode.VDisplay;
+		tmp.drawable.depth = sna->front->drawable.depth;
+		tmp.drawable.bitsPerPixel = sna->front->drawable.bitsPerPixel;
+
+		/* XXX for tear-free we may want to try copying to a back
+		 * and flipping.
+		 */
+
+		if (sna->render.copy_boxes(sna, GXcopy,
+					   sna->front, sna_pixmap_get_bo(sna->front), tx, ty,
+					   &tmp, sna_crtc->bo, 0, 0,
+					   REGION_RECTS(region), REGION_NUM_RECTS(region)))
+			return;
+	}
+
+	sna_crtc_redisplay__composite(crtc, region);
+}
+
+void sna_mode_redisplay(struct sna *sna)
+{
+	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	RegionPtr region;
+	int i;
+
+	if (!sna->mode.shadow_damage)
+		return;
+
+	DBG(("%s: posting shadow damage\n", __FUNCTION__));
+	assert(sna->mode.shadow_active);
+
+	region = DamageRegion(sna->mode.shadow_damage);
+	if (!RegionNotEmpty(region))
+		return;
+
+	if (!sna_pixmap_move_to_gpu(sna->front, MOVE_READ)) {
+		if (!sna_pixmap_move_to_cpu(sna->front, MOVE_READ))
+			return;
+
+		for (i = 0; i < config->num_crtc; i++) {
+			xf86CrtcPtr crtc = config->crtc[i];
+			struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+			RegionRec damage;
+
+			if (!sna_crtc->shadow)
+				continue;
+
+			assert(crtc->enabled);
+			assert(crtc->transform_in_use);
+
+			damage.extents = crtc->bounds;
+			damage.data = NULL;
+			RegionIntersect(&damage, &damage, region);
+			if (RegionNotEmpty(&damage))
+				sna_crtc_redisplay__fallback(crtc, &damage);
+			RegionUninit(&damage);
+		}
+
+		RegionEmpty(region);
+		return;
+	}
+
+	for (i = 0; i < config->num_crtc; i++) {
+		xf86CrtcPtr crtc = config->crtc[i];
 		struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
-		if (crtc->enabled)
-			sna_crtc->active = sna_crtc_is_bound(sna, crtc);
-		else
-			sna_crtc->active = false;
+		RegionRec damage;
+
+		if (!sna_crtc->shadow || sna_crtc->bo == sna->mode.shadow)
+			continue;
+
+		assert(crtc->enabled);
+		assert(crtc->transform_in_use);
+
+		damage.extents = crtc->bounds;
+		damage.data = NULL;
+		RegionIntersect(&damage, &damage, region);
+		if (RegionNotEmpty(&damage))
+			sna_crtc_redisplay(crtc, &damage);
+		RegionUninit(&damage);
+	}
+
+	if (!sna->mode.shadow) {
+		kgem_submit(&sna->kgem);
+		RegionEmpty(region);
+		return;
+	}
+
+	if (sna->mode.shadow_flip == 0) {
+		struct kgem_bo *new = sna_pixmap_get_bo(sna->front);
+		struct kgem_bo *old = sna->mode.shadow;
+
+		DBG(("%s: flipping tear-free outputs\n", __FUNCTION__));
+		kgem_bo_submit(&sna->kgem, new);
+
+		for (i = 0; i < config->num_crtc; i++) {
+			struct sna_crtc *crtc = config->crtc[i]->driver_private;
+			struct drm_mode_crtc_page_flip arg;
+
+			DBG(("%s: crtc %d active? %d\n",
+			     __FUNCTION__, i, crtc->bo != NULL));
+			if (crtc->bo != old)
+				continue;
+
+			arg.crtc_id = crtc->id;
+			arg.fb_id = get_fb(sna, new,
+					   sna->scrn->virtualX,
+					   sna->scrn->virtualY);
+			if (arg.fb_id == 0)
+				goto disable;
+
+			/* Only the reference crtc will finally deliver its page flip
+			 * completion event. All other crtc's events will be discarded.
+			 */
+			arg.user_data = 0;
+			arg.flags = DRM_MODE_PAGE_FLIP_EVENT;
+			arg.reserved = 0;
+
+			if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_PAGE_FLIP, &arg)) {
+				DBG(("%s: flip [fb=%d] on crtc %d [%d] failed - %d\n",
+				     __FUNCTION__, arg.fb_id, i, crtc->id, errno));
+disable:
+				sna_crtc_disable(config->crtc[i]);
+				continue;
+			}
+
+			kgem_bo_destroy(&sna->kgem, old);
+			crtc->bo = kgem_bo_reference(new);
+			sna->mode.shadow_flip++;
+		}
+
+		/* XXX only works if the kernel stalls fwrites to the current
+		 * scanout whilst the flip is pending
+		 */
+		(void)sna->render.copy_boxes(sna, GXcopy,
+					     sna->front, new, 0, 0,
+					     sna->front, old, 0, 0,
+					     REGION_RECTS(region),
+					     REGION_NUM_RECTS(region));
+
+		sna_pixmap(sna->front)->gpu_bo = old;
+		sna->mode.shadow = new;
+
+		new->flush = old->flush;
+
+		RegionEmpty(region);
+	}
+}
+
+void sna_mode_wakeup(struct sna *sna)
+{
+	char buffer[1024];
+	int len, i;
+
+	/* The DRM read semantics guarantees that we always get only
+	 * complete events.
+	 */
+	len = read(sna->kgem.fd, buffer, sizeof (buffer));
+	if (len < (int)sizeof(struct drm_event))
+		return;
+
+	DBG(("%s: len=%d\n", __FUNCTION__, len));
+
+	i = 0;
+	while (i < len) {
+		struct drm_event *e = (struct drm_event *)&buffer[i];
+		switch (e->type) {
+		case DRM_EVENT_VBLANK:
+			sna_dri_vblank_handler(sna, (struct drm_event_vblank *)e);
+			break;
+		case DRM_EVENT_FLIP_COMPLETE:
+			if (((struct drm_event_vblank *)e)->user_data)
+				sna_dri_page_flip_handler(sna, (struct drm_event_vblank *)e);
+			else
+				sna->mode.shadow_flip--;
+			break;
+		default:
+			break;
+		}
+		i += e->length;
 	}
 }
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 46a43e9..7cf1d1c 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -73,7 +73,6 @@ struct sna_dri_private {
 };
 
 struct sna_dri_frame_event {
-	struct sna *sna;
 	XID drawable_id;
 	ClientPtr client;
 	enum frame_event_type type;
@@ -107,9 +106,9 @@ struct sna_dri_frame_event {
 static DevPrivateKeyRec sna_client_key;
 
 static inline struct sna_dri_frame_event *
-to_frame_event(void *data)
+to_frame_event(uintptr_t  data)
 {
-	 return (struct sna_dri_frame_event *)((uintptr_t)data & ~1);
+	 return (struct sna_dri_frame_event *)(data & ~1);
 }
 
 static inline struct sna_dri_private *
@@ -316,7 +315,7 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 	if (buffer == NULL)
 		return;
 
-	DBG(("%s: %p [handle=%d] -- refcnt=%d, pixmap=%d\n",
+	DBG(("%s: %p [handle=%d] -- refcnt=%d, pixmap=%ld\n",
 	     __FUNCTION__, buffer, private->bo->handle, private->refcnt,
 	     private->pixmap ? private->pixmap->drawable.serialNumber : 0));
 
@@ -458,12 +457,8 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 			return NULL;
 		}
 
-		if (pixmap == sna->front && sync &&
-		    (sna->flags & SNA_NO_WAIT) == 0) {
-			BoxRec crtc_box;
-
-			crtc = sna_covering_crtc(sna->scrn, &region->extents,
-						 NULL, &crtc_box);
+		if (sync && sna_pixmap_is_scanout(sna, pixmap)) {
+			crtc = sna_covering_crtc(sna->scrn, &region->extents, NULL);
 			if (crtc)
 				flush = sna_wait_for_scanline(sna, pixmap, crtc,
 							      &region->extents);
@@ -707,8 +702,8 @@ static int
 sna_dri_get_pipe(DrawablePtr pDraw)
 {
 	ScrnInfoPtr pScrn = xf86ScreenToScrn(pDraw->pScreen);
-	BoxRec box, crtcbox;
 	xf86CrtcPtr crtc;
+	BoxRec box;
 	int pipe;
 
 	if (pDraw->type == DRAWABLE_PIXMAP)
@@ -719,18 +714,15 @@ sna_dri_get_pipe(DrawablePtr pDraw)
 	box.x2 = box.x1 + pDraw->width;
 	box.y2 = box.y1 + pDraw->height;
 
-	crtc = sna_covering_crtc(pScrn, &box, NULL, &crtcbox);
+	crtc = sna_covering_crtc(pScrn, &box, NULL);
 
 	/* Make sure the CRTC is valid and this is the real front buffer */
 	pipe = -1;
-	if (crtc != NULL && !crtc->rotatedData)
+	if (crtc != NULL)
 		pipe = sna_crtc_to_pipe(crtc);
 
-	DBG(("%s(box=((%d, %d), (%d, %d)), crtcbox=((%d, %d), (%d, %d)), pipe=%d)\n",
-	     __FUNCTION__,
-	     box.x1, box.y1, box.x2, box.y2,
-	     crtcbox.x1, crtcbox.y1, crtcbox.x2, crtcbox.y2,
-	     pipe));
+	DBG(("%s(box=((%d, %d), (%d, %d)), pipe=%d)\n",
+	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2, pipe));
 
 	return pipe;
 }
@@ -882,7 +874,8 @@ sna_dri_frame_event_release_bo(struct kgem *kgem, struct kgem_bo *bo)
 }
 
 static void
-sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
+sna_dri_frame_event_info_free(struct sna *sna,
+			      struct sna_dri_frame_event *info)
 {
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
 	     info, info->client, (long)info->drawable_id));
@@ -890,23 +883,20 @@ sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 	list_del(&info->client_resource);
 	list_del(&info->drawable_resource);
 
-	_sna_dri_destroy_buffer(info->sna, info->front);
-	_sna_dri_destroy_buffer(info->sna, info->back);
+	_sna_dri_destroy_buffer(sna, info->front);
+	_sna_dri_destroy_buffer(sna, info->back);
 
 	if (info->old_front.bo)
-		sna_dri_frame_event_release_bo(&info->sna->kgem,
-					       info->old_front.bo);
+		sna_dri_frame_event_release_bo(&sna->kgem, info->old_front.bo);
 
 	if (info->next_front.bo)
-		sna_dri_frame_event_release_bo(&info->sna->kgem,
-					       info->next_front.bo);
+		sna_dri_frame_event_release_bo(&sna->kgem, info->next_front.bo);
 
 	if (info->cache.bo)
-		sna_dri_frame_event_release_bo(&info->sna->kgem,
-					       info->cache.bo);
+		sna_dri_frame_event_release_bo(&sna->kgem, info->cache.bo);
 
 	if (info->bo)
-		kgem_bo_destroy(&info->sna->kgem, info->bo);
+		kgem_bo_destroy(&sna->kgem, info->bo);
 
 	free(info);
 }
@@ -974,7 +964,7 @@ can_flip(struct sna * sna,
 		return FALSE;
 	}
 
-	if (sna->shadow) {
+	if (sna->mode.shadow_active) {
 		DBG(("%s: no, shadow enabled\n", __FUNCTION__));
 		return FALSE;
 	}
@@ -1047,14 +1037,10 @@ inline static uint32_t pipe_select(int pipe)
 		return 0;
 }
 
-static void sna_dri_vblank_handle(int fd,
-				  unsigned int frame, unsigned int tv_sec,
-				  unsigned int tv_usec,
-				  void *data)
+void sna_dri_vblank_handler(struct sna *sna, struct drm_event_vblank *event)
 {
-	struct sna_dri_frame_event *info = data;
+	struct sna_dri_frame_event *info = (void *)(uintptr_t)event->user_data;
 	DrawablePtr draw;
-	struct sna *sna;
 	int status;
 
 	DBG(("%s(id=%d, type=%d)\n", __FUNCTION__,
@@ -1069,8 +1055,6 @@ static void sna_dri_vblank_handle(int fd,
 	if (status != Success)
 		goto done;
 
-	sna = to_sna_from_drawable(draw);
-
 	switch (info->type) {
 	case DRI2_FLIP:
 		/* If we can still flip... */
@@ -1091,7 +1075,8 @@ static void sna_dri_vblank_handle(int fd,
 		/* fall through to SwapComplete */
 	case DRI2_SWAP_THROTTLE:
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
-		     __FUNCTION__, info->type, frame, tv_sec, tv_usec));
+		     __FUNCTION__, info->type,
+		     event->sequence, event->tv_sec, event->tv_usec));
 
 		if (info->bo && kgem_bo_is_busy(info->bo)) {
 			kgem_retire(&sna->kgem);
@@ -1111,8 +1096,8 @@ static void sna_dri_vblank_handle(int fd,
 		}
 
 		DRI2SwapComplete(info->client,
-				 draw, frame,
-				 tv_sec, tv_usec,
+				 draw, event->sequence,
+				 event->tv_sec, event->tv_usec,
 				 DRI2_BLIT_COMPLETE,
 				 info->client ? info->event_complete : NULL,
 				 info->event_data);
@@ -1121,7 +1106,9 @@ static void sna_dri_vblank_handle(int fd,
 	case DRI2_WAITMSC:
 		if (info->client)
 			DRI2WaitMSCComplete(info->client, draw,
-					    frame, tv_sec, tv_usec);
+					    event->sequence,
+					    event->tv_sec,
+					    event->tv_usec);
 		break;
 	default:
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
@@ -1131,7 +1118,7 @@ static void sna_dri_vblank_handle(int fd,
 	}
 
 done:
-	sna_dri_frame_event_info_free(info);
+	sna_dri_frame_event_info_free(sna, info);
 }
 
 static int
@@ -1222,7 +1209,7 @@ static void sna_dri_flip_event(struct sna *sna,
 					 flip->event_data);
 		}
 
-		sna_dri_frame_event_info_free(flip);
+		sna_dri_frame_event_info_free(sna, flip);
 		break;
 
 	case DRI2_FLIP_THROTTLE:
@@ -1251,10 +1238,10 @@ static void sna_dri_flip_event(struct sna *sna,
 						DRI2_EXCHANGE_COMPLETE,
 						flip->client ? flip->event_complete : NULL,
 						flip->event_data);
-				sna_dri_frame_event_info_free(flip);
+				sna_dri_frame_event_info_free(sna, flip);
 			}
 		} else
-			sna_dri_frame_event_info_free(flip);
+			sna_dri_frame_event_info_free(sna, flip);
 		break;
 
 #if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
@@ -1298,7 +1285,7 @@ finish_async_flip:
 
 			DBG(("%s: async flip completed\n", __FUNCTION__));
 			sna->dri.flip_pending = NULL;
-			sna_dri_frame_event_info_free(flip);
+			sna_dri_frame_event_info_free(sna, flip);
 		}
 		break;
 #endif
@@ -1311,26 +1298,26 @@ finish_async_flip:
 	}
 }
 
-static void
-sna_dri_page_flip_handler(int fd, unsigned int frame, unsigned int tv_sec,
-			  unsigned int tv_usec, void *data)
+void
+sna_dri_page_flip_handler(struct sna *sna,
+			  struct drm_event_vblank *event)
 {
-	struct sna_dri_frame_event *info = to_frame_event(data);
+	struct sna_dri_frame_event *info = to_frame_event(event->user_data);
 
 	DBG(("%s: pending flip_count=%d\n", __FUNCTION__, info->count));
 
 	/* Is this the event whose info shall be delivered to higher level? */
-	if ((uintptr_t)data & 1) {
+	if (event->user_data & 1) {
 		/* Yes: Cache msc, ust for later delivery. */
-		info->fe_frame = frame;
-		info->fe_tv_sec = tv_sec;
-		info->fe_tv_usec = tv_usec;
+		info->fe_frame = event->sequence;
+		info->fe_tv_sec = event->tv_sec;
+		info->fe_tv_usec = event->tv_usec;
 	}
 
 	if (--info->count)
 		return;
 
-	sna_dri_flip_event(info->sna, info);
+	sna_dri_flip_event(sna, info);
 }
 
 static int
@@ -1387,7 +1374,6 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		info->type = type;
 
-		info->sna = sna;
 		info->drawable_id = draw->id;
 		info->client = client;
 		info->event_complete = func;
@@ -1407,7 +1393,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		if (!sna_dri_page_flip(sna, info)) {
 			DBG(("%s: failed to queue page flip\n", __FUNCTION__));
-			sna_dri_frame_event_info_free(info);
+			sna_dri_frame_event_info_free(sna, info);
 			return FALSE;
 		}
 
@@ -1431,7 +1417,6 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (info == NULL)
 			return FALSE;
 
-		info->sna = sna;
 		info->drawable_id = draw->id;
 		info->client = client;
 		info->event_complete = func;
@@ -1454,7 +1439,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 		vbl.request.sequence = 0;
 		if (sna_wait_vblank(sna, &vbl)) {
-			sna_dri_frame_event_info_free(info);
+			sna_dri_frame_event_info_free(sna, info);
 			return FALSE;
 		}
 
@@ -1509,7 +1494,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence -= 1;
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl)) {
-			sna_dri_frame_event_info_free(info);
+			sna_dri_frame_event_info_free(sna, info);
 			return FALSE;
 		}
 
@@ -1588,7 +1573,6 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	if (!info)
 		goto blit_fallback;
 
-	info->sna = sna;
 	info->drawable_id = draw->id;
 	info->client = client;
 	info->event_complete = func;
@@ -1629,7 +1613,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 				return TRUE;
 		}
 
-		sna_dri_frame_event_info_free(info);
+		sna_dri_frame_event_info_free(sna, info);
 		DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
 		return TRUE;
 	}
@@ -1713,7 +1697,7 @@ blit_fallback:
 			      get_private(back)->bo,
 			      false);
 	if (info)
-		sna_dri_frame_event_info_free(info);
+		sna_dri_frame_event_info_free(sna, info);
 	DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
 	*target_msc = 0; /* offscreen, so zero out target vblank count */
 	return TRUE;
@@ -1812,7 +1796,7 @@ blit:
 		sna_dri_reference_buffer(back);
 
 		if (!sna_dri_page_flip(sna, info)) {
-			sna_dri_frame_event_info_free(info);
+			sna_dri_frame_event_info_free(sna, info);
 			goto blit;
 		}
 
@@ -1952,7 +1936,6 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (!info)
 		goto out_complete;
 
-	info->sna = sna;
 	info->drawable_id = draw->id;
 	info->client = client;
 	info->type = DRI2_WAITMSC;
@@ -2009,7 +1992,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	return TRUE;
 
 out_free_info:
-	sna_dri_frame_event_info_free(info);
+	sna_dri_frame_event_info_free(sna, info);
 out_complete:
 	DRI2WaitMSCComplete(client, draw, target_msc, 0, 0);
 	return TRUE;
@@ -2093,20 +2076,6 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	return DRI2ScreenInit(screen, &info);
 }
 
-void
-sna_dri_wakeup(struct sna *sna)
-{
-	drmEventContext ctx;
-
-	DBG(("%s\n", __FUNCTION__));
-
-	ctx.version = DRM_EVENT_CONTEXT_VERSION;
-	ctx.vblank_handler = sna_dri_vblank_handle;
-	ctx.page_flip_handler = sna_dri_page_flip_handler;
-
-	drmHandleEvent(sna->kgem.fd, &ctx);
-}
-
 void sna_dri_close(struct sna *sna, ScreenPtr screen)
 {
 	DBG(("%s()\n", __FUNCTION__));
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a02ff76..3b3b93f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -47,6 +47,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include <xf86cmap.h>
 #include <xf86drm.h>
+#include <xf86RandR12.h>
 #include <micmap.h>
 #include <fb.h>
 
@@ -492,8 +493,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (!xf86SetDefaultVisual(scrn, -1))
 		return FALSE;
 
-	sna->mode.cpp = scrn->bitsPerPixel / 8;
-
 	if (!sna_get_early_options(scrn))
 		return FALSE;
 
@@ -527,7 +526,10 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		sna->flags |= SNA_NO_DELAYED_FLUSH;
 	if (!xf86ReturnOptValBool(sna->Options, OPTION_SWAPBUFFERS_WAIT, TRUE))
 		sna->flags |= SNA_NO_WAIT;
-	if (!has_pageflipping(sna))
+	if (has_pageflipping(sna)) {
+		if (xf86ReturnOptValBool(sna->Options, OPTION_TEAR_FREE, FALSE))
+			sna->flags |= SNA_TEAR_FREE;
+	} else
 		sna->flags |= SNA_NO_FLIP;
 
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Framebuffer %s\n",
@@ -540,6 +542,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		   sna->flags & SNA_NO_THROTTLE ? "dis" : "en");
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Delayed flush %sabled\n",
 		   sna->flags & SNA_NO_DELAYED_FLUSH ? "dis" : "en");
+	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "\"Tear free\" %sabled\n",
+		   sna->flags & SNA_TEAR_FREE ? "en" : "dis");
 
 	if (!sna_mode_pre_init(scrn, sna)) {
 		PreInitCleanup(scrn);
@@ -608,7 +612,7 @@ sna_wakeup_handler(WAKEUPHANDLER_ARGS_DECL)
 	sna_accel_wakeup_handler(sna, read_mask);
 
 	if (FD_ISSET(sna->kgem.fd, (fd_set*)read_mask))
-		sna_dri_wakeup(sna);
+		sna_mode_wakeup(sna);
 }
 
 #if HAVE_UDEV
@@ -735,7 +739,6 @@ static void sna_leave_vt(VT_FUNC_ARGS_DECL)
 
 	xf86RotateFreeShadow(scrn);
 	xf86_hide_cursors(scrn);
-	sna_mode_remove_fb(sna);
 
 	ret = drmDropMaster(sna->kgem.fd);
 	if (ret)
@@ -747,7 +750,7 @@ static void sna_leave_vt(VT_FUNC_ARGS_DECL)
  * check that the fd is readable before attempting to read the next
  * event from drm.
  */
-static Bool sna_dri_has_pending_events(struct sna *sna)
+static Bool sna_mode_has_pending_events(struct sna *sna)
 {
 	struct pollfd pfd;
 	pfd.fd = sna->kgem.fd;
@@ -767,8 +770,8 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 #endif
 
 	/* drain the event queues */
-	if (sna_dri_has_pending_events(sna))
-		sna_dri_wakeup(sna);
+	if (sna_mode_has_pending_events(sna))
+		sna_mode_wakeup(sna);
 
 	if (scrn->vtSema == TRUE)
 		sna_leave_vt(VT_FUNC_ARGS(0));
@@ -788,7 +791,6 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 		sna->directRenderingOpen = FALSE;
 	}
 
-	sna_mode_remove_fb(sna);
 	if (sna->front) {
 		screen->DestroyPixmap(sna->front);
 		sna->front = NULL;
@@ -799,6 +801,20 @@ static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 	return TRUE;
 }
 
+static void sna_mode_set(ScrnInfoPtr scrn)
+{
+	struct sna *sna = to_sna(scrn);
+
+	DBG(("%s\n", __FUNCTION__));
+
+	if (sna->ModeSet) {
+		scrn->ModeSet = sna->ModeSet;
+		scrn->ModeSet(scrn);
+		scrn->ModeSet = sna_mode_set;
+	}
+	sna_mode_update(sna);
+}
+
 static Bool
 sna_register_all_privates(void)
 {
@@ -917,9 +933,17 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 	screen->CloseScreen = sna_close_screen;
 	screen->CreateScreenResources = sna_create_screen_resources;
 
+	sna->ModeSet = scrn->ModeSet;
+	scrn->ModeSet = sna_mode_set;
+
 	if (!xf86CrtcScreenInit(screen))
 		return FALSE;
 
+	xf86RandR12SetRotations(screen,
+				RR_Rotate_0 | RR_Rotate_90 | RR_Rotate_180 | RR_Rotate_270 |
+				RR_Reflect_X | RR_Reflect_Y);
+	xf86RandR12SetTransformSupport(screen, TRUE);
+
 	if (!miCreateDefColormap(screen))
 		return FALSE;
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 6ddf6f3..a072994 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -64,15 +64,15 @@ CARD32
 sna_render_format_for_depth(int depth)
 {
 	switch (depth) {
-	case 1: return PICT_a1;
-	case 4: return PICT_a4;
-	case 8: return PICT_a8;
-	case 15: return PICT_a1r5g5b5;
-	case 16: return PICT_r5g6b5;
-	case 30: return PICT_a2r10g10b10;
+	case 1: return PIXMAN_a1;
+	case 4: return PIXMAN_a4;
+	case 8: return PIXMAN_a8;
+	case 15: return PIXMAN_a1r5g5b5;
+	case 16: return PIXMAN_r5g6b5;
+	case 30: return PIXMAN_a2r10g10b10;
 	default: assert(0);
 	case 24:
-	case 32: return PICT_a8r8g8b8;
+	case 32: return PIXMAN_a8r8g8b8;
 	}
 }
 
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 08b848b..6ad81c3 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -143,7 +143,6 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 	Bool ret;
 	RegionRec crtc_region_local;
 	RegionPtr crtc_region = reg;
-	BoxRec crtc_box;
 	INT32 x1, x2, y1, y2;
 	xf86CrtcPtr crtc;
 
@@ -161,11 +160,12 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 	 * For overlay video, compute the relevant CRTC and
 	 * clip video to that
 	 */
-	crtc = sna_covering_crtc(scrn, dst, video->desired_crtc, &crtc_box);
+	crtc = sna_covering_crtc(scrn, dst, video->desired_crtc);
 
 	/* For textured video, we don't actually want to clip at all. */
 	if (crtc && !video->textured) {
-		RegionInit(&crtc_region_local, &crtc_box, 0);
+		crtc_region_local.extents = crtc->bounds;
+		crtc_region_local.data = NULL;
 		crtc_region = &crtc_region_local;
 		RegionIntersect(crtc_region, crtc_region, reg);
 	}
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 33f3f71..1b3b3af 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -281,7 +281,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	}
 
 	if (crtc && video->SyncToVblank != 0 &&
-	    pixmap == sna->front && !sna->shadow)
+	    sna_pixmap_is_scanout(sna, pixmap))
 		flush = sna_wait_for_scanline(sna, pixmap, crtc,
 					      &clip->extents);
 
commit e8b090902e788257610374deae659f01a91888f3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 22 22:01:37 2012 +0100

    sna/gen3+: Remove stale assertions for cached vbo
    
    Following the previous commit, we reset the vbo when it becomes idle
    rather than discard it. As such, the assertions to check that we are
    discarding the vbo are now bogus.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b66e0e0..63bbd76 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1678,12 +1678,8 @@ static void gen3_vertex_close(struct sna *sna)
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vertex_size,
 	     sna->render.vbo ? sna->render.vbo->handle : 0));
 
-	if (sna->render.vertex_used == 0) {
-		assert(sna->render.vbo == NULL);
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	if (sna->render.vertex_used == 0)
 		return;
-	}
 
 	bo = sna->render.vbo;
 	if (bo) {
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6379a18..cd4ca36 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -426,12 +426,8 @@ static void gen4_vertex_close(struct sna *sna)
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used) {
-		assert(sna->render.vbo == NULL);
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	if (!sna->render.vertex_used)
 		return;
-	}
 
 	bo = sna->render.vbo;
 	if (bo) {
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 7d424aa..27ba04d 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -418,12 +418,8 @@ static void gen5_vertex_close(struct sna *sna)
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used) {
-		assert(sna->render.vbo == NULL);
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	if (!sna->render.vertex_used)
 		return;
-	}
 
 	bo = sna->render.vbo;
 	if (bo) {
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 896693c..ecc8dfb 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -997,12 +997,8 @@ static void gen6_vertex_close(struct sna *sna)
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used) {
-		assert(sna->render.vbo == NULL);
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	if (!sna->render.vertex_used)
 		return;
-	}
 
 	bo = sna->render.vbo;
 	if (bo) {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ea9b01a..6cf75eb 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1153,12 +1153,8 @@ static void gen7_vertex_close(struct sna *sna)
 	DBG(("%s: used=%d, vbo active? %d\n",
 	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
 
-	if (!sna->render.vertex_used) {
-		assert(sna->render.vbo == NULL);
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	if (!sna->render.vertex_used)
 		return;
-	}
 
 	bo = sna->render.vbo;
 	if (bo) {
commit 565297e6bd3457a150036af9c62fe0dc67b794ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 13:53:02 2012 +0100

    sna/gen3+: Keep vbo cached
    
    Once we switch to using a vbo, keep it cached (resetting every time it is
    idle) until we expire our caches.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 54ec9b2..b66e0e0 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1797,6 +1797,7 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 		return 0;
 	if (!kgem_check_reloc_and_exec(&sna->kgem, 1))
 		return 0;
+
 	if (op->need_magic_ca_pass && sna->render.vbo)
 		return 0;
 
@@ -1988,7 +1989,20 @@ gen3_render_retire(struct kgem *kgem)
 	struct sna *sna;
 
 	sna = container_of(kgem, struct sna, kgem);
-	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
+static void
+gen3_render_expire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (sna->render.vbo && !sna->render.vertex_used) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
 		discard_vbo(sna);
 	}
@@ -4680,5 +4694,6 @@ Bool gen3_render_init(struct sna *sna)
 	render->max_3d_pitch = MAX_3D_PITCH;
 
 	sna->kgem.retire = gen3_render_retire;
+	sna->kgem.expire = gen3_render_expire;
 	return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 65c21c3..7d424aa 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3671,7 +3671,20 @@ gen5_render_retire(struct kgem *kgem)
 	struct sna *sna;
 
 	sna = container_of(kgem, struct sna, kgem);
-	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
+static void
+gen5_render_expire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (sna->render.vbo && !sna->render.vertex_used) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
 		discard_vbo(sna);
 	}
@@ -3944,6 +3957,7 @@ Bool gen5_render_init(struct sna *sna)
 
 	sna->kgem.context_switch = gen5_render_context_switch;
 	sna->kgem.retire = gen5_render_retire;
+	sna->kgem.expire = gen5_render_expire;
 
 	sna->render.composite = gen5_render_composite;
 #if !NO_COMPOSITE_SPANS
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 563e04c..896693c 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -4187,7 +4187,20 @@ gen6_render_retire(struct kgem *kgem)
 		kgem->ring = kgem->mode;
 
 	sna = container_of(kgem, struct sna, kgem);
-	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
+static void
+gen6_render_expire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (sna->render.vbo && !sna->render.vertex_used) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
 		kgem_bo_destroy(kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
@@ -4273,6 +4286,7 @@ Bool gen6_render_init(struct sna *sna)
 
 	sna->kgem.context_switch = gen6_render_context_switch;
 	sna->kgem.retire = gen6_render_retire;
+	sna->kgem.expire = gen6_render_expire;
 
 	sna->render.composite = gen6_render_composite;
 #if !NO_COMPOSITE_SPANS
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 726a67e..ea9b01a 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4298,7 +4298,20 @@ gen7_render_retire(struct kgem *kgem)
 		kgem->ring = kgem->mode;
 
 	sna = container_of(kgem, struct sna, kgem);
-	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+	if (kgem->nbatch == 0 && sna->render.vbo && !kgem_bo_is_busy(sna->render.vbo)) {
+		DBG(("%s: resetting idle vbo\n", __FUNCTION__));
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
+static void
+gen7_render_expire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (sna->render.vbo && !sna->render.vertex_used) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
 		kgem_bo_destroy(kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
@@ -4386,6 +4399,7 @@ Bool gen7_render_init(struct sna *sna)
 
 	sna->kgem.context_switch = gen7_render_context_switch;
 	sna->kgem.retire = gen7_render_retire;
+	sna->kgem.expire = gen7_render_expire;
 
 	sna->render.composite = gen7_render_composite;
 #if !NO_COMPOSITE_SPANS
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0b5ca61..9fe3661 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2049,6 +2049,8 @@ bool kgem_expire_cache(struct kgem *kgem)
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
 
+	kgem->expire(kgem);
+
 	if (kgem->need_purge)
 		kgem_purge_cache(kgem);
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 408ad03..c154be5 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -169,6 +169,7 @@ struct kgem {
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
 	void (*retire)(struct kgem *kgem);
+	void (*expire)(struct kgem *kgem);
 
 	uint32_t batch[64*1024-8];
 	struct drm_i915_gem_exec_object2 exec[256];
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 49f7c5e..6ddf6f3 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -252,6 +252,12 @@ no_render_retire(struct kgem *kgem)
 }
 
 static void
+no_render_expire(struct kgem *kgem)
+{
+	(void)kgem;
+}
+
+static void
 no_render_fini(struct sna *sna)
 {
 	(void)sna;
@@ -282,6 +288,7 @@ void no_render_init(struct sna *sna)
 
 	sna->kgem.context_switch = no_render_context_switch;
 	sna->kgem.retire = no_render_retire;
+	sna->kgem.expire = no_render_expire;
 	if (sna->kgem.gen >= 60)
 		sna->kgem.ring = KGEM_BLT;
 }
commit d806973e21cd46e605b3cd405323ae7a64c12798
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 12:57:13 2012 +0100

    sna: Micro-optimise search_inactive_cache
    
    Discard the unneeded next parameter to drop a memory reference in a hot
    path, and don't wait for a retirement if we are looking in a larger
    bucket than suits.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 90b4c96..0b5ca61 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2558,7 +2558,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       uint32_t flags)
 {
 	struct list *cache;
-	struct kgem_bo *bo, *next;
+	struct kgem_bo *bo;
 	uint32_t pitch, untiled_pitch, tiled_height, size;
 	uint32_t handle;
 	int i, bucket, retry;
@@ -2847,7 +2847,7 @@ search_inactive:
 	/* Now just look for a close match and prefer any currently active */
 	assert(bucket < NUM_CACHE_BUCKETS);
 	cache = &kgem->inactive[bucket];
-	list_for_each_entry_safe(bo, next, cache, list) {
+	list_for_each_entry(bo, cache, list) {
 		assert(bucket(bo) == bucket);
 		assert(bo->reusable);
 
@@ -2861,10 +2861,8 @@ search_inactive:
 		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
 			if (tiling != gem_set_tiling(kgem->fd,
 						     bo->handle,
-						     tiling, pitch)) {
-				kgem_bo_free(kgem, bo);
+						     tiling, pitch))
 				continue;
-			}
 
 			if (bo->map)
 				kgem_bo_release_map(kgem, bo);
@@ -2872,7 +2870,7 @@ search_inactive:
 
 		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
 			kgem_bo_free(kgem, bo);
-			continue;
+			break;
 		}
 
 		kgem_bo_remove_from_inactive(kgem, bo);
@@ -2903,6 +2901,7 @@ search_inactive:
 
 	if (--retry) {
 		bucket++;
+		flags &= ~CREATE_INACTIVE;
 		goto search_inactive;
 	}
 
commit d39fef0a7f3daf5c07686b44e4dea01c0f06c77a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 12:25:35 2012 +0100

    sna: Tiles are only 128 bytes wide on gen2
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bb1b77d..90b4c96 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -803,11 +803,12 @@ void kgem_get_tile_size(struct kgem *kgem, int tiling,
 {
 	if (kgem->gen <= 30) {
 		if (tiling) {
-			*tile_width = 512;
 			if (kgem->gen < 30) {
+				*tile_width = 128;
 				*tile_height = 16;
 				*tile_size = 2048;
 			} else {
+				*tile_width = 512;
 				*tile_height = 8;
 				*tile_size = 4096;
 			}
@@ -853,8 +854,13 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 
 	if (kgem->gen <= 30) {
 		if (tiling) {
-			tile_width = 512;
-			tile_height = kgem->gen < 30 ? 16 : 8;
+			if (kgem->gen < 30) {
+				tile_width = 128;
+				tile_height = 16;
+			} else {
+				tile_width = 512;
+				tile_height =  8;
+			}
 		} else {
 			tile_width = 2 * bpp >> 3;
 			tile_width = ALIGN(tile_width,
commit 4f2dde1fa3b04b27bae8fc0bca9c824bd362d23b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 10:31:24 2012 +0100

    sna/gen7: Eliminate the pipeline stall after a non-pipelined operation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index b0f7cfc..726a67e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -955,12 +955,12 @@ gen7_emit_vertex_elements(struct sna *sna,
 }
 
 inline static void
-gen7_emit_pipe_invalidate(struct sna *sna)
+gen7_emit_pipe_invalidate(struct sna *sna, bool stall)
 {
 	OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
 	OUT_BATCH(GEN7_PIPE_CONTROL_WC_FLUSH |
 		  GEN7_PIPE_CONTROL_TC_FLUSH |
-		  GEN7_PIPE_CONTROL_CS_STALL);
+		  (stall ? GEN7_PIPE_CONTROL_CS_STALL : 0));
 	OUT_BATCH(0);
 	OUT_BATCH(0);
 }
@@ -1020,7 +1020,7 @@ gen7_emit_state(struct sna *sna,
 	need_stall &= gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
-		gen7_emit_pipe_invalidate(sna);
+		gen7_emit_pipe_invalidate(sna, need_stall);
 		kgem_clear_dirty(&sna->kgem);
 		kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
@@ -1042,7 +1042,7 @@ static void gen7_magic_ca_pass(struct sna *sna,
 	DBG(("%s: CA fixup (%d -> %d)\n", __FUNCTION__,
 	     sna->render.vertex_start, sna->render.vertex_index));
 
-	gen7_emit_pipe_invalidate(sna);
+	gen7_emit_pipe_invalidate(sna, true);
 
 	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, TRUE, op->dst.format));
 	gen7_emit_wm(sna,
commit 3ef05a8d0833203e265aff392f225a11a11c2d01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 09:36:42 2012 +0100

    sna/gen7: Do not emit a pipeline stall after a non-pipelined command
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e3c8269..b0f7cfc 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -850,7 +850,7 @@ gen7_emit_binding_table(struct sna *sna, uint16_t offset)
 	return true;
 }
 
-static void
+static bool
 gen7_emit_drawing_rectangle(struct sna *sna,
 			    const struct sna_composite_op *op)
 {
@@ -862,7 +862,7 @@ gen7_emit_drawing_rectangle(struct sna *sna,
 
 	if (sna->render_state.gen7.drawrect_limit == limit &&
 	    sna->render_state.gen7.drawrect_offset == offset)
-		return;
+		return true;
 
 	sna->render_state.gen7.drawrect_offset = offset;
 	sna->render_state.gen7.drawrect_limit = limit;
@@ -871,6 +871,7 @@ gen7_emit_drawing_rectangle(struct sna *sna,
 	OUT_BATCH(0);
 	OUT_BATCH(limit);
 	OUT_BATCH(offset);
+	return false;
 }
 
 static void
@@ -1016,7 +1017,7 @@ gen7_emit_state(struct sna *sna,
 	gen7_emit_vertex_elements(sna, op);
 
 	need_stall |= gen7_emit_binding_table(sna, wm_binding_table);
-	gen7_emit_drawing_rectangle(sna, op);
+	need_stall &= gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen7_emit_pipe_invalidate(sna);
commit 4501e131e6b737cb8f2581c8b1f7ea9d29a8e912
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 16:38:32 2012 +0100

    sna/gen7: prefer using RENDER copy
    
    Further testing and the balance of doubt swings in favour of using the
    3D pipeline for copies.
    
    For small copies the BLT unit is faster,
    2.14M/sec vs 1.71M/sec for comppixwin10
    
    And for large copies the RENDER pipeline is faster,
    13000/sec vs 8000/sec for comppixwin500
    
    I think the implication is that we are not efficiently utilising the EU
    for small primitives - i.e. something that we might be able to improve.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 9c92e5a..e3c8269 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2448,7 +2448,7 @@ try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
-	if (prefer_blt_ring(sna)) {
+	if (sna->kgem.ring == KGEM_BLT) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
 		return TRUE;
 	}
@@ -2462,8 +2462,6 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return TRUE;
-		if (src->pDrawable)
-			return TRUE;
 	}
 
 	return FALSE;
@@ -3329,7 +3327,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
 				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
-	return (prefer_blt_ring(sna) ||
+	return (sna->kgem.ring == KGEM_BLT ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
commit 3da56c48b7820ec77d704c5a16670eb86a6f673f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 15:14:23 2012 +0100

    sna/gen7: Prefer using BLT rather than redirect for copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 3a9a856..9c92e5a 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3400,6 +3400,32 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return TRUE;
 
+	if ((too_large(dst->drawable.width, dst->drawable.height) ||
+	     too_large(src->drawable.width, src->drawable.height)) &&
+	    sna_blt_compare_depth(&src->drawable, &dst->drawable)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (box[i].x1 < extents.x1)
+				extents.x1 = box[i].x1;
+			if (box[i].y1 < extents.y1)
+				extents.y1 = box[i].y1;
+
+			if (box[i].x2 > extents.x2)
+				extents.x2 = box[i].x2;
+			if (box[i].y2 > extents.y2)
+				extents.y2 = box[i].y2;
+		}
+		if (too_large(extents.x2 - extents.x1, extents.y2 - extents.y1) &&
+		    sna_blt_copy_boxes(sna, alu,
+				       src_bo, src_dx, src_dy,
+				       dst_bo, dst_dx, dst_dy,
+				       dst->drawable.bitsPerPixel,
+				       box, n))
+			return TRUE;
+	}
+
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    overlaps(src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
@@ -3449,6 +3475,7 @@ fallback_blt:
 			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
+
 		if (!sna_render_composite_redirect(sna, &tmp,
 						   extents.x1 + dst_dx,
 						   extents.y1 + dst_dy,
commit b1f8386db6e9b3eea1bdbf8cde90f33792640ce8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 23:28:14 2012 +0100

    sna/gen7: Emit a pipeline flush after every render operation
    
    For whatever reason, this produces a 30% improvement with the fish-demo
    (500 -> 660 fps on i7-3730qm at 1024x768). However, it does cause about
    a 5% regression in aa10text. We can appear to alleviate that by only
    doing the flush when the composite op != PictOpSrc.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c474767..3a9a856 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -953,8 +953,8 @@ gen7_emit_vertex_elements(struct sna *sna,
 	}
 }
 
-static void
-gen7_emit_flush(struct sna *sna)
+inline static void
+gen7_emit_pipe_invalidate(struct sna *sna)
 {
 	OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
 	OUT_BATCH(GEN7_PIPE_CONTROL_WC_FLUSH |
@@ -964,6 +964,25 @@ gen7_emit_flush(struct sna *sna)
 	OUT_BATCH(0);
 }
 
+inline static void
+gen7_emit_pipe_flush(struct sna *sna)
+{
+	OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
+	OUT_BATCH(GEN7_PIPE_CONTROL_WC_FLUSH);
+	OUT_BATCH(0);
+	OUT_BATCH(0);
+}
+
+inline static void
+gen7_emit_pipe_stall(struct sna *sna)
+{
+	OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
+	OUT_BATCH(GEN7_PIPE_CONTROL_CS_STALL |
+		  GEN7_PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	OUT_BATCH(0);
+	OUT_BATCH(0);
+}
+
 static void
 gen7_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
@@ -1000,18 +1019,15 @@ gen7_emit_state(struct sna *sna,
 	gen7_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
-		gen7_emit_flush(sna);
+		gen7_emit_pipe_invalidate(sna);
 		kgem_clear_dirty(&sna->kgem);
 		kgem_bo_mark_dirty(op->dst.bo);
 		need_stall = false;
 	}
-	if (need_stall) {
-		OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
-		OUT_BATCH(GEN7_PIPE_CONTROL_CS_STALL |
-			  GEN7_PIPE_CONTROL_STALL_AT_SCOREBOARD);
-		OUT_BATCH(0);
-		OUT_BATCH(0);
-	}
+	if (need_stall)
+		gen7_emit_pipe_stall(sna);
+
+	sna->render_state.gen7.emit_flush = op->op != PictOpSrc;
 }
 
 static void gen7_magic_ca_pass(struct sna *sna,
@@ -1025,7 +1041,7 @@ static void gen7_magic_ca_pass(struct sna *sna,
 	DBG(("%s: CA fixup (%d -> %d)\n", __FUNCTION__,
 	     sna->render.vertex_start, sna->render.vertex_index));
 
-	gen7_emit_flush(sna);
+	gen7_emit_pipe_invalidate(sna);
 
 	gen7_emit_cc(sna, gen7_get_blend(PictOpAdd, TRUE, op->dst.format));
 	gen7_emit_wm(sna,
@@ -1055,6 +1071,11 @@ static void gen7_vertex_flush(struct sna *sna)
 	sna->kgem.batch[sna->render_state.gen7.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen7.vertex_offset = 0;
+
+	if (sna->render_state.gen7.emit_flush) {
+		gen7_emit_pipe_flush(sna);
+		sna->render_state.gen7.emit_flush = false;
+	}
 }
 
 static int gen7_vertex_finish(struct sna *sna)
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index fd42b21..65ca359 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -461,6 +461,7 @@ struct gen7_render_state {
 	uint16_t surface_table;
 
 	Bool needs_invariant;
+	Bool emit_flush;
 };
 
 struct sna_static_stream {
commit d02e6d81420a114c9622bbdaf90fc3ae5d4b15a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 21 16:10:02 2012 +0100

    Encode the third pipe using the HIGH_CRTC shift for vblanks
    
    The original vblank interface only understood 2 pipes (primary and
    secondary) and so selecting the third pipe (introduced with IvyBridge)
    requires use of the HIGH_CRTC. Using the second pipe where we meant the
    third pipe could result in some spurious timings when waiting on the
    vblank.
    
    Reported-by: Adam Jackson <ajax at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 88ab249..ed5078e 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1238,6 +1238,16 @@ void I830DRI2FlipEventHandler(unsigned int frame, unsigned int tv_sec,
 	i830_dri2_del_frame_event(drawable, flip_info);
 }
 
+static uint32_t pipe_select(int pipe)
+{
+	if (pipe > 1)
+		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
+	else if (pipe > 0)
+		return DRM_VBLANK_SECONDARY;
+	else
+		return 0;
+}
+
 /*
  * ScheduleSwap is responsible for requesting a DRM vblank event for the
  * appropriate frame.
@@ -1307,9 +1317,7 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	I830DRI2ReferenceBuffer(back);
 
 	/* Get current count */
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 	ret = drmWaitVBlank(intel->drmSubFD, &vbl);
 	if (ret) {
@@ -1345,9 +1353,8 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (flip && I830DRI2ScheduleFlip(intel, draw, swap_info))
 			return TRUE;
 
-		vbl.request.type =  DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
+		vbl.request.type =
+			DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
 
 		/* If non-pageflipping, but blitting/exchanging, we need to use
 		 * DRM_VBLANK_NEXTONMISS to avoid unreliable timestamping later
@@ -1355,8 +1362,6 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		 */
 		if (flip == 0)
 			vbl.request.type |= DRM_VBLANK_NEXTONMISS;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
 
 		/* If target_msc already reached or passed, set it to
 		 * current_msc to ensure we return a reasonable value back
@@ -1386,11 +1391,10 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	 * and we need to queue an event that will satisfy the divisor/remainder
 	 * equation.
 	 */
-	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
+	vbl.request.type =
+		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
 	if (flip == 0)
 		vbl.request.type |= DRM_VBLANK_NEXTONMISS;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
 
 	vbl.request.sequence = current_msc - (current_msc % divisor) +
 		remainder;
@@ -1463,9 +1467,7 @@ I830DRI2GetMSC(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
 		return TRUE;
 	}
 
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 
 	ret = drmWaitVBlank(intel->drmSubFD, &vbl);
@@ -1531,9 +1533,7 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	}
 
 	/* Get current count */
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 	ret = drmWaitVBlank(intel->drmSubFD, &vbl);
 	if (ret) {
@@ -1564,9 +1564,8 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 		 */
 		if (current_msc >= target_msc)
 			target_msc = current_msc;
-		vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
+		vbl.request.type =
+			DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
 		vbl.request.sequence = target_msc;
 		vbl.request.signal = (unsigned long)wait_info;
 		ret = drmWaitVBlank(intel->drmSubFD, &vbl);
@@ -1591,9 +1590,8 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	 * If we get here, target_msc has already passed or we don't have one,
 	 * so we queue an event that will satisfy the divisor/remainder equation.
 	 */
-	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type =
+		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
 
 	vbl.request.sequence = current_msc - (current_msc % divisor) +
 	    remainder;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 5390b5a..46a43e9 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1032,6 +1032,21 @@ can_flip(struct sna * sna,
 	return TRUE;
 }
 
+inline static uint32_t pipe_select(int pipe)
+{
+	/* The third pipe was introduced with IvyBridge long after
+	 * multiple pipe support was added to the kernel, hence
+	 * we can safely ignore the capability check - if we have more
+	 * than two pipes, we can assume that they are fully supported.
+	 */
+	if (pipe > 1)
+		return pipe << DRM_VBLANK_HIGH_CRTC_SHIFT;
+	else if (pipe > 0)
+		return DRM_VBLANK_SECONDARY;
+	else
+		return 0;
+}
+
 static void sna_dri_vblank_handle(int fd,
 				  unsigned int frame, unsigned int tv_sec,
 				  unsigned int tv_usec,
@@ -1086,9 +1101,8 @@ static void sna_dri_vblank_handle(int fd,
 				VG_CLEAR(vbl);
 				vbl.request.type =
 					DRM_VBLANK_RELATIVE |
-					DRM_VBLANK_EVENT;
-				if (info->pipe > 0)
-					vbl.request.type |= DRM_VBLANK_SECONDARY;
+					DRM_VBLANK_EVENT |
+					pipe_select(info->pipe);
 				vbl.request.sequence = 1;
 				vbl.request.signal = (unsigned long)info;
 				if (!sna_wait_vblank(sna, &vbl))
@@ -1437,9 +1451,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		sna_dri_reference_buffer(back);
 
 		/* Get current count */
-		vbl.request.type = DRM_VBLANK_RELATIVE;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
+		vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 		vbl.request.sequence = 0;
 		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(info);
@@ -1452,9 +1464,8 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		vbl.request.type =
 			DRM_VBLANK_ABSOLUTE |
-			DRM_VBLANK_EVENT;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
+			DRM_VBLANK_EVENT |
+			pipe_select(pipe);
 
 		/*
 		 * If divisor is zero, or current_msc is smaller than target_msc
@@ -1610,9 +1621,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			vbl.request.type =
 				DRM_VBLANK_RELATIVE |
 				DRM_VBLANK_NEXTONMISS |
-				DRM_VBLANK_EVENT;
-			if (pipe > 0)
-				vbl.request.type |= DRM_VBLANK_SECONDARY;
+				DRM_VBLANK_EVENT |
+				pipe_select(pipe);
 			vbl.request.sequence = 0;
 			vbl.request.signal = (unsigned long)info;
 			if (sna_wait_vblank(sna, &vbl) == 0)
@@ -1625,9 +1635,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	}
 
 	/* Get current count */
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 	if (sna_wait_vblank(sna, &vbl))
 		goto blit_fallback;
@@ -1651,9 +1659,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 		 vbl.request.type =
 			 DRM_VBLANK_ABSOLUTE |
-			 DRM_VBLANK_EVENT;
-		 if (pipe > 0)
-			 vbl.request.type |= DRM_VBLANK_SECONDARY;
+			 DRM_VBLANK_EVENT |
+			 pipe_select(pipe);
 		 vbl.request.sequence = *target_msc;
 		 vbl.request.signal = (unsigned long)info;
 		 if (sna_wait_vblank(sna, &vbl))
@@ -1674,9 +1681,10 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		     (int)divisor));
 
 	vbl.request.type =
-		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | DRM_VBLANK_NEXTONMISS;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+		DRM_VBLANK_ABSOLUTE |
+		DRM_VBLANK_EVENT |
+		DRM_VBLANK_NEXTONMISS |
+		pipe_select(pipe);
 
 	vbl.request.sequence = current_msc - current_msc % divisor + remainder;
 	/*
@@ -1872,9 +1880,7 @@ sna_dri_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
 
 	VG_CLEAR(vbl);
 
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 
 	if (sna_wait_vblank(sna, &vbl)) {
@@ -1924,9 +1930,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	VG_CLEAR(vbl);
 
 	/* Get current count */
-	vbl.request.type = DRM_VBLANK_RELATIVE;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type = DRM_VBLANK_RELATIVE | pipe_select(pipe);
 	vbl.request.sequence = 0;
 	if (sna_wait_vblank(sna, &vbl))
 		goto out_complete;
@@ -1964,9 +1968,10 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	 * client.
 	 */
 	if (divisor == 0 || current_msc < target_msc) {
-		vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
+		vbl.request.type =
+			DRM_VBLANK_ABSOLUTE |
+			DRM_VBLANK_EVENT |
+			pipe_select(pipe);
 		vbl.request.sequence = target_msc;
 		vbl.request.signal = (unsigned long)info;
 		if (sna_wait_vblank(sna, &vbl))
@@ -1981,9 +1986,8 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	 * If we get here, target_msc has already passed or we don't have one,
 	 * so we queue an event that will satisfy the divisor/remainder equation.
 	 */
-	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-	if (pipe > 0)
-		vbl.request.type |= DRM_VBLANK_SECONDARY;
+	vbl.request.type =
+		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | pipe_select(pipe);
 
 	vbl.request.sequence = current_msc - current_msc % divisor + remainder;
 
@@ -2024,7 +2028,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	if (sna->kgem.wedged) {
+	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable DRI2 whilst the GPU is wedged\n");
 		return FALSE;
commit f8b67be8d3b5d5cbe730ba5dba3edd2d30a99d9f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 12:39:19 2012 +0100

    sna: Don't clear the needs_flush flag after emitting a flush on the busy bo
    
    We use that flag to check whether we need to check whether the bo is
    still busy upon destruction, so only clear it if the bo is marked as
    idle.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0ab8033..408ad03 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -280,7 +280,6 @@ static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
 	 * not actualy care.
 	 */
 	(void)__kgem_flush(kgem, bo);
-	bo->needs_flush = false;
 }
 
 static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
commit 5419bbb483af595d7021e49debef7700c599217e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 11:21:52 2012 +0100

    sna/gen7: Prefer BLT for copies
    
    It's faster for where the cost of the extra batches and ring switching
    do not dominate...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 915e928..c474767 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2412,14 +2412,14 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	return TRUE;
 }
 
-static bool prefer_blt_ring(struct sna *sna)
+inline static bool can_switch_rings(struct sna *sna)
 {
-	return sna->kgem.ring != KGEM_RENDER;
+	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
-static bool can_switch_rings(struct sna *sna)
+inline static bool prefer_blt_ring(struct sna *sna)
 {
-	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
+	return sna->kgem.ring != KGEM_RENDER || can_switch_rings(sna);
 }
 
 static Bool
@@ -3308,7 +3308,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
 				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
-	return (sna->kgem.ring == KGEM_BLT ||
+	return (prefer_blt_ring(sna) ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
@@ -3715,9 +3715,7 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
-	return (can_switch_rings(sna) ||
-		prefer_blt_ring(sna) ||
-		untiled_tlb_miss(bo));
+	return prefer_blt_ring(sna) || untiled_tlb_miss(bo);
 }
 
 static Bool
commit 1c0bb8c4c93a43e2932429a9f6c23d91f1be2060
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 10:57:40 2012 +0100

    sna/gen7: Keep using RENDER paths for large pixmaps
    
    As the 3D pipeline is quite versatile and we only need to force BLT if
    we cannot extract the subregion.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index bea358e..915e928 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2438,21 +2438,6 @@ try_blt(struct sna *sna,
 		return TRUE;
 	}
 
-	if (too_large(dst->pDrawable->width, dst->pDrawable->height)) {
-		DBG(("%s: dst too large for 3D pipe (%d, %d)\n",
-		     __FUNCTION__,
-		     dst->pDrawable->width, dst->pDrawable->height));
-		return TRUE;
-	}
-
-	if (src->pDrawable &&
-	    too_large(src->pDrawable->width, src->pDrawable->height)) {
-		DBG(("%s: src too large for 3D pipe (%d, %d)\n",
-		     __FUNCTION__,
-		     src->pDrawable->width, src->pDrawable->height));
-		return TRUE;
-	}
-
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return TRUE;
commit b238f64e8a53883cfe7c568e37ef18bbee77996e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 20 10:46:59 2012 +0100

    sna/gen[67]: Prefer to not force BLT paths for large pixmaps
    
    The sampler can in fact handle subregions of large pixmaps quite well,
    and so we prefer to keep using the 3D pipeline so long as the operation
    fits in. If not, then switch to the BLT in order to avoid the temporary
    surface dance.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index b927d08..563e04c 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3237,9 +3237,7 @@ static bool prefer_blt_bo(struct sna *sna,
 			  PixmapPtr pixmap,
 			  struct kgem_bo *bo)
 {
-	return (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
-		untiled_tlb_miss(bo)) &&
-		kgem_bo_can_blt(&sna->kgem, bo);
+	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
 }
 
 static inline bool prefer_blt_copy(struct sna *sna,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 3c07d8d..bea358e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3316,9 +3316,7 @@ static bool prefer_blt_bo(struct sna *sna,
 			  PixmapPtr pixmap,
 			  struct kgem_bo *bo)
 {
-	return (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
-		untiled_tlb_miss(bo)) &&
-		kgem_bo_can_blt(&sna->kgem, bo);
+	return untiled_tlb_miss(bo) && kgem_bo_can_blt(&sna->kgem, bo);
 }
 
 static inline bool prefer_blt_copy(struct sna *sna,
commit 38f06a351fdd35fa5c6136e9a25e034a812a94c2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 22:22:12 2012 +0100

    uxa: Fix second regression in glyph fallback from 64a4bc
    
    To complete my show of incompetence for the evening, not only do we have
    to restore the original source when compositing the mask onto the
    destination, we also need to restore the original dst (rather than
    composite the mask onto the mask!).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 0ae0568..6bdf101 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -451,7 +451,7 @@ uxa_check_glyphs(CARD8 op,
 {
 	pixman_image_t *image;
 	PixmapPtr scratch;
-	PicturePtr mask, mask_src = NULL, white = NULL;
+	PicturePtr mask, mask_src = NULL, mask_dst = NULL, white = NULL;
 	int width = 0, height = 0;
 	int x, y, n;
 	int xDst = list->xOff, yDst = list->yOff;
@@ -511,6 +511,9 @@ uxa_check_glyphs(CARD8 op,
 
 		mask_src = src;
 		src = white;
+
+		mask_dst = dst;
+		dst = mask;
 	} else {
 		mask = dst;
 		x = 0;
@@ -547,7 +550,7 @@ uxa_check_glyphs(CARD8 op,
 	if (maskFormat) {
 		x = extents.x1;
 		y = extents.y1;
-		CompositePicture(mask_op, mask_src, mask, dst,
+		CompositePicture(mask_op, mask_src, mask, mask_dst,
 				 xSrc + x - xDst,
 				 ySrc + y - yDst,
 				 0, 0,
commit fda9faee755cb35906ca1179a568332ef2de35a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 21:01:47 2012 +0100

    uxa: Use the original src for fallback glyph compositing
    
    In 64a4bcb8cefff, I introduced a WHITE source for the purposes of
    accumulating the glyph mask correctly. Unfortunately I neglected to
    restore the original source picture for compositing the glyph mask on
    the destination, resulting in a use-after-free and then corruption.
    
    Reported-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index e83464e..0ae0568 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -451,12 +451,12 @@ uxa_check_glyphs(CARD8 op,
 {
 	pixman_image_t *image;
 	PixmapPtr scratch;
-	PicturePtr mask, white = NULL;
+	PicturePtr mask, mask_src = NULL, white = NULL;
 	int width = 0, height = 0;
 	int x, y, n;
 	int xDst = list->xOff, yDst = list->yOff;
 	BoxRec extents = { 0, 0, 0, 0 };
-	CARD8 mask_op;
+	CARD8 mask_op = 0;
 
 	if (maskFormat) {
 		pixman_format_code_t format;
@@ -504,10 +504,13 @@ uxa_check_glyphs(CARD8 op,
 		y = -extents.y1;
 
 		color.red = color.green = color.blue = color.alpha = 0xffff;
-		src = white = CreateSolidPicture(0, &color, &error);
+		white = CreateSolidPicture(0, &color, &error);
 
 		mask_op = op;
 		op = PictOpAdd;
+
+		mask_src = src;
+		src = white;
 	} else {
 		mask = dst;
 		x = 0;
@@ -544,7 +547,7 @@ uxa_check_glyphs(CARD8 op,
 	if (maskFormat) {
 		x = extents.x1;
 		y = extents.y1;
-		CompositePicture(mask_op, src, mask, dst,
+		CompositePicture(mask_op, mask_src, mask, dst,
 				 xSrc + x - xDst,
 				 ySrc + y - yDst,
 				 0, 0,
commit 8141e290b1ac7c4d1524bb389a84a8f375df4634
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 20:24:33 2012 +0100

    sna: Explain why we ignore the busy status result during kgem_bo_flush()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 40730c7..0ab8033 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -275,8 +275,11 @@ static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
 	if (!bo->needs_flush)
 		return;
 
-	__kgem_flush(kgem, bo);
-
+	/* If the kernel fails to emit the flush, then it will be forced when
+	 * we assume direct access. And as the useual failure is EIO, we do
+	 * not actualy care.
+	 */
+	(void)__kgem_flush(kgem, bo);
 	bo->needs_flush = false;
 }
 
commit eb1d07624e5aeb7b5db7ceb46975091e92185d63
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 16:00:13 2012 +0100

    sna: Ensure extents is initialised if short-circuit use-cpu-bo
    
    As we may end up attempting to use the GPU bo if the CPU bo is busy, we
    need to make sure we have initialised the damage extents first.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 60e1a80..5b0b33d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2288,6 +2288,7 @@ move_to_gpu:
 	if (!sna_pixmap_move_area_to_gpu(pixmap, &extents,
 					 MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: failed to move-to-gpu, fallback\n", __FUNCTION__));
+		assert(priv->gpu_bo == NULL);
 		goto use_cpu_bo;
 	}
 
@@ -2330,8 +2331,16 @@ use_cpu_bo:
 			return NULL;
 
 		/* Both CPU and GPU are busy, prefer to use the GPU */
-		if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)) {
+			get_drawable_deltas(drawable, pixmap, &dx, &dy);
+
+			extents = *box;
+			extents.x1 += dx;
+			extents.x2 += dx;
+			extents.y1 += dy;
+			extents.y2 += dy;
 			goto move_to_gpu;
+		}
 
 		priv->mapped = false;
 		pixmap->devPrivate.ptr = NULL;
commit 9f216e159bd05ddd269eb8ddf3ca7a407c2901e2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 15:57:31 2012 +0100

    sna: Assert expected return values
    
    Keep the semantic analyser happy by consuming the expected return value
    with an assert.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 13f91c2..60e1a80 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5200,15 +5200,15 @@ sna_fill_spans__gpu(DrawablePtr drawable, GCPtr gc, int n,
 		}
 
 		if (gc->fillStyle == FillTiled) {
-			sna_poly_fill_rect_tiled_blt(drawable,
-						     data->bo, NULL,
-						     gc, n, rect,
-						     &data->region.extents, 2);
+			(void)sna_poly_fill_rect_tiled_blt(drawable,
+							   data->bo, NULL,
+							   gc, n, rect,
+							   &data->region.extents, 2);
 		} else {
-			sna_poly_fill_rect_stippled_blt(drawable,
-							data->bo, NULL,
-							gc, n, rect,
-							&data->region.extents, 2);
+			(void)sna_poly_fill_rect_stippled_blt(drawable,
+							      data->bo, NULL,
+							      gc, n, rect,
+							      &data->region.extents, 2);
 		}
 		free (rect);
 	}
@@ -6212,8 +6212,12 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 				}
 				b->x2++;
 				b->y2++;
-				if (oc1 | oc2)
-					box_intersect(b, extents);
+				if (oc1 | oc2) {
+					bool intersects;
+
+					intersects = box_intersect(b, extents);
+					assert(intersects);
+				}
 				if (++b == last_box) {
 					ret = &&rectangle_continue;
 					goto *jump;
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 4a9dbff..7690afe 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1205,16 +1205,18 @@ blt_put_composite(struct sna *sna,
 			    data, pitch);
 	} else {
 		BoxRec box;
+		bool ok;
 
 		box.x1 = dst_x;
 		box.y1 = dst_y;
 		box.x2 = dst_x + r->width;
 		box.y2 = dst_y + r->height;
 
-		sna_write_boxes(sna, dst,
-				dst_priv->gpu_bo, 0, 0,
-				data, pitch, src_x, src_y,
-				&box, 1);
+		ok = sna_write_boxes(sna, dst,
+				     dst_priv->gpu_bo, 0, 0,
+				     data, pitch, src_x, src_y,
+				     &box, 1);
+		assert(ok);
 	}
 }
 
@@ -1242,12 +1244,15 @@ fastcall static void blt_put_composite_box(struct sna *sna,
 		sna_replace(sna, op->dst.pixmap, &dst_priv->gpu_bo,
 			    data, pitch);
 	} else {
-		sna_write_boxes(sna, op->dst.pixmap,
-				op->dst.bo, op->dst.x, op->dst.y,
-				src->devPrivate.ptr,
-				src->devKind,
-				op->u.blt.sx, op->u.blt.sy,
-				box, 1);
+		bool ok;
+
+		ok = sna_write_boxes(sna, op->dst.pixmap,
+				     op->dst.bo, op->dst.x, op->dst.y,
+				     src->devPrivate.ptr,
+				     src->devKind,
+				     op->u.blt.sx, op->u.blt.sy,
+				     box, 1);
+		assert(ok);
 	}
 }
 
@@ -1276,12 +1281,15 @@ static void blt_put_composite_boxes(struct sna *sna,
 		sna_replace(sna, op->dst.pixmap, &dst_priv->gpu_bo,
 			    data, pitch);
 	} else {
-		sna_write_boxes(sna, op->dst.pixmap,
-				op->dst.bo, op->dst.x, op->dst.y,
-				src->devPrivate.ptr,
-				src->devKind,
-				op->u.blt.sx, op->u.blt.sy,
-				box, n);
+		bool ok;
+
+		ok = sna_write_boxes(sna, op->dst.pixmap,
+				     op->dst.bo, op->dst.x, op->dst.y,
+				     src->devPrivate.ptr,
+				     src->devKind,
+				     op->u.blt.sx, op->u.blt.sy,
+				     box, n);
+		assert(ok);
 	}
 }
 
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 85453d3..c14af3c 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -845,17 +845,19 @@ sna_composite_rectangles(CARD8		 op,
 			       pixmap->drawable.width, pixmap->drawable.height);
 		priv->undamaged = false;
 		if (op <= PictOpSrc) {
-			priv->clear = true;
+			bool ok = true;
+
 			priv->clear_color = 0;
 			if (op == PictOpSrc)
-				sna_get_pixel_from_rgba(&priv->clear_color,
-							color->red,
-							color->green,
-							color->blue,
-							color->alpha,
-							dst->format);
-			DBG(("%s: marking clear [%08x]\n",
-			     __FUNCTION__, priv->clear_color));
+				ok = sna_get_pixel_from_rgba(&priv->clear_color,
+							     color->red,
+							     color->green,
+							     color->blue,
+							     color->alpha,
+							     dst->format);
+			priv->clear = ok;
+			DBG(("%s: marking clear [%08x]? %d\n",
+			     __FUNCTION__, priv->clear_color, ok));
 		}
 	}
 	if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 199fdd5..49f7c5e 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1886,12 +1886,15 @@ sna_render_composite_redirect_done(struct sna *sna,
 		assert(op->dst.bo != t->real_bo);
 
 		if (t->box.x2 > t->box.x1) {
+			bool ok;
+
 			DBG(("%s: copying temporary to dst\n", __FUNCTION__));
-			sna_blt_copy_boxes(sna, GXcopy,
-					   op->dst.bo, -t->box.x1, -t->box.y1,
-					   t->real_bo, 0, 0,
-					   op->dst.pixmap->drawable.bitsPerPixel,
-					   &t->box, 1);
+			ok = sna_blt_copy_boxes(sna, GXcopy,
+						op->dst.bo, -t->box.x1, -t->box.y1,
+						t->real_bo, 0, 0,
+						op->dst.pixmap->drawable.bitsPerPixel,
+						&t->box, 1);
+			assert(ok);
 		}
 		if (t->damage) {
 			DBG(("%s: combining damage, offset=(%d, %d)\n",
commit 2dc93b2a6c832ce8b972de90d09080e860dcd40f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 15:34:09 2012 +0100

    sna: Check results from syscalls
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 3a9fe6f..f1bb10d 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -753,7 +753,10 @@ I830HandleUEvents(int fd, void *closure)
 		return;
 
 	udev_devnum = udev_device_get_devnum(dev);
-	fstat(intel->drmSubFD, &s);
+	if (fstat(intel->drmSubFD, &s)) {
+		udev_device_unref(dev);
+		return;
+	}
 	/*
 	 * Check to make sure this event is directed at our
 	 * device (by comparing dev_t values), then make
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 016ff48..bb1b77d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -567,7 +567,8 @@ static int gem_param(struct kgem *kgem, int name)
 	VG_CLEAR(gp);
 	gp.param = name;
 	gp.value = &v;
-	drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return -1;
 
 	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
 	return v;
@@ -2129,7 +2130,9 @@ void kgem_cleanup_cache(struct kgem *kgem)
 		set_domain.handle = rq->bo->handle;
 		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
 		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+		(void)drmIoctl(kgem->fd,
+			       DRM_IOCTL_I915_GEM_SET_DOMAIN,
+			       &set_domain);
 	}
 
 	kgem_retire(kgem);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 76ae24e..a02ff76 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -353,7 +353,7 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 		/* make the fd nonblocking to handle event loops */
 		flags = fcntl(fd, F_GETFL, 0);
 		if (flags != -1)
-			fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+			(void)fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 
 		dev->fd = fd;
 		dev->open_count = 1;
@@ -392,10 +392,14 @@ static bool has_pageflipping(struct sna *sna)
 	if (sna->flags & SNA_NO_WAIT)
 		return false;
 
+	v = 0;
+
 	VG_CLEAR(gp);
 	gp.param = I915_PARAM_HAS_PAGEFLIPPING;
 	gp.value = &v;
-	drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return false;
 
 	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
 	return v > 0;
@@ -625,7 +629,11 @@ sna_handle_uevents(int fd, void *closure)
 		return;
 
 	udev_devnum = udev_device_get_devnum(dev);
-	fstat(sna->kgem.fd, &s);
+	if (fstat(sna->kgem.fd, &s)) {
+		udev_device_unref(dev);
+		return;
+	}
+
 	/*
 	 * Check to make sure this event is directed at our
 	 * device (by comparing dev_t values), then make
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 612711f..d47f745 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -148,7 +148,10 @@ static void sna_video_overlay_off(struct sna *sna)
 
 	request.flags = 0;
 
-	drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &request);
+	/* Not much we can do if the hardware dies before we turn it off! */
+	(void)drmIoctl(sna->kgem.fd,
+		       DRM_IOCTL_I915_OVERLAY_PUT_IMAGE,
+		       &request);
 }
 
 static void sna_video_overlay_stop(ScrnInfoPtr scrn,
commit 06634604abf15fdd27dd007fcf81595da994146b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 15:28:43 2012 +0100

    Initialise adaptors to 0 in case xf86XVListGenericAdaptors does not
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_video.c b/src/intel_video.c
index 235845f..0e9845d 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -329,11 +329,9 @@ void I830InitVideo(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
-	XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
+	XF86VideoAdaptorPtr *adaptors = NULL, *newAdaptors = NULL;
 	XF86VideoAdaptorPtr overlayAdaptor = NULL, texturedAdaptor = NULL;
-	int num_adaptors;
-
-	num_adaptors = xf86XVListGenericAdaptors(scrn, &adaptors);
+	int num_adaptors = xf86XVListGenericAdaptors(scrn, &adaptors);
 	/* Give our adaptor list enough space for the overlay and/or texture video
 	 * adaptors.
 	 */
diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 440f9f7..56d04a4 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -155,10 +155,8 @@ static Atom xvBrightness, xvContrast, xvColorKey;
 void I810InitVideo(ScreenPtr screen)
 {
     ScrnInfoPtr pScrn = xf86ScreenToScrn(screen);
-    XF86VideoAdaptorPtr *adaptors;
-    int num_adaptors;
-
-    num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
+    XF86VideoAdaptorPtr *adaptors = NULL;
+    int num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
 
     if (pScrn->bitsPerPixel != 8) {
 	XF86VideoAdaptorPtr newAdaptor;
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 6999548..08b848b 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -538,6 +538,7 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 	if (!xf86LoaderCheckSymbol("xf86XVListGenericAdaptors"))
 		return;
 
+	adaptors = NULL;
 	num_adaptors = xf86XVListGenericAdaptors(sna->scrn, &adaptors);
 	newAdaptors = realloc(adaptors,
 			      (num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr));
commit 8bfea58dbc634cadc399d3132030c591e086880c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 15:26:18 2012 +0100

    sna: Minor cleanups from semantic analyser in DBG
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index 5d6d175..1634225 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -205,22 +205,11 @@ static void gen3_update_vertex_elements_offsets(struct kgem *kgem)
 	int i, offset;
 
 	for (i = offset = 0; i < ARRAY_SIZE(state.ve); i++) {
-		int size;
-
 		if (!state.ve[i].valid)
 			continue;
 
-		size = 0;
-		switch (state.ve[i].type) {
-		case T_FLOAT16:
-			size = 4;
-			break;
-		case T_FLOAT32:
-			size = 4;
-			break;
-		}
 		state.ve[i].offset = offset;
-		offset += size * state.ve[i].size;
+		offset += 4 * state.ve[i].size;
 		state.num_ve = i;
 	}
 }
@@ -969,7 +958,7 @@ gen3_decode_load_state_immediate_1(struct kgem *kgem, uint32_t offset)
 					}
 					kgem_debug_print(data, offset, i, "S4: point_width=%i, line_width=%.1f,"
 						  "%s%s%s%s%s cullmode=%s, vfmt=%s%s%s%s%s%s%s%s "
-						  "%s%s\n",
+						  "%s%s%s\n",
 						  (data[i]>>23)&0x1ff,
 						  ((data[i]>>19)&0xf) / 2.0,
 						  data[i]&(0xf<<15)?" flatshade=":"",
@@ -1342,7 +1331,6 @@ gen3_decode_3d_1d(struct kgem *kgem, uint32_t offset)
 					  dword&(1<<5)?" normalized coords,":"",
 					  (dword>>1)&0xf,
 					  dword&(1<<0)?" deinterlacer,":"");
-				dword = data[i];
 				kgem_debug_print(data, offset, i++, "sampler %d SS4: border color\n",
 					  sampler);
 			}
commit 0a43d425670b883b04565296c0510e7ba03ba6de
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 14:46:58 2012 +0100

    uxa: Implement glyphs-to-dst to avoid fallbacks
    
    An earlier version was buggy and introduced corruption as it failed to
    fall back gracefully with ComponentAlpha glyphs. This is a much simpler
    implementation that composites each glyph individually, leaving it to the
    backend to optimise away state changes. It should still be many times
    faster than incurring the fallback...
    
    Reported-by: Oleksandr Natalenko <pfactum at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50508
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 0893779..e83464e 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -881,6 +881,65 @@ next_glyph:
 	return 0;
 }
 
+static int
+uxa_glyphs_to_dst(CARD8 op,
+		  PicturePtr pSrc,
+		  PicturePtr pDst,
+		  INT16 xSrc, INT16 ySrc,
+		  int nlist, GlyphListPtr list, GlyphPtr * glyphs)
+{
+	ScreenPtr screen = pDst->pDrawable->pScreen;
+	int x, y, n;
+
+	xSrc -= list->xOff;
+	ySrc -= list->yOff;
+	x = y = 0;
+	while (nlist--) {
+		x += list->xOff;
+		y += list->yOff;
+		n = list->len;
+		while (n--) {
+			GlyphPtr glyph = *glyphs++;
+			PicturePtr glyph_atlas;
+			int glyph_x, glyph_y;
+			struct uxa_glyph *priv;
+
+			if (glyph->info.width == 0 || glyph->info.height == 0)
+				goto next_glyph;
+
+			priv = uxa_glyph_get_private(glyph);
+			if (priv != NULL) {
+				glyph_x = priv->x;
+				glyph_y = priv->y;
+				glyph_atlas = priv->cache->picture;
+			} else {
+				glyph_atlas = uxa_glyph_cache(screen, glyph, &glyph_x, &glyph_y);
+				if (glyph_atlas == NULL) {
+					/* no cache for this glyph */
+					glyph_atlas = GetGlyphPicture(glyph, screen);
+					glyph_x = glyph_y = 0;
+				}
+			}
+
+			uxa_composite(op,
+				      pSrc, glyph_atlas, pDst,
+				      xSrc + x - glyph->info.x,
+				      ySrc + y - glyph->info.y,
+				      glyph_x, glyph_y,
+				      x - glyph->info.x,
+				      y - glyph->info.y,
+				      glyph->info.width, glyph->info.height);
+
+next_glyph:
+			x += glyph->info.xOff;
+			y += glyph->info.yOff;
+		}
+		list++;
+	}
+
+	return 0;
+}
+
 static Bool
 is_solid(PicturePtr picture)
 {
@@ -966,12 +1025,16 @@ fallback:
 		}
 	}
 
-	if (!maskFormat)
-		goto fallback;
-
-	if (uxa_glyphs_via_mask(op,
-				pSrc, pDst, maskFormat,
-				xSrc, ySrc,
-				nlist, list, glyphs))
-		goto fallback;
+	if (!maskFormat) {
+		if (uxa_glyphs_to_dst(op, pSrc, pDst,
+				      xSrc, ySrc,
+				      nlist, list, glyphs))
+			goto fallback;
+	} else {
+		if (uxa_glyphs_via_mask(op,
+					pSrc, pDst, maskFormat,
+					xSrc, ySrc,
+					nlist, list, glyphs))
+			goto fallback;
+	}
 }
commit 64a4bcb8ceffff8e4ee448d8a467620596703509
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 13:42:12 2012 +0100

    uxa: Use (white IN glyph) ADD mask to compose the glyph mask
    
    As pointed out by Soren Sandmann and Behdad Esfahbod, it is essential to
    use white IN glyph when adding to the mask so that the channel expansion
    is correctly performed when adding to an incompatible mask format.
    
    For example, loading alpha as the source results in the value 000a being
    added to the rgba glyph mask (for mixed subpixel rendering with
    grayscale glyphs), whereas the desired value is aaaa.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 7db094b..0893779 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -451,15 +451,17 @@ uxa_check_glyphs(CARD8 op,
 {
 	pixman_image_t *image;
 	PixmapPtr scratch;
-	PicturePtr mask;
+	PicturePtr mask, white = NULL;
 	int width = 0, height = 0;
 	int x, y, n;
 	int xDst = list->xOff, yDst = list->yOff;
 	BoxRec extents = { 0, 0, 0, 0 };
+	CARD8 mask_op;
 
 	if (maskFormat) {
 		pixman_format_code_t format;
 		CARD32 component_alpha;
+		xRenderColor color;
 		int error;
 
 		uxa_glyph_extents(nlist, list, glyphs, &extents);
@@ -500,6 +502,12 @@ uxa_check_glyphs(CARD8 op,
 
 		x = -extents.x1;
 		y = -extents.y1;
+
+		color.red = color.green = color.blue = color.alpha = 0xffff;
+		src = white = CreateSolidPicture(0, &color, &error);
+
+		mask_op = op;
+		op = PictOpAdd;
 	} else {
 		mask = dst;
 		x = 0;
@@ -514,24 +522,14 @@ uxa_check_glyphs(CARD8 op,
 			GlyphPtr glyph = *glyphs++;
 			PicturePtr g = GetGlyphPicture(glyph, dst->pDrawable->pScreen);
 			if (g) {
-				if (maskFormat) {
-					CompositePicture(PictOpAdd, g, NULL, mask,
-							 0, 0,
-							 0, 0,
-							 x - glyph->info.x,
-							 y - glyph->info.y,
-							 glyph->info.width,
-							 glyph->info.height);
-				} else {
-					CompositePicture(op, src, g, dst,
-							 xSrc + (x - glyph->info.x) - xDst,
-							 ySrc + (y - glyph->info.y) - yDst,
-							 0, 0,
-							 x - glyph->info.x,
-							 y - glyph->info.y,
-							 glyph->info.width,
-							 glyph->info.height);
-				}
+				CompositePicture(op, src, g, dst,
+						 xSrc + (x - glyph->info.x) - xDst,
+						 ySrc + (y - glyph->info.y) - yDst,
+						 0, 0,
+						 x - glyph->info.x,
+						 y - glyph->info.y,
+						 glyph->info.width,
+						 glyph->info.height);
 			}
 
 			x += glyph->info.xOff;
@@ -540,10 +538,13 @@ uxa_check_glyphs(CARD8 op,
 		list++;
 	}
 
+	if (white)
+		FreePicture(white, 0);
+
 	if (maskFormat) {
 		x = extents.x1;
 		y = extents.y1;
-		CompositePicture(op, src, mask, dst,
+		CompositePicture(mask_op, src, mask, dst,
 				 xSrc + x - xDst,
 				 ySrc + y - yDst,
 				 0, 0,
@@ -703,6 +704,23 @@ fallback:
 	}
 }
 
+static PicturePtr
+create_white_solid(ScreenPtr screen)
+{
+	PicturePtr white, ret = NULL;
+	xRenderColor color;
+	int error;
+
+	color.red = color.green = color.blue = color.alpha = 0xffff;
+	white = CreateSolidPicture(0, &color, &error);
+	if (white) {
+		ret = uxa_acquire_solid(screen, white->pSourcePict);
+		FreePicture(white, 0);
+	}
+
+	return ret;
+}
+
 static int
 uxa_glyphs_via_mask(CARD8 op,
 		    PicturePtr pSrc,
@@ -714,8 +732,8 @@ uxa_glyphs_via_mask(CARD8 op,
 	ScreenPtr screen = pDst->pDrawable->pScreen;
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 	CARD32 component_alpha;
-	PixmapPtr pixmap;
-	PicturePtr glyph_atlas, mask;
+	PixmapPtr pixmap, white_pixmap;
+	PicturePtr glyph_atlas, mask, white;
 	int xDst = list->xOff, yDst = list->yOff;
 	int x, y, width, height;
 	int dst_off_x, dst_off_y;
@@ -755,6 +773,17 @@ uxa_glyphs_via_mask(CARD8 op,
 		return -1;
 	}
 
+	white_pixmap = NULL;
+	white = create_white_solid(screen);
+	if (white)
+		white_pixmap = uxa_get_drawable_pixmap(white->pDrawable);
+	if (!white_pixmap) {
+		if (white)
+			FreePicture(white, 0);
+		screen->DestroyPixmap(pixmap);
+		return -1;
+	}
+
 	uxa_clear_pixmap(screen, uxa_screen, pixmap);
 
 	component_alpha = NeedsComponent(maskFormat->format);
@@ -763,8 +792,10 @@ uxa_glyphs_via_mask(CARD8 op,
 			      &component_alpha, serverClient, &error);
 	screen->DestroyPixmap(pixmap);
 
-	if (!mask)
+	if (!mask) {
+		FreePicture(white, 0);
 		return 1;
+	}
 
 	ValidatePicture(mask);
 
@@ -776,7 +807,7 @@ uxa_glyphs_via_mask(CARD8 op,
 		while (n--) {
 			GlyphPtr glyph = *glyphs++;
 			PicturePtr this_atlas;
-			int src_x, src_y;
+			int glyph_x, glyph_y;
 			struct uxa_glyph *priv;
 
 			if (glyph->info.width == 0 || glyph->info.height == 0)
@@ -784,34 +815,35 @@ uxa_glyphs_via_mask(CARD8 op,
 
 			priv = uxa_glyph_get_private(glyph);
 			if (priv != NULL) {
-				src_x = priv->x;
-				src_y = priv->y;
+				glyph_x = priv->x;
+				glyph_y = priv->y;
 				this_atlas = priv->cache->picture;
 			} else {
 				if (glyph_atlas) {
 					uxa_screen->info->done_composite(pixmap);
 					glyph_atlas = NULL;
 				}
-				this_atlas = uxa_glyph_cache(screen, glyph, &src_x, &src_y);
+				this_atlas = uxa_glyph_cache(screen, glyph, &glyph_x, &glyph_y);
 				if (this_atlas == NULL) {
 					/* no cache for this glyph */
 					this_atlas = GetGlyphPicture(glyph, screen);
-					src_x = src_y = 0;
+					glyph_x = glyph_y = 0;
 				}
 			}
 
 			if (this_atlas != glyph_atlas) {
-				PixmapPtr src_pixmap;
+				PixmapPtr glyph_pixmap;
 
 				if (glyph_atlas)
 					uxa_screen->info->done_composite(pixmap);
 
-				src_pixmap =
+				glyph_pixmap =
 					uxa_get_drawable_pixmap(this_atlas->pDrawable);
-				if (!uxa_pixmap_is_offscreen(src_pixmap) ||
+				if (!uxa_pixmap_is_offscreen(glyph_pixmap) ||
 				    !uxa_screen->info->prepare_composite(PictOpAdd,
-									 this_atlas, NULL, mask,
-									 src_pixmap, NULL, pixmap)) {
+									 white, this_atlas, mask,
+									 white_pixmap, glyph_pixmap, pixmap)) {
+					FreePicture(white, 0);
 					FreePicture(mask, 0);
 					return -1;
 				}
@@ -820,8 +852,8 @@ uxa_glyphs_via_mask(CARD8 op,
 			}
 
 			uxa_screen->info->composite(pixmap,
-						    src_x, src_y,
 						    0, 0,
+						    glyph_x, glyph_y,
 						    x - glyph->info.x,
 						    y - glyph->info.y,
 						    glyph->info.width,
@@ -844,6 +876,7 @@ next_glyph:
 		      dst_off_x, dst_off_y,
 		      width, height);
 
+	FreePicture(white, 0);
 	FreePicture(mask, 0);
 	return 0;
 }
commit 99845dcb3ba862269b29aec782e2bcef31c0403e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 10:37:10 2012 +0100

    Post Damage on the Screen Pixmap after a pageflip
    
    This issue was raised by Dave Airlie as he is trying to integrate
    multiple GPUs into the xserver, and a particular setup has a slave
    rendering device that copies the contents from the GPU over a
    DisplayLink USB adaptor. As such the slave device is listening for
    Damage on the Screen Pixmap and needs the update following pageflips.
    Since we already are posting damage for all the SwapBuffers paths other
    than pageflip, for consistency we should post damage along the pageflip
    path as well.
    
    Reported-by: Dave Airlie <airlied at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 6bf76d0..88ab249 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -860,6 +860,21 @@ intel_exchange_pixmap_buffers(struct intel_screen_private *intel, PixmapPtr fron
 	new_back->busy = -1;
 
 	intel_glamor_exchange_buffers(intel, front, back);
+
+	/* Post damage on the new front buffer so that listeners, such
+	 * as DisplayLink know take a copy and shove it over the USB.
+	 */
+	{
+		RegionRec region;
+
+		region.extents.x1 = region.extents.y1 = 0;
+		region.extents.x2 = front->drawable.width;
+		region.extents.y2 = front->drawable.height;
+		region.data = NULL;
+		DamageRegionAppend(&front->drawable, &region);
+		DamageRegionProcessPending(&front->drawable);
+	}
+
 	return new_front;
 }
 
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index b9f9b85..5390b5a 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -384,6 +384,7 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	RegionRec region;
 
 	sna_damage_all(&priv->gpu_damage,
 		       pixmap->drawable.width,
@@ -393,6 +394,16 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 
 	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 	priv->gpu_bo = ref(bo);
+
+	/* Post damage on the new front buffer so that listeners, such
+	 * as DisplayLink know take a copy and shove it over the USB.
+	 */
+	region.extents.x1 = region.extents.y1 = 0;
+	region.extents.x2 = pixmap->drawable.width;
+	region.extents.y2 = pixmap->drawable.height;
+	region.data = NULL;
+	DamageRegionAppend(&pixmap->drawable, &region);
+	DamageRegionProcessPending(&pixmap->drawable);
 }
 
 static struct kgem_bo *
commit 4acf727941a37651f672db3774281a437940c540
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 10:20:54 2012 +0100

    sna: Initialize the color value for fallback unaligned boxes
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=5047
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index cb9fd7c..92be480 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -51,6 +51,7 @@
 #endif
 
 #define NO_ACCEL 0
+#define FORCE_FALLBACK 0
 #define NO_ALIGNED_BOXES 0
 #define NO_UNALIGNED_BOXES 0
 #define NO_SCAN_CONVERTER 0
@@ -3109,7 +3110,8 @@ static inline uint32_t mul_4x8_8(uint32_t color, uint8_t alpha)
 {
 	uint32_t v;
 
-	v = multa(color, alpha, 24);
+	v = 0;
+	v |= multa(color, alpha, 24);
 	v |= multa(color, alpha, 16);
 	v |= multa(color, alpha, 8);
 	v |= multa(color, alpha, 0);
@@ -3308,6 +3310,7 @@ pixman:
 		pi.source = pixman_image_create_bits(PIXMAN_a8r8g8b8, 1, 1, NULL, 0);
 		pixman_image_set_repeat(pi.source, PIXMAN_REPEAT_NORMAL);
 		pi.bits = pixman_image_get_data(pi.source);
+		pi.color = color;
 		pi.op = op;
 
 		count = REGION_NUM_RECTS(&clip);
@@ -5327,7 +5330,7 @@ sna_composite_trapezoids(CARD8 op,
 		goto fallback;
 	}
 
-	force_fallback = false;
+	force_fallback = FORCE_FALLBACK;
 	if ((too_small(priv) || DAMAGE_IS_ALL(priv->cpu_damage)) &&
 	    !picture_is_gpu(src)) {
 		DBG(("%s: force fallbacks -- dst is too small, %dx%d\n",
commit b0b2d3c9663c29e9844aef1608416ae40c194b55
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 00:41:35 2012 +0100

    sna: Avoid copying uninitialised data during source picture upload
    
    If we have never written to a pixmap, then there will be neither a GPU
    or shadow pointer and we would attempt to copy a NULL pointer. In this
    case as the user is expecting to copy uninitialised data we are at
    liberty to replace those undefined values with the clear color.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 4dc7062..199fdd5 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -452,7 +452,8 @@ static struct kgem_bo *upload(struct sna *sna,
 			priv->mapped = false;
 		}
 		if (pixmap->devPrivate.ptr == NULL) {
-			assert(priv->ptr);
+			if (priv->ptr == NULL) /* uninitialised */
+				return NULL;
 			assert(priv->stride);
 			pixmap->devPrivate.ptr = priv->ptr;
 			pixmap->devKind = priv->stride;
commit 38472fcc53c5dceb98b96458183e6729b8311a43
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 00:37:12 2012 +0100

    sna: Double check that the source is busy before performing indirect reads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 01b8d2c..8d04548 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -122,7 +122,7 @@ static bool download_inplace(struct kgem *kgem, struct kgem_bo *bo)
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
-	return !kgem_bo_is_busy(bo) || bo->tiling == I915_TILING_NONE;
+	return !__kgem_bo_is_busy(kgem, bo) || bo->tiling == I915_TILING_NONE;
 }
 
 void sna_read_boxes(struct sna *sna,
@@ -1158,21 +1158,23 @@ bool sna_replace(struct sna *sna,
 {
 	struct kgem_bo *bo = *_bo;
 	struct kgem *kgem = &sna->kgem;
+	bool busy;
 	void *dst;
 
-	DBG(("%s(handle=%d, %dx%d, bpp=%d, tiling=%d)\n",
+	busy = __kgem_bo_is_busy(kgem, bo);
+	DBG(("%s(handle=%d, %dx%d, bpp=%d, tiling=%d) busy?=%d\n",
 	     __FUNCTION__, bo->handle,
 	     pixmap->drawable.width,
 	     pixmap->drawable.height,
 	     pixmap->drawable.bitsPerPixel,
-	     bo->tiling));
+	     bo->tiling, busy));
 	assert(!bo->flush);
 
-	if ((!kgem_bo_can_map(kgem, bo) || kgem_bo_is_busy(bo)) &&
+	if ((busy || !kgem_bo_can_map(kgem, bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
 		return true;
 
-	if (kgem_bo_is_busy(bo)) {
+	if (busy) {
 		struct kgem_bo *new_bo;
 
 		new_bo = kgem_create_2d(kgem,
commit 8cdfb8c24c8b49c88451714d80293c66d63e8c01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 19 00:36:41 2012 +0100

    sna: Fix up the shadow pointer on the source when copying
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index da50942..13f91c2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4023,7 +4023,18 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 			assert_pixmap_damage(dst_pixmap);
 		} else {
-			assert(!src_priv->gpu_bo);
+			if (src_priv) {
+				/* Fixup the shadow pointer as neccessary */
+				assert(!src_priv->gpu_bo);
+				assert(!src_priv->mapped);
+				if (src_pixmap->devPrivate.ptr == NULL) {
+					if (!src_priv->ptr) /* uninitialised!*/
+						goto out;
+					assert(src_priv->stride);
+					src_pixmap->devPrivate.ptr = src_priv->ptr;
+					src_pixmap->devKind = src_priv->stride;
+				}
+			}
 
 			if (!dst_priv->pinned && replaces) {
 				stride = src_pixmap->devKind;
commit 17f3a83fdc8c0ef5c12fb4be34d86021c0c865e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 23:48:16 2012 +0100

    sna: Review sna_copy_boxes
    
    A couple of ordering issue and more assertions.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 73808f7..da50942 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3792,11 +3792,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst_priv == NULL)
 		goto fallback;
 
-	if (src_priv == NULL &&
-	    !copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
-		DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
-		     __FUNCTION__));
-		goto fallback;
+	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
+		DBG(("%s: discarding cached upload\n", __FUNCTION__));
+		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
+		dst_priv->gpu_bo = NULL;
 	}
 
 	if (replaces) {
@@ -3807,10 +3806,11 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		dst_priv->clear = false;
 	}
 
-	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
-		DBG(("%s: discarding cached upload\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
-		dst_priv->gpu_bo = NULL;
+	if (src_priv == NULL &&
+	    !copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
+		DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
+		     __FUNCTION__));
+		goto fallback;
 	}
 
 	/* Try to maintain the data on the GPU */
@@ -4023,13 +4023,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 			assert_pixmap_damage(dst_pixmap);
 		} else {
-			if (src_priv) {
-				RegionTranslate(&region, src_dx, src_dy);
-				if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-								&region, MOVE_READ))
-					goto out;
-				RegionTranslate(&region, -src_dx, -src_dy);
-			}
+			assert(!src_priv->gpu_bo);
 
 			if (!dst_priv->pinned && replaces) {
 				stride = src_pixmap->devKind;
@@ -4054,6 +4048,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			} else {
 				DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
 				     __FUNCTION__));
+				assert(!DAMAGE_IS_ALL(dst_priv->cpu_damage));
 				if (!sna_write_boxes(sna, dst_pixmap,
 						     dst_priv->gpu_bo, dst_dx, dst_dy,
 						     src_pixmap->devPrivate.ptr,
@@ -4084,7 +4079,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		}
 
 		goto out;
-	} else if (dst_priv->cpu_bo &&
+	} else if (use_cpu_bo_for_write(sna, dst_priv) &&
 		   src_priv && DAMAGE_IS_ALL(src_priv->gpu_damage) && !src_priv->clear) {
 		assert(src_priv->gpu_bo != NULL); /* guaranteed by gpu_damage */
 		if (!sna->render.copy_boxes(sna, alu,
@@ -4188,7 +4183,7 @@ fallback:
 		dst_stride = dst_pixmap->devKind;
 		src_stride = src_pixmap->devKind;
 
-		if (alu == GXcopy && !reverse && !upsidedown && bpp >= 8) {
+		if (alu == GXcopy && bpp >= 8) {
 			dst_bits = (FbBits *)
 				((char *)dst_pixmap->devPrivate.ptr +
 				 dst_dy * dst_stride + dst_dx * bpp / 8);
commit a9045699b9cd66d0b0d96bfc964458c96845f97f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 23:47:21 2012 +0100

    sna: Reset region after transferring to cpu
    
    If we adjust the region for the pixmap offset, be sure that we reset it
    before returning it back to the caller.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2623e70..73808f7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1467,8 +1467,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			if (!kgem_bo_is_busy(priv->gpu_bo)) {
 				pixmap->devPrivate.ptr =
 					kgem_bo_map(&sna->kgem, priv->gpu_bo);
-				if (pixmap->devPrivate.ptr == NULL)
+				if (pixmap->devPrivate.ptr == NULL) {
+					if (dx | dy)
+						RegionTranslate(region, -dx, -dy);
 					return false;
+				}
 
 				priv->mapped = true;
 				pixmap->devKind = priv->gpu_bo->pitch;
@@ -1486,6 +1489,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 				priv->clear = false;
 				assert_pixmap_damage(pixmap);
+				if (dx | dy)
+					RegionTranslate(region, -dx, -dy);
 				return true;
 			}
 		}
@@ -1495,8 +1500,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				kgem_retire(&sna->kgem);
 			if (sync_will_stall(priv->cpu_bo)) {
 				sna_damage_subtract(&priv->cpu_damage, region);
-				if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE))
+				if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE)) {
+					if (dx | dy)
+						RegionTranslate(region, -dx, -dy);
 					return false;
+				}
 
 				sna_pixmap_free_cpu(sna, priv);
 			}
@@ -1508,8 +1516,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		    sna_pixmap_create_mappable_gpu(pixmap)) {
 			pixmap->devPrivate.ptr =
 				kgem_bo_map(&sna->kgem, priv->gpu_bo);
-			if (pixmap->devPrivate.ptr == NULL)
+			if (pixmap->devPrivate.ptr == NULL) {
+				if (dx | dy)
+					RegionTranslate(region, -dx, -dy);
 				return false;
+			}
 
 			priv->mapped = true;
 			pixmap->devKind = priv->gpu_bo->pitch;
@@ -1526,6 +1537,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 			assert_pixmap_damage(pixmap);
 			priv->clear = false;
+			if (dx | dy)
+				RegionTranslate(region, -dx, -dy);
 			return true;
 		}
 	}
@@ -1558,6 +1571,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 			assert_pixmap_damage(pixmap);
 			priv->clear = false;
+			if (dx | dy)
+				RegionTranslate(region, -dx, -dy);
 			return true;
 		}
 
@@ -1577,8 +1592,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 	if (pixmap->devPrivate.ptr == NULL &&
-	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL))
+	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL)) {
+		if (dx | dy)
+			RegionTranslate(region, -dx, -dy);
 		return false;
+	}
 
 	if (priv->gpu_bo == NULL) {
 		assert(priv->gpu_damage == NULL);
commit 9f51311a7d95bf4bc23926b8e6bf8ee52afd180c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 23:45:50 2012 +0100

    sna: Check if the bo is truly busy before committing to an indirect upload
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index b747dc7..40730c7 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -486,6 +486,15 @@ static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 	return bo->rq;
 }
 
+static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
+{
+	DBG_HDR(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
+		 bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	if (bo->rq && !bo->exec)
+		kgem_retire(kgem);
+	return kgem_bo_is_busy(bo);
+}
+
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
 {
 	if (bo == NULL)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3841e52..01b8d2c 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -42,7 +42,7 @@
 
 #define PITCH(x, y) ALIGN((x)*(y), 4)
 
-#define FORCE_INPLACE 0
+#define FORCE_INPLACE 0 /* 1 upload directly, -1 force indirect */
 
 /* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
 
@@ -545,7 +545,7 @@ static bool upload_inplace(struct kgem *kgem,
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (kgem_bo_is_busy(bo)) {
+	if (__kgem_bo_is_busy(kgem, bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
@@ -763,9 +763,8 @@ tile:
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
-	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
-	    !kgem_check_batch(kgem, 8) ||
+	if (!kgem_check_batch(kgem, 8) ||
+	    !kgem_check_reloc_and_exec(kgem, 2) ||
 	    !kgem_check_bo_fenced(kgem, dst_bo)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
commit 291b3c4367b455bfc5a772f8caaeee73f36d826a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 23:43:00 2012 +0100

    sna: Align upload buffers to 128
    
    This seems to be a restriction (observed on 965gm at least) that we
    have incoherent sampler cache if we write within 128 bytes of a busy
    buffer. This is either due to a restriction on neighbouring cachelines
    (like the earlier BLT limitations) or an effect of sampler prefetch.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50477
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index aaddda4..016ff48 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -71,6 +71,13 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define DBG(x) ErrorF x
 #endif
 
+/* Worst case seems to be 965gm where we cannot write within a cacheline that
+ * is being simultaneously being read by the GPU, or within the sampler
+ * prefetch. In general, the chipsets seem to have a requirement that sampler
+ * offsets be aligned to a cacheline (64 bytes).
+ */
+#define UPLOAD_ALIGNMENT 128
+
 #define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
 #define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
 
@@ -1134,7 +1141,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
 	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
 
-	if (ALIGN(bo->delta + bo->size.bytes, 64) == io->used)
+	if (ALIGN(bo->delta + bo->size.bytes, UPLOAD_ALIGNMENT) == io->used)
 		io->used = bo->delta;
 }
 
@@ -3619,9 +3626,9 @@ static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 {
 	struct kgem_partial_bo *bo;
 
-	bo = malloc(sizeof(*bo) + 128 + num_pages * PAGE_SIZE);
+	bo = malloc(sizeof(*bo) + 2*UPLOAD_ALIGNMENT + num_pages * PAGE_SIZE);
 	if (bo) {
-		bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), 64);
+		bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), UPLOAD_ALIGNMENT);
 		bo->mmapped = false;
 	}
 
@@ -4005,7 +4012,7 @@ init:
 	     __FUNCTION__, alloc, bo->base.handle));
 
 done:
-	bo->used = ALIGN(bo->used, 64);
+	bo->used = ALIGN(bo->used, UPLOAD_ALIGNMENT);
 	assert(bo->mem);
 	*ret = (char *)bo->mem + offset;
 	return kgem_create_proxy(kgem, &bo->base, offset, size);
@@ -4052,7 +4059,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		 * that it can be allocated to other pixmaps.
 		 */
 		min = bo->delta + height * stride;
-		min = ALIGN(min, 64);
+		min = ALIGN(min, UPLOAD_ALIGNMENT);
 		if (io->used != min) {
 			DBG(("%s: trimming partial buffer from %d to %d\n",
 			     __FUNCTION__, io->used, min));
commit 39e5c7491535999643c1761bb1602ad757ab486c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 21:58:27 2012 +0100

    sna: Assert damage is valid after every addition
    
    Even more paranoia than just checking upon migration.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c0c8ca4..2623e70 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2967,6 +2967,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			}
 		}
 
+		assert_pixmap_damage(pixmap);
 		priv->clear = false;
 		return true;
 	}
@@ -3006,6 +3007,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 							sna_damage_add(&priv->gpu_damage, region);
 					}
 
+					assert_pixmap_damage(pixmap);
 					priv->clear = false;
 					return true;
 				}
@@ -3145,6 +3147,7 @@ blt:
 		box++;
 	} while (--n);
 
+	assert_pixmap_damage(pixmap);
 	return true;
 }
 
@@ -3195,6 +3198,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
 	if (damage)
 		sna_damage_add(damage, region);
+	assert_pixmap_damage(pixmap);
 
 	DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
 
@@ -3318,6 +3322,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
 	if (damage)
 		sna_damage_add(damage, region);
+	assert_pixmap_damage(pixmap);
 
 	DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
 
@@ -3605,6 +3610,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 		if (!DAMAGE_IS_ALL(priv->gpu_damage))
 			sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty);
+		assert_pixmap_damage(pixmap);
 	} else {
 		FbBits *dst_bits, *src_bits;
 		int stride, bpp;
@@ -3737,6 +3743,9 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     src_pixmap->drawable.width, src_pixmap->drawable.height,
 	     dst_pixmap->drawable.width, dst_pixmap->drawable.height));
 
+	assert_pixmap_damage(dst_pixmap);
+	assert_pixmap_damage(src_pixmap);
+
 	pixman_region_init_rects(&region, box, n);
 
 	bpp = dst_pixmap->drawable.bitsPerPixel;
@@ -3870,6 +3879,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					assert_pixmap_contains_box(dst_pixmap,
 								   RegionExtents(&region));
 					sna_damage_add(&dst_priv->gpu_damage, &region);
+					assert_pixmap_damage(dst_pixmap);
 				}
 			}
 
@@ -3905,6 +3915,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					sna_damage_add(&dst_priv->gpu_damage, &region);
 					RegionTranslate(&region, -dst_dx, -dst_dy);
 				}
+				assert_pixmap_damage(dst_pixmap);
 			}
 		} else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) {
 			if (!sna->render.copy_boxes(sna, alu,
@@ -3931,6 +3942,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					sna_damage_add(&dst_priv->gpu_damage, &region);
 					RegionTranslate(&region, -dst_dx, -dst_dy);
 				}
+				assert_pixmap_damage(dst_pixmap);
 			}
 		} else if (alu != GXcopy) {
 			PixmapPtr tmp;
@@ -3991,6 +4003,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				sna_damage_add(&dst_priv->gpu_damage, &region);
 				RegionTranslate(&region, -dst_dx, -dst_dy);
 			}
+			assert_pixmap_damage(dst_pixmap);
 		} else {
 			if (src_priv) {
 				RegionTranslate(&region, src_dx, src_dy);
@@ -4047,6 +4060,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 							       &region);
 						RegionTranslate(&region, -dst_dx, -dst_dy);
 					}
+					assert_pixmap_damage(dst_pixmap);
 				}
 			}
 		}
@@ -4076,6 +4090,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			sna_damage_add(&dst_priv->cpu_damage, &region);
 			RegionTranslate(&region, -dst_dx, -dst_dy);
 		}
+		assert_pixmap_damage(dst_pixmap);
 		if (dst_priv->flush)
 			list_move(&dst_priv->list, &sna->dirty_pixmaps);
 
@@ -5088,6 +5103,7 @@ damage_clipped:
 
 done:
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -5387,6 +5403,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 	assert_pixmap_contains_boxes(pixmap, box, n, dx, dy);
 	if (arg->damage)
 		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
+	assert_pixmap_damage(pixmap);
 
 	br00 = 3 << 20;
 	br13 = arg->bo->pitch;
@@ -5548,6 +5565,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	assert_pixmap_contains_boxes(dst_pixmap, box, n, dx, dy);
 	if (arg->damage)
 		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
+	assert_pixmap_damage(dst_pixmap);
 
 	br00 = XY_MONO_SRC_COPY;
 	if (drawable->bitsPerPixel == 32)
@@ -5937,6 +5955,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 		}
 	}
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -6388,6 +6407,7 @@ Y2_continue:
 
 done:
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	RegionUninit(&clip);
 	return true;
 
@@ -6641,6 +6661,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 			sna_damage_add_boxes(damage, boxes, b - boxes, 0, 0);
 	}
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -7041,6 +7062,7 @@ spans_fallback:
 				pixman_region_translate(&data.region, data.dx, data.dy);
 			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
+			assert_pixmap_damage(data.pixmap);
 		}
 		RegionUninit(&data.region);
 		return;
@@ -7257,6 +7279,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 	}
 done:
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -7553,6 +7576,7 @@ Y2_continue:
 
 done:
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	RegionUninit(&clip);
 	return true;
 
@@ -7894,6 +7918,7 @@ spans_fallback:
 			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
 		}
+		assert_pixmap_damage(data.pixmap);
 		RegionUninit(&data.region);
 		return;
 	}
@@ -8435,6 +8460,7 @@ done:
 			sna_damage_add_boxes(damage, boxes, b-boxes, 0, 0);
 	}
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -8669,6 +8695,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 				assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 				sna_damage_add(data.damage, &data.region);
 			}
+			assert_pixmap_damage(data.pixmap);
 			RegionUninit(&data.region);
 			return;
 		}
@@ -8755,6 +8782,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 					} else
 						sna_damage_add_box(damage, &r);
 				}
+				assert_pixmap_damage(pixmap);
 
 				if ((gc->alu == GXcopy || gc->alu == GXclear) &&
 				    r.x2 - r.x1 == pixmap->drawable.width &&
@@ -8897,6 +8925,7 @@ sna_poly_fill_rect_blt(DrawablePtr drawable,
 	}
 done:
 	fill.done(sna, &fill);
+	assert_pixmap_damage(pixmap);
 	return TRUE;
 }
 
@@ -9022,6 +9051,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
 		}
+		assert_pixmap_damage(data.pixmap);
 		RegionUninit(&data.region);
 		return;
 	}
@@ -9337,6 +9367,7 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	}
 done:
 	copy.done(sna, &copy);
+	assert_pixmap_damage(pixmap);
 	kgem_bo_destroy(&sna->kgem, tile_bo);
 	return TRUE;
 }
@@ -9528,6 +9559,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 		}
 	}
 
+	assert_pixmap_damage(pixmap);
 	sna->blt_state.fill_bo = 0;
 	return true;
 }
@@ -10248,6 +10280,7 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 		}
 	}
 
+	assert_pixmap_damage(pixmap);
 	sna->blt_state.fill_bo = 0;
 	return true;
 }
@@ -10616,6 +10649,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
 		}
+		assert_pixmap_damage(data.pixmap);
 		RegionUninit(&data.region);
 		return;
 	}
@@ -10888,6 +10922,7 @@ skip:
 		}
 	} while (1);
 
+	assert_pixmap_damage(pixmap);
 	sna->blt_state.fill_bo = 0;
 	return true;
 }
@@ -11601,6 +11636,7 @@ skip:
 		}
 	} while (1);
 
+	assert_pixmap_damage(pixmap);
 	sna->blt_state.fill_bo = 0;
 	return true;
 }
@@ -11803,6 +11839,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
 	if (damage)
 		sna_damage_add(damage, region);
+	assert_pixmap_damage(pixmap);
 
 	DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__,
 	     region->extents.x1, region->extents.y1,
commit 92e1693e5fb3a1dd89fca5e5ecc660e2de78f9cd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 21:26:58 2012 +0100

    sna: Validate cpu/gpu damage never overlaps
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50477
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 0b93a2f..604db7d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -318,8 +318,12 @@ fi
 if test "x$DEBUG" = xmemory; then
 	AC_DEFINE(DEBUG_MEMORY,1,[Enable memory debugging])
 fi
+if test "x$DEBUG" = xpixmap; then
+	AC_DEFINE(DEBUG_PIXMAP,1,[Enable pixmap debugging])
+fi
 if test "x$DEBUG" = xfull; then
 	AC_DEFINE(DEBUG_MEMORY,1,[Enable memory debugging])
+	AC_DEFINE(DEBUG_PIXMAP,1,[Enable pixmap debugging])
 	AC_DEFINE(HAS_DEBUG_FULL,1,[Enable all debugging])
         CFLAGS="$CFLAGS -O0 -ggdb3"
 fi
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d72a591..c0c8ca4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -213,7 +213,7 @@ typedef struct box32 {
 #define PM_IS_SOLID(_draw, _pm) \
 	(((_pm) & FbFullMask((_draw)->depth)) == FbFullMask((_draw)->depth))
 
-#if DEBUG_ACCEL
+#ifdef DEBUG_PIXMAP
 static void _assert_pixmap_contains_box(PixmapPtr pixmap, const BoxRec *box, const char *function)
 {
 	if (box->x1 < 0 || box->y1 < 0 ||
@@ -296,15 +296,53 @@ static void _assert_drawable_contains_box(DrawablePtr drawable, const BoxRec *bo
 		assert(0);
 	}
 }
+
+static void assert_pixmap_damage(PixmapPtr p)
+{
+	struct sna_pixmap *priv;
+	RegionRec reg, cpu, gpu;
+
+	priv = sna_pixmap(p);
+	if (priv == NULL)
+		return;
+
+	if (DAMAGE_IS_ALL(priv->gpu_damage) && DAMAGE_IS_ALL(priv->cpu_damage))
+		/* special upload buffer */
+		return;
+
+	assert(!DAMAGE_IS_ALL(priv->gpu_damage) || priv->cpu_damage == NULL);
+	assert(!DAMAGE_IS_ALL(priv->cpu_damage) || priv->gpu_damage == NULL);
+
+	/* Avoid reducing damage to minimise interferrence */
+	RegionNull(&reg);
+	RegionNull(&gpu);
+	RegionNull(&cpu);
+
+	if (priv->gpu_damage)
+		_sna_damage_debug_get_region(DAMAGE_PTR(priv->gpu_damage), &gpu);
+
+	if (priv->cpu_damage)
+		_sna_damage_debug_get_region(DAMAGE_PTR(priv->cpu_damage), &cpu);
+
+	RegionIntersect(&reg, &cpu, &gpu);
+	assert(!RegionNotEmpty(&reg));
+
+	RegionUninit(&reg);
+	RegionUninit(&gpu);
+	RegionUninit(&cpu);
+}
+
 #define assert_pixmap_contains_box(p, b) _assert_pixmap_contains_box(p, b, __FUNCTION__)
 #define assert_drawable_contains_box(d, b) _assert_drawable_contains_box(d, b, __FUNCTION__)
 #define assert_pixmap_contains_boxes(p, b, n, x, y) _assert_pixmap_contains_boxes(p, b, n, x, y, __FUNCTION__)
 #define assert_pixmap_contains_points(p, pt, n, x, y) _assert_pixmap_contains_points(p, pt, n, x, y, __FUNCTION__)
+
 #else
 #define assert_pixmap_contains_box(p, b)
 #define assert_pixmap_contains_boxes(p, b, n, x, y)
 #define assert_pixmap_contains_points(p, pt, n, x, y)
 #define assert_drawable_contains_box(d, b)
+#define assert_pixmap_damage(p)
 #endif
 
 inline static bool
@@ -441,6 +479,8 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	list_del(&priv->list);
 	list_del(&priv->inactive);
 
+	assert_pixmap_damage(pixmap);
+
 	sna_damage_destroy(&priv->gpu_damage);
 	sna_damage_destroy(&priv->cpu_damage);
 
@@ -536,6 +576,8 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling)
 		return NULL;
 	}
 
+	assert_pixmap_damage(pixmap);
+
 	bo = kgem_create_2d(&sna->kgem,
 			    pixmap->drawable.width,
 			    pixmap->drawable.height,
@@ -939,6 +981,8 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 	if (wedged(sna))
 		return false;
 
+	assert_pixmap_damage(pixmap);
+
 	assert(priv->gpu_bo == NULL);
 	priv->gpu_bo =
 		kgem_create_2d(&sna->kgem,
@@ -993,6 +1037,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 	     pixmap->drawable.height,
 	     flags));
 
+	assert_pixmap_damage(pixmap);
+
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
 		DBG(("%s: not attached\n", __FUNCTION__));
@@ -1052,6 +1098,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				sna_pixmap_free_cpu(sna, priv);
 			}
 
+			assert_pixmap_damage(pixmap);
 			return true;
 		}
 
@@ -1105,6 +1152,8 @@ skip_inplace_map:
 				priv->undamaged = false;
 				priv->clear = false;
 			}
+
+			assert_pixmap_damage(pixmap);
 			return true;
 		}
 
@@ -1203,6 +1252,7 @@ done:
 	}
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
+	assert_pixmap_damage(pixmap);
 	return true;
 }
 
@@ -1295,6 +1345,8 @@ static inline bool region_inplace(struct sna *sna,
 				  struct sna_pixmap *priv,
 				  bool write_only)
 {
+	assert_pixmap_damage(pixmap);
+
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
@@ -1353,6 +1405,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	     RegionExtents(region)->x2, RegionExtents(region)->y2,
 	     flags));
 
+	assert_pixmap_damage(pixmap);
 	if (flags & MOVE_WRITE) {
 		assert_drawable_contains_box(drawable, &region->extents);
 	}
@@ -1432,6 +1485,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 						       region);
 
 				priv->clear = false;
+				assert_pixmap_damage(pixmap);
 				return true;
 			}
 		}
@@ -1470,6 +1524,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			} else
 				sna_damage_add(&priv->gpu_damage, region);
 
+			assert_pixmap_damage(pixmap);
 			priv->clear = false;
 			return true;
 		}
@@ -1501,6 +1556,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					sna_damage_subtract(&priv->cpu_damage,
 							    region);
 			}
+			assert_pixmap_damage(pixmap);
 			priv->clear = false;
 			return true;
 		}
@@ -1771,16 +1827,6 @@ done:
 		}
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
-#ifdef HAVE_FULL_DEBUG
-		{
-			RegionRec need;
-
-			RegionNull(&need);
-			assert(priv->gpu_damage == NULL ||
-			       !sna_damage_intersect(priv->gpu_damage, r, &need));
-			RegionUninit(&need);
-		}
-#endif
 	}
 
 	if (dx | dy)
@@ -1797,6 +1843,7 @@ out:
 	}
 	assert(pixmap->devPrivate.ptr);
 	assert(pixmap->devKind);
+	assert_pixmap_damage(pixmap);
 	return true;
 }
 
@@ -1914,6 +1961,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 
 	DBG(("%s()\n", __FUNCTION__));
 
+	assert_pixmap_damage(pixmap);
 	assert_pixmap_contains_box(pixmap, box);
 
 	if (sna_damage_is_all(&priv->gpu_damage,
@@ -1961,6 +2009,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 		sna_damage_subtract_box(&priv->cpu_damage, box);
 
 	sna_damage_reduce(&priv->cpu_damage);
+	assert_pixmap_damage(pixmap);
 	if (priv->cpu_damage == NULL) {
 		list_del(&priv->list);
 		goto done;
@@ -2082,6 +2131,7 @@ done:
 	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
+	assert_pixmap_damage(pixmap);
 	return true;
 }
 
@@ -2107,6 +2157,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	DBG(("%s((%d, %d), (%d, %d))...\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
+	assert_pixmap_damage(pixmap);
 	assert_drawable_contains_box(drawable, box);
 
 	if (priv == NULL) {
@@ -2482,6 +2533,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		sna_damage_destroy(&priv->cpu_damage);
 
 	sna_damage_reduce(&priv->cpu_damage);
+	assert_pixmap_damage(pixmap);
 	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
 		DBG(("%s: creating GPU bo (%dx%d@%d), create=%x\n",
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 1957177..ce16b77 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1626,3 +1626,101 @@ void sna_damage_selftest(void)
 	}
 }
 #endif
+
+void _sna_damage_debug_get_region(struct sna_damage *damage, RegionRec *r)
+{
+	int n, nboxes;
+	BoxPtr boxes;
+	struct sna_damage_box *iter;
+
+	RegionCopy(r, &damage->region);
+	if (!damage->dirty)
+		return;
+
+	nboxes = damage->embedded_box.size;
+	list_for_each_entry(iter, &damage->embedded_box.list, list)
+		nboxes += iter->size;
+	nboxes -= damage->remain;
+	if (nboxes == 0)
+		return;
+
+	if (nboxes == 1) {
+		pixman_region16_t tmp;
+
+		tmp.extents = damage->embedded_box.box[0];
+		tmp.data = NULL;
+
+		if (damage->mode == DAMAGE_ADD)
+			pixman_region_union(r, r, &tmp);
+		else
+			pixman_region_subtract(r, r, &tmp);
+
+		return;
+	}
+
+	if (damage->mode == DAMAGE_ADD)
+		nboxes += REGION_NUM_RECTS(r);
+
+	iter = list_entry(damage->embedded_box.list.prev,
+			  struct sna_damage_box,
+			  list);
+	n = iter->size - damage->remain;
+	boxes = malloc(sizeof(BoxRec)*nboxes);
+	if (boxes == NULL)
+		return;
+
+	if (list_is_empty(&damage->embedded_box.list)) {
+		memcpy(boxes,
+		       damage->embedded_box.box,
+		       n*sizeof(BoxRec));
+	} else {
+		if (boxes != (BoxPtr)(iter+1))
+			memcpy(boxes, iter+1, n*sizeof(BoxRec));
+
+		iter = list_entry(iter->list.prev,
+				  struct sna_damage_box,
+				  list);
+		while (&iter->list != &damage->embedded_box.list) {
+			memcpy(boxes + n, iter+1,
+			       iter->size * sizeof(BoxRec));
+			n += iter->size;
+
+			iter = list_entry(iter->list.prev,
+					  struct sna_damage_box,
+					  list);
+		}
+
+		memcpy(boxes + n,
+		       damage->embedded_box.box,
+		       sizeof(damage->embedded_box.box));
+		n += damage->embedded_box.size;
+	}
+
+	if (damage->mode == DAMAGE_ADD) {
+		memcpy(boxes + n,
+		       REGION_RECTS(r),
+		       REGION_NUM_RECTS(r)*sizeof(BoxRec));
+		assert(n + REGION_NUM_RECTS(r) == nboxes);
+		pixman_region_fini(r);
+		pixman_region_init_rects(r, boxes, nboxes);
+
+		assert(pixman_region_not_empty(r));
+		assert(damage->extents.x1 == r->extents.x1 &&
+		       damage->extents.y1 == r->extents.y1 &&
+		       damage->extents.x2 == r->extents.x2 &&
+		       damage->extents.y2 == r->extents.y2);
+	} else {
+		pixman_region16_t tmp;
+
+		pixman_region_init_rects(&tmp, boxes, nboxes);
+		pixman_region_subtract(r, r, &tmp);
+		pixman_region_fini(&tmp);
+
+		assert(damage->extents.x1 <= r->extents.x1 &&
+		       damage->extents.y1 <= r->extents.y1 &&
+		       damage->extents.x2 >= r->extents.x2 &&
+		       damage->extents.y2 >= r->extents.y2);
+	}
+
+	free(boxes);
+}
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 1196912..a006ade 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -285,6 +285,8 @@ static inline void sna_damage_destroy(struct sna_damage **damage)
 	*damage = NULL;
 }
 
+void _sna_damage_debug_get_region(struct sna_damage *damage, RegionRec *r);
+
 #if DEBUG_DAMAGE && TEST_DAMAGE
 void sna_damage_selftest(void);
 #else
commit d2312c8f958002e54ddcb834f37916f4b46ac291
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 21:29:29 2012 +0100

    sna: Fixup tracking of vmap upload buffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 84475fe..aaddda4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1032,7 +1032,9 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 
 	if (IS_VMAP_MAP(bo->map)) {
 		assert(bo->rq == NULL);
-		free(MAP(bo->map));
+		assert(MAP(bo->map) != bo || bo->io);
+		if (bo != MAP(bo->map))
+			free(MAP(bo->map));
 		bo->map = NULL;
 	}
 	if (bo->map)
@@ -1578,6 +1580,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
+				assert(!bo->base.vmap);
 				list_move(&bo->base.list,
 					  &kgem->active_partials);
 				continue;
@@ -3881,10 +3884,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				DBG(("%s: created vmap handle=%d for buffer\n",
 				     __FUNCTION__, bo->base.handle));
 
-				bo->need_io = false;
 				bo->base.io = true;
 				bo->base.vmap = true;
+				bo->base.map = MAKE_VMAP_MAP(bo);
 				bo->mmapped = true;
+				bo->need_io = false;
 
 				goto init;
 			}
commit 75e9eeca7e998b1ee3f8b0df780adae1b9e5b408
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 16:39:20 2012 +0100

    sna: Remove overlapping CPU damage when operating inplace on the GPU
    
    Otherwise we gradually introduce garbage into the picture.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50477
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index adc3e4e..d72a591 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1292,7 +1292,8 @@ static bool sync_will_stall(struct kgem_bo *bo)
 static inline bool region_inplace(struct sna *sna,
 				  PixmapPtr pixmap,
 				  RegionPtr region,
-				  struct sna_pixmap *priv)
+				  struct sna_pixmap *priv,
+				  bool write_only)
 {
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
@@ -1300,7 +1301,7 @@ static inline bool region_inplace(struct sna *sna,
 	if (wedged(sna))
 		return false;
 
-	if (priv->cpu_damage &&
+	if (!write_only && priv->cpu_damage &&
 	    region_overlaps_damage(region, priv->cpu_damage)) {
 		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
 		return false;
@@ -1404,7 +1405,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		assert(flags & MOVE_WRITE);
 
 		if (priv->stride && priv->gpu_bo &&
-		    region_inplace(sna, pixmap, region, priv)) {
+		    region_inplace(sna, pixmap, region, priv, true)) {
 			assert(priv->gpu_bo->proxy == NULL);
 			if (sync_will_stall(priv->gpu_bo) &&
 			    priv->gpu_bo->exec == NULL)
@@ -1449,7 +1450,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 		if (priv->gpu_bo == NULL && priv->stride &&
 		    sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) != I915_TILING_NONE &&
-		    region_inplace(sna, pixmap, region, priv) &&
+		    region_inplace(sna, pixmap, region, priv, true) &&
 		    sna_pixmap_create_mappable_gpu(pixmap)) {
 			pixmap->devPrivate.ptr =
 				kgem_bo_map(&sna->kgem, priv->gpu_bo);
@@ -1475,7 +1476,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 	if (operate_inplace(priv, flags) &&
-	    region_inplace(sna, pixmap, region, priv)) {
+	    region_inplace(sna, pixmap, region, priv, (flags & MOVE_READ) == 0)) {
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
 		DBG(("%s: operate inplace\n", __FUNCTION__));
@@ -1496,7 +1497,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					sna_damage_destroy(&priv->cpu_damage);
 					priv->undamaged = false;
 					list_del(&priv->list);
-				}
+				} else
+					sna_damage_subtract(&priv->cpu_damage,
+							    region);
 			}
 			priv->clear = false;
 			return true;
@@ -2832,12 +2835,7 @@ static bool upload_inplace(struct sna *sna,
 			   struct sna_pixmap *priv,
 			   RegionRec *region)
 {
-	if (priv->mapped) {
-		DBG(("%s: already mapped\n", __FUNCTION__));
-		return true;
-	}
-
-	if (!region_inplace(sna, pixmap, region, priv))
+	if (!region_inplace(sna, pixmap, region, priv, true))
 		return false;
 
 	if (priv->gpu_bo) {
@@ -3051,7 +3049,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		}
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
-		priv->clear = false;
 	}
 
 blt:
@@ -3068,6 +3065,15 @@ blt:
 	box = REGION_RECTS(region);
 	n = REGION_NUM_RECTS(region);
 	do {
+		DBG(("%s: copy box (%d, %d)->(%d, %d)x(%d, %d)\n",
+		     __FUNCTION__,
+		     box->x1 - x, box->y1 - y,
+		     box->x1, box->y1,
+		     box->x2 - box->x1, box->y2 - box->y1));
+
+		assert(box->x2 > box->x1);
+		assert(box->y2 > box->y1);
+
 		assert(box->x1 >= 0);
 		assert(box->y1 >= 0);
 		assert(box->x2 <= pixmap->drawable.width);
@@ -3599,9 +3605,10 @@ fallback:
 
 static bool copy_use_gpu_bo(struct sna *sna,
 			    struct sna_pixmap *priv,
-			    RegionPtr region)
+			    RegionPtr region,
+			    bool write_only)
 {
-	if (region_inplace(sna, priv->pixmap, region, priv)) {
+	if (region_inplace(sna, priv->pixmap, region, priv, write_only)) {
 		DBG(("%s: perform in place, use gpu bo\n", __FUNCTION__));
 		return true;
 	}
@@ -3706,7 +3713,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst_priv == NULL)
 		goto fallback;
 
-	if (src_priv == NULL && !copy_use_gpu_bo(sna, dst_priv, &region)) {
+	if (src_priv == NULL &&
+	    !copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) {
 		DBG(("%s: fallback - unattached to source and not use dst gpu bo\n",
 		     __FUNCTION__));
 		goto fallback;
@@ -3728,7 +3736,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	/* Try to maintain the data on the GPU */
 	if (dst_priv->gpu_bo == NULL &&
-	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
+	    ((dst_priv->cpu_damage == NULL &&
+	      copy_use_gpu_bo(sna, dst_priv, &region, alu_overwrites(alu))) ||
 	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
 		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap,
 							   DEFAULT_TILING);
commit a936466dd41c4bb8b0956c4da5a5838f991b332a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 11:36:53 2012 +0100

    sna: Prefer to attempt a Composite operation rather than use pixman composite
    
    As pixman composite performance is atrocious for anything other than
    solids, prefer to upload the mask and attempt a composite operation on
    the GPU unless we are forcing the fallback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 9da9aae..cb9fd7c 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3412,12 +3412,8 @@ composite_unaligned_boxes_inplace(CARD8 op,
 				  PicturePtr dst, int n, xTrapezoid *t,
 				  bool force_fallback)
 {
-	if (!force_fallback &&
-	    (is_gpu(dst->pDrawable) ||
-	     (src->pDrawable && is_gpu(src->pDrawable)))) {
-		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
-		     __FUNCTION__));
-
+	if (!force_fallback) {
+		DBG(("%s: fallback -- not forcing\n", __FUNCTION__));
 		return false;
 	}
 
commit 4b325d6e2b99ec3f63a7c5175f6f4746f6773550
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 18 11:24:55 2012 +0100

    sna: Fix rendering of unaligned boxes through pixman
    
    Not only do we need to make sure the source is available to the CPU, we
    need to actually check the right conditions for clipping the box.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 61b4fb1..9da9aae 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3148,7 +3148,7 @@ pixsolid_unaligned_box_row(struct pixman_inplace *pi,
 	if (x2 > extents->x2)
 		x2 = extents->x2, fx2 = 0;
 
-	if (x2 < x1) {
+	if (x1 < x2) {
 		if (fx1) {
 			pixsolid_opacity(pi, x1, 1, y, h,
 					 covered * (SAMPLES_X - fx1));
@@ -3389,7 +3389,7 @@ pixmask_unaligned_box_row(struct pixman_inplace *pi,
 	if (x2 > extents->x2)
 		x2 = extents->x2, fx2 = 0;
 
-	if (x2 < x1) {
+	if (x1 < x2) {
 		if (fx1) {
 			pixmask_opacity(pi, x1, 1, y, h,
 					 covered * (SAMPLES_X - fx1));
@@ -3412,10 +3412,9 @@ composite_unaligned_boxes_inplace(CARD8 op,
 				  PicturePtr dst, int n, xTrapezoid *t,
 				  bool force_fallback)
 {
-	PixmapPtr pixmap;
-	int16_t dx, dy;
-
-	if (!force_fallback && is_gpu(dst->pDrawable)) {
+	if (!force_fallback &&
+	    (is_gpu(dst->pDrawable) ||
+	     (src->pDrawable && is_gpu(src->pDrawable)))) {
 		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
 		     __FUNCTION__));
 
@@ -3424,10 +3423,6 @@ composite_unaligned_boxes_inplace(CARD8 op,
 
 	src_x -= pixman_fixed_to_int(t[0].left.p1.x);
 	src_y -= pixman_fixed_to_int(t[0].left.p1.y);
-
-	pixmap = get_drawable_pixmap(dst->pDrawable);
-	get_drawable_deltas(dst->pDrawable, pixmap, &dx, &dy);
-
 	do {
 		struct pixman_inplace pi;
 		RegionRec clip;
@@ -3441,8 +3436,9 @@ composite_unaligned_boxes_inplace(CARD8 op,
 		clip.data = NULL;
 
 		if (!sna_compute_composite_region(&clip,
-						   NULL, NULL, dst,
-						   0, 0,
+						   src, NULL, dst,
+						   clip.extents.x1 + src_x,
+						   clip.extents.y1 + src_y,
 						   0, 0,
 						   clip.extents.x1, clip.extents.y1,
 						   clip.extents.x2 - clip.extents.x1,
@@ -3455,8 +3451,23 @@ composite_unaligned_boxes_inplace(CARD8 op,
 			continue;
 		}
 
+		if (src->pDrawable) {
+			if (!sna_drawable_move_to_cpu(src->pDrawable,
+						      MOVE_READ)) {
+				RegionUninit(&clip);
+				continue;
+			}
+			if (src->alphaMap) {
+				if (!sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+							      MOVE_READ)) {
+					RegionUninit(&clip);
+					continue;
+				}
+			}
+		}
+
 		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
-		pi.source = image_from_pict(src, TRUE, &pi.sx, &pi.sy);
+		pi.source = image_from_pict(src, FALSE, &pi.sx, &pi.sy);
 		pi.sx += src_x;
 		pi.sy += src_y;
 		pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
commit caef27492b5a70bd46a09cd5094b2c93e38e39ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 20:56:46 2012 +0100

    sna: convert another instance of applying the clear to the CPU pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 97df25a..adc3e4e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1136,13 +1136,18 @@ skip_inplace_map:
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		}
 
-		pixman_fill(pixmap->devPrivate.ptr,
-			    pixmap->devKind/sizeof(uint32_t),
-			    pixmap->drawable.bitsPerPixel,
-			    0, 0,
-			    pixmap->drawable.width,
-			    pixmap->drawable.height,
-			    priv->clear_color);
+		if (priv->clear_color == 0 || pixmap->drawable.bitsPerPixel == 8) {
+			memset(pixmap->devPrivate.ptr, priv->clear_color,
+			       pixmap->devKind * pixmap->drawable.height);
+		} else {
+			pixman_fill(pixmap->devPrivate.ptr,
+				    pixmap->devKind/sizeof(uint32_t),
+				    pixmap->drawable.bitsPerPixel,
+				    0, 0,
+				    pixmap->drawable.width,
+				    pixmap->drawable.height,
+				    priv->clear_color);
+		}
 
 		priv->clear = false;
 	}
commit 8695c4c77666cd07eab51efcbc7c4f11c85250fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 16:53:53 2012 +0100

    sna: Fix the blt composite op with no-ops
    
    When returning early because the operation is a no-op, we still need to
    fill in the function pointers to prevent a later NULL dereference.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 83bcd69..4a9dbff 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -889,6 +889,38 @@ static void blt_composite_fill_boxes(struct sna *sna,
 	} while (--n);
 }
 
+fastcall
+static void blt_composite_nop(struct sna *sna,
+			       const struct sna_composite_op *op,
+			       const struct sna_composite_rectangles *r)
+{
+}
+
+fastcall static void blt_composite_nop_box(struct sna *sna,
+					   const struct sna_composite_op *op,
+					   const BoxRec *box)
+{
+}
+
+static void blt_composite_nop_boxes(struct sna *sna,
+				    const struct sna_composite_op *op,
+				    const BoxRec *box, int n)
+{
+}
+
+static Bool
+prepare_blt_nop(struct sna *sna,
+		struct sna_composite_op *op)
+{
+	DBG(("%s\n", __FUNCTION__));
+
+	op->blt   = blt_composite_nop;
+	op->box   = blt_composite_nop_box;
+	op->boxes = blt_composite_nop_boxes;
+	op->done  = nop_done;
+	return TRUE;
+}
+
 static Bool
 prepare_blt_clear(struct sna *sna,
 		  struct sna_composite_op *op)
@@ -1100,14 +1132,18 @@ prepare_blt_copy(struct sna *sna,
 	PixmapPtr src = op->u.blt.src_pixmap;
 	struct sna_pixmap *priv = sna_pixmap(src);
 
-	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo))
+	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
+		DBG(("%s: fallback -- can't blt from source\n", __FUNCTION__));
 		return FALSE;
+	}
 
 	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
 		_kgem_submit(&sna->kgem);
 		if (!kgem_check_many_bo_fenced(&sna->kgem,
-					       op->dst.bo, priv->gpu_bo, NULL))
+					       op->dst.bo, priv->gpu_bo, NULL)) {
+			DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
 			return FALSE;
+		}
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
@@ -1586,8 +1622,9 @@ sna_blt_composite(struct sna *sna,
 	if (op == PictOpClear) {
 clear:
 		if (was_clear)
-			return TRUE;
-		return prepare_blt_clear(sna, tmp);
+			return prepare_blt_nop(sna, tmp);
+		else
+			return prepare_blt_clear(sna, tmp);
 	}
 
 	if (is_solid(src)) {
commit 7905ddae1dbc8805d0fadbd6d21c7a5df7e715fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 17:01:12 2012 +0100

    sna: Further refine choice of placement when uploading source data.
    
    The goal is cheaply spot a simple copy operation that can be performed
    on the CPU without having to load both parties onto the GPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 15512fd..8a5a405 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -123,11 +123,7 @@ static inline Bool
 unattached(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-
-	if (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))
-		return true;
-
-	return priv->gpu_bo == NULL && priv->cpu_bo == NULL;
+	return priv == NULL || (priv->gpu_damage == NULL && priv->cpu_damage);
 }
 
 static inline Bool
commit 5a675b61f27273f7ef344d4056dbba1f8cd97737
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 15:42:17 2012 +0100

    sna: Correct typo forcing everything to be cleared to 0!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1e86643..97df25a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3002,7 +3002,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		}
 
-		if (priv->clear_color = 0) {
+		if (priv->clear_color == 0) {
 			memset(pixmap->devPrivate.ptr,
 			       0, pixmap->devKind * pixmap->drawable.height);
 		} else {
commit b55bf1abbe71281e3d9ebde1c4005d40902d5e7f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 14:14:44 2012 +0100

    sna: Fix cut'n'paste errors in tiling debug
    
    Rename for different variables
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 16b90ca..27a9dc8 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -431,8 +431,8 @@ sna_tiling_composite_spans_done(struct sna *sna,
 
 					DBG(("%s: rect[%d] = (%d, %d)x(%d,%d), tile=(%d,%d)x(%d, %d), blt=(%d,%d),(%d,%d)\n",
 					     __FUNCTION__, n,
-					     r->dst.x, r->dst.y,
-					     r->width, r->height,
+					     r->box.x1, r->box.y1,
+					     r->box.x2-r->box.x1, r->box.y2-r->box.y1,
 					     x, y, width, height,
 					     b.x1, b.y1, b.x2, b.y2));
 
@@ -488,8 +488,8 @@ sna_tiling_composite_spans_done(struct sna *sna,
 
 					DBG(("%s: rect[%d] = (%d, %d)x(%d,%d), tile=(%d,%d)x(%d, %d), blt=(%d,%d),(%d,%d)\n",
 					     __FUNCTION__, n,
-					     r->dst.x, r->dst.y,
-					     r->width, r->height,
+					     r->box.x1, r->box.y1,
+					     r->box.x2-r->box.x1, r->box.y2-r->box.y1,
 					     x, y, width, height,
 					     b.x1, b.y1, b.x2, b.y2));
 
commit 9756c60b4ad15281d025b9c27f19d19e8a630958
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 11:39:33 2012 +0100

    sna/gen7: Enable non-rectilinear spans
    
    Seems we have enough GPU power to overcome the clumsy shaders. Just
    imagine the possibilities when we have a true shader for spans...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 4642549..3c07d8d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3166,9 +3166,6 @@ gen7_render_composite_spans(struct sna *sna,
 	DBG(("%s: %dx%d with flags=%x, current mode=%d\n", __FUNCTION__,
 	     width, height, flags, sna->kgem.ring));
 
-	if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0)
-		return FALSE;
-
 	if (op >= ARRAY_SIZE(gen7_blend_op))
 		return FALSE;
 
commit 41aff56a1f452e409c7a49512a1d2824b74f3838
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 10:50:31 2012 +0100

    sna: Add tiling for spans
    
    Seemingly only advisable when already committed to using the GPU. This
    first pass is still a little naive as it makes no attempt to avoid empty
    tiles, nor aims to be efficient.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index e67dd95..5fd995b 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2220,14 +2220,23 @@ gen2_render_composite_spans(struct sna *sna,
 		return FALSE;
 	}
 
-	if (!gen2_check_dst_format(dst->format)) {
-		DBG(("%s: fallback due to unhandled dst format: %x\n",
-		     __FUNCTION__, dst->format));
+	if (gen2_composite_fallback(sna, src, NULL, dst))
 		return FALSE;
-	}
 
-	if (need_tiling(sna, width, height))
-		return FALSE;
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
+
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return FALSE;
+		}
+
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
+	}
 
 	if (!gen2_composite_set_target(sna, &tmp->base, dst)) {
 		DBG(("%s: unable to set render target\n",
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 62b3a8e..54ec9b2 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3392,9 +3392,18 @@ gen3_render_composite_spans(struct sna *sna,
 		return FALSE;
 
 	if (need_tiling(sna, width, height)) {
-		DBG(("%s: fallback, operation (%dx%d) too wide for pipeline\n",
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
 		     __FUNCTION__, width, height));
-		return FALSE;
+
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return FALSE;
+		}
+
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
 	}
 
 	if (!gen3_composite_set_target(sna, &tmp->base, dst)) {
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index e31ac5c..65c21c3 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2785,12 +2785,24 @@ gen5_render_composite_spans(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen5_blend_op))
 		return FALSE;
 
-	if (need_tiling(sna, width, height))
-		return FALSE;
-
 	if (gen5_composite_fallback(sna, src, NULL, dst))
 		return FALSE;
 
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
+
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return FALSE;
+		}
+
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
+	}
+
 	tmp->base.op = op;
 	if (!gen5_composite_set_target(dst, &tmp->base))
 		return FALSE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d613fe1..b927d08 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3088,12 +3088,24 @@ gen6_render_composite_spans(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen6_blend_op))
 		return FALSE;
 
-	if (need_tiling(sna, width, height))
-		return FALSE;
-
 	if (gen6_composite_fallback(sna, src, NULL, dst))
 		return FALSE;
 
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
+
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return FALSE;
+		}
+
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
+	}
+
 	tmp->base.op = op;
 	if (!gen6_composite_set_target(sna, &tmp->base, dst))
 		return FALSE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index a7c498e..4642549 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3172,12 +3172,24 @@ gen7_render_composite_spans(struct sna *sna,
 	if (op >= ARRAY_SIZE(gen7_blend_op))
 		return FALSE;
 
-	if (need_tiling(sna, width, height))
-		return FALSE;
-
 	if (gen7_composite_fallback(sna, src, NULL, dst))
 		return FALSE;
 
+	if (need_tiling(sna, width, height)) {
+		DBG(("%s: tiling, operation (%dx%d) too wide for pipeline\n",
+		     __FUNCTION__, width, height));
+
+		if (!is_gpu(dst->pDrawable)) {
+			DBG(("%s: fallback, tiled operation not on GPU\n",
+			     __FUNCTION__));
+			return FALSE;
+		}
+
+		return sna_tiling_composite_spans(op, src, dst,
+						  src_x, src_y, dst_x, dst_y,
+						  width, height, flags, tmp);
+	}
+
 	tmp->base.op = op;
 	if (!gen7_composite_set_target(sna, &tmp->base, dst))
 		return FALSE;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 4898223..fd42b21 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -517,6 +517,14 @@ Bool sna_tiling_composite(uint32_t op,
 			  int16_t dst_x, int16_t dst_y,
 			  int16_t width, int16_t height,
 			  struct sna_composite_op *tmp);
+Bool sna_tiling_composite_spans(uint32_t op,
+				PicturePtr src,
+				PicturePtr dst,
+				int16_t src_x,  int16_t src_y,
+				int16_t dst_x,  int16_t dst_y,
+				int16_t width,  int16_t height,
+				unsigned flags,
+				struct sna_composite_spans_op *tmp);
 Bool sna_tiling_fill_boxes(struct sna *sna,
 			   CARD8 op,
 			   PictFormat format,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 33fee42..16b90ca 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -39,6 +39,11 @@
 #define DBG(x) ErrorF x
 #endif
 
+struct sna_tile_span {
+	BoxRec box;
+	float opacity;
+};
+
 struct sna_tile_state {
 	int op;
 	PicturePtr src, mask, dst;
@@ -48,6 +53,7 @@ struct sna_tile_state {
 	int16_t mask_x, mask_y;
 	int16_t dst_x, dst_y;
 	int16_t width, height;
+	unsigned flags;
 
 	int rect_count;
 	int rect_size;
@@ -318,6 +324,262 @@ sna_tiling_composite(uint32_t op,
 	return TRUE;
 }
 
+fastcall static void
+sna_tiling_composite_spans_box(struct sna *sna,
+			       const struct sna_composite_spans_op *op,
+			       const BoxRec *box, float opacity)
+{
+	struct sna_tile_state *tile = op->base.priv;
+	struct sna_tile_span *a;
+
+	if (tile->rect_count == tile->rect_size) {
+		int newsize = tile->rect_size * 2;
+
+		if (tile->rects == tile->rects_embedded) {
+			a = malloc (sizeof(struct sna_tile_span) * newsize);
+			if (a == NULL)
+				return;
+
+			memcpy(a,
+			       tile->rects_embedded,
+			       sizeof(struct sna_tile_span) * tile->rect_count);
+		} else {
+			a = realloc(tile->rects,
+				    sizeof(struct sna_tile_span) * newsize);
+			if (a == NULL)
+				return;
+		}
+
+		tile->rects = (void *)a;
+		tile->rect_size = newsize;
+	} else
+		a = (void *)tile->rects;
+
+	a[tile->rect_count].box = *box;
+	a[tile->rect_count].opacity = opacity;
+	tile->rect_count++;
+	(void)sna;
+}
+
+static void
+sna_tiling_composite_spans_boxes(struct sna *sna,
+				 const struct sna_composite_spans_op *op,
+				 const BoxRec *box, int nbox, float opacity)
+{
+	while (nbox--)
+		sna_tiling_composite_spans_box(sna, op->base.priv, box++, opacity);
+	(void)sna;
+}
+
+fastcall static void
+sna_tiling_composite_spans_done(struct sna *sna,
+				const struct sna_composite_spans_op *op)
+{
+	struct sna_tile_state *tile = op->base.priv;
+	struct sna_composite_spans_op tmp;
+	int x, y, n, step;
+	bool force_fallback = false;
+
+	/* Use a small step to accommodate enlargement through tile alignment */
+	step = sna->render.max_3d_size;
+	if (tile->dst_x & (8*512 / tile->dst->pDrawable->bitsPerPixel - 1) ||
+	    tile->dst_y & 63)
+		step /= 2;
+	while (step * step * 4 > sna->kgem.max_copy_tile_size)
+		step /= 2;
+
+	DBG(("%s -- %dx%d, count=%d, step size=%d\n", __FUNCTION__,
+	     tile->width, tile->height, tile->rect_count, step));
+
+	if (tile->rect_count == 0)
+		goto done;
+
+	for (y = 0; y < tile->height; y += step) {
+		int height = step;
+		if (y + height > tile->height)
+			height = tile->height - y;
+		for (x = 0; x < tile->width; x += step) {
+			const struct sna_tile_span *r = (void *)tile->rects;
+			int width = step;
+			if (x + width > tile->width)
+				width = tile->width - x;
+			if (!force_fallback &&
+			    sna->render.composite_spans(sna, tile->op,
+							tile->src, tile->dst,
+							tile->src_x + x,  tile->src_y + y,
+							tile->dst_x + x,  tile->dst_y + y,
+							width, height, tile->flags,
+							memset(&tmp, 0, sizeof(tmp)))) {
+				for (n = 0; n < tile->rect_count; n++) {
+					BoxRec b;
+
+					b.x1 = r->box.x1 - tile->dst_x;
+					if (b.x1 < x)
+						b.x1 = x;
+
+					b.y1 = r->box.y1 - tile->dst_y;
+					if (b.y1 < y)
+						b.y1 = y;
+
+					b.x2 = r->box.x2 - tile->dst_x;
+					if (b.x2 > x + width)
+						b.x2 = x + width;
+
+					b.y2 = r->box.y2 - tile->dst_y;
+					if (b.y2 > y + height)
+						b.y2 = y + height;
+
+					DBG(("%s: rect[%d] = (%d, %d)x(%d,%d), tile=(%d,%d)x(%d, %d), blt=(%d,%d),(%d,%d)\n",
+					     __FUNCTION__, n,
+					     r->dst.x, r->dst.y,
+					     r->width, r->height,
+					     x, y, width, height,
+					     b.x1, b.y1, b.x2, b.y2));
+
+					if (b.y2 > b.y1 && b.x2 > b.x1)
+						tmp.box(sna, &tmp, &b, r->opacity);
+					r++;
+				}
+				tmp.done(sna, &tmp);
+			} else {
+				unsigned int flags;
+
+				DBG(("%s -- falback\n", __FUNCTION__));
+
+				if (tile->op <= PictOpSrc)
+					flags = MOVE_WRITE;
+				else
+					flags = MOVE_WRITE | MOVE_READ;
+				if (!sna_drawable_move_to_cpu(tile->dst->pDrawable,
+							      flags))
+					goto done;
+				if (tile->dst->alphaMap &&
+				    !sna_drawable_move_to_cpu(tile->dst->alphaMap->pDrawable,
+							      flags))
+					goto done;
+
+				if (tile->src->pDrawable &&
+				    !sna_drawable_move_to_cpu(tile->src->pDrawable,
+							      MOVE_READ))
+					goto done;
+				if (tile->src->alphaMap &&
+				    !sna_drawable_move_to_cpu(tile->src->alphaMap->pDrawable,
+							      MOVE_READ))
+					goto done;
+
+				for (n = 0; n < tile->rect_count; n++) {
+					BoxRec b;
+
+					b.x1 = r->box.x1 - tile->dst_x;
+					if (b.x1 < x)
+						b.x1 = x;
+
+					b.y1 = r->box.y1 - tile->dst_y;
+					if (b.y1 < y)
+						b.y1 = y;
+
+					b.x2 = r->box.x2 - tile->dst_x;
+					if (b.x2 > x + width)
+						b.x2 = x + width;
+
+					b.y2 = r->box.y2 - tile->dst_y;
+					if (b.y2 > y + height)
+						b.y2 = y + height;
+
+					DBG(("%s: rect[%d] = (%d, %d)x(%d,%d), tile=(%d,%d)x(%d, %d), blt=(%d,%d),(%d,%d)\n",
+					     __FUNCTION__, n,
+					     r->dst.x, r->dst.y,
+					     r->width, r->height,
+					     x, y, width, height,
+					     b.x1, b.y1, b.x2, b.y2));
+
+					if (b.y2 > b.y1 && b.x2 > b.x1) {
+						xRenderColor alpha;
+						PicturePtr mask;
+						int error;
+
+						alpha.red = alpha.green = alpha.blue = 0;
+						alpha.alpha = r->opacity * 0xffff;
+
+						mask = CreateSolidPicture(0, &alpha, &error);
+						if (!mask)
+							goto done;
+
+						fbComposite(tile->op,
+							    tile->src, mask, tile->dst,
+							    tile->src_x + x,  tile->src_y + y,
+							    0, 0,
+							    tile->dst_x + x,  tile->dst_y + y,
+							    width, height);
+
+						FreePicture(mask, 0);
+					}
+					r++;
+				}
+
+				force_fallback = true;
+			}
+		}
+	}
+
+done:
+	if (tile->rects != tile->rects_embedded)
+		free(tile->rects);
+	free(tile);
+}
+
+Bool
+sna_tiling_composite_spans(uint32_t op,
+			   PicturePtr src,
+			   PicturePtr dst,
+			   int16_t src_x,  int16_t src_y,
+			   int16_t dst_x,  int16_t dst_y,
+			   int16_t width,  int16_t height,
+			   unsigned flags,
+			   struct sna_composite_spans_op *tmp)
+{
+	struct sna_tile_state *tile;
+	struct sna_pixmap *priv;
+
+	DBG(("%s size=(%d, %d), tile=%d\n",
+	     __FUNCTION__, width, height,
+	     to_sna_from_drawable(dst->pDrawable)->render.max_3d_size));
+
+	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
+	if (priv == NULL || priv->gpu_bo == NULL)
+		return FALSE;
+
+	tile = malloc(sizeof(*tile));
+	if (!tile)
+		return FALSE;
+
+	tile->op = op;
+	tile->flags = flags;
+
+	tile->src  = src;
+	tile->mask = NULL;
+	tile->dst  = dst;
+
+	tile->src_x = src_x;
+	tile->src_y = src_y;
+	tile->mask_x = 0;
+	tile->mask_y = 0;
+	tile->dst_x = dst_x;
+	tile->dst_y = dst_y;
+	tile->width = width;
+	tile->height = height;
+	tile->rects = tile->rects_embedded;
+	tile->rect_count = 0;
+	tile->rect_size = ARRAY_SIZE(tile->rects_embedded);
+
+	tmp->box   = sna_tiling_composite_spans_box;
+	tmp->boxes = sna_tiling_composite_spans_boxes;
+	tmp->done  = sna_tiling_composite_spans_done;
+
+	tmp->base.priv = tile;
+	return TRUE;
+}
+
 Bool
 sna_tiling_fill_boxes(struct sna *sna,
 		      CARD8 op,
commit 222e6ff43ef683e82101fb360911fc01fbe00597
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 23:40:30 2012 +0100

    sna: Read inplace for fallback copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 85db919..1e86643 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4052,10 +4052,21 @@ fallback:
 		DBG(("%s: fallback -- src=(%d, %d), dst=(%d, %d)\n",
 		     __FUNCTION__, src_dx, src_dy, dst_dx, dst_dy));
 		if (src_priv) {
+			unsigned mode;
+
 			RegionTranslate(&region, src_dx, src_dy);
+
+			assert_pixmap_contains_box(src_pixmap,
+						   RegionExtents(&region));
+
+			mode = MOVE_READ;
+			if (src_priv->cpu_bo == NULL)
+				mode |= MOVE_INPLACE_HINT;
+
 			if (!sna_drawable_move_region_to_cpu(&src_pixmap->drawable,
-							     &region, MOVE_READ))
+							     &region, mode))
 				goto out;
+
 			RegionTranslate(&region, -src_dx, -src_dy);
 		}
 
commit 79d468925bb012806e097337e4e5930818c6ab46
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 17:54:21 2012 +0100

    sna: Decrease latency for 1x1 GetImage by using an inplace mapping
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3c17d75..85db919 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11982,8 +11982,9 @@ sna_get_image(DrawablePtr drawable,
 	      char *dst)
 {
 	RegionRec region;
+	unsigned int flags;
 
-	DBG(("%s (%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	region.extents.x1 = x + drawable->x;
 	region.extents.y1 = y + drawable->y;
@@ -11991,7 +11992,10 @@ sna_get_image(DrawablePtr drawable,
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
 
-	if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
+	flags = MOVE_READ;
+	if ((w | h) == 1)
+		flags |= MOVE_INPLACE_HINT;
+	if (!sna_drawable_move_region_to_cpu(drawable, &region, flags))
 		return;
 
 	if (format == ZPixmap &&
commit 2c2a8d3780f1d8de3f13bee8e068fdaf608ff9e9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 23:19:49 2012 +0100

    sna: Allow reads to be performed inplace
    
    If we can guess that we will only readback the data once, then we can
    skip the copy into the shadow.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 12b4e18..3c17d75 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -975,7 +975,7 @@ static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
 	if ((flags & MOVE_INPLACE_HINT) == 0 || priv->gpu_bo == NULL)
 		return false;
 
-	if (kgem_bo_is_busy(priv->gpu_bo))
+	if (flags & MOVE_WRITE && kgem_bo_is_busy(priv->gpu_bo))
 		return false;
 
 	return priv->stride != 0;
@@ -1087,7 +1087,6 @@ skip_inplace_map:
 	if (operate_inplace(priv, flags) &&
 	    pixmap_inplace(sna, pixmap, priv) &&
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
-		assert(flags & MOVE_WRITE);
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
 		DBG(("%s: operate inplace\n", __FUNCTION__));
@@ -1097,13 +1096,15 @@ skip_inplace_map:
 		if (pixmap->devPrivate.ptr != NULL) {
 			priv->mapped = true;
 			pixmap->devKind = priv->gpu_bo->pitch;
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height);
-			sna_damage_destroy(&priv->cpu_damage);
-			list_del(&priv->list);
-			priv->undamaged = false;
-			priv->clear = false;
+			if (flags & MOVE_WRITE) {
+				sna_damage_all(&priv->gpu_damage,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height);
+				sna_damage_destroy(&priv->cpu_damage);
+				list_del(&priv->list);
+				priv->undamaged = false;
+				priv->clear = false;
+			}
 			return true;
 		}
 
@@ -1470,7 +1471,6 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 	if (operate_inplace(priv, flags) &&
 	    region_inplace(sna, pixmap, region, priv)) {
-		assert(flags & MOVE_WRITE);
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
 		DBG(("%s: operate inplace\n", __FUNCTION__));
@@ -1480,7 +1480,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		if (pixmap->devPrivate.ptr != NULL) {
 			priv->mapped = true;
 			pixmap->devKind = priv->gpu_bo->pitch;
-			if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
+			if (flags & MOVE_WRITE &&
+			    !DAMAGE_IS_ALL(priv->gpu_damage)) {
 				sna_damage_add(&priv->gpu_damage, region);
 				if (sna_damage_is_all(&priv->gpu_damage,
 						      pixmap->drawable.width,
commit bc6997f6f751d3ba352dfc20c6717ec12b8fac47
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 17:54:15 2012 +0100

    sna: Cleanup damage processing after operating inplace
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dc97812..12b4e18 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -970,6 +970,17 @@ static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
 	return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
 }
 
+static inline bool operate_inplace(struct sna_pixmap *priv, unsigned flags)
+{
+	if ((flags & MOVE_INPLACE_HINT) == 0 || priv->gpu_bo == NULL)
+		return false;
+
+	if (kgem_bo_is_busy(priv->gpu_bo))
+		return false;
+
+	return priv->stride != 0;
+}
+
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -1073,9 +1084,7 @@ skip_inplace_map:
 
 	assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
 
-	if (flags & MOVE_INPLACE_HINT &&
-	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_is_busy(priv->gpu_bo) &&
+	if (operate_inplace(priv, flags) &&
 	    pixmap_inplace(sna, pixmap, priv) &&
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -1091,6 +1100,9 @@ skip_inplace_map:
 			sna_damage_all(&priv->gpu_damage,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height);
+			sna_damage_destroy(&priv->cpu_damage);
+			list_del(&priv->list);
+			priv->undamaged = false;
 			priv->clear = false;
 			return true;
 		}
@@ -1347,6 +1359,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	if (sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
+		DBG(("%s: pixmap=%ld all damaged on CPU\n",
+		     __FUNCTION__, pixmap->drawable.serialNumber));
+
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = false;
 
@@ -1453,11 +1468,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		}
 	}
 
-	if (flags & MOVE_INPLACE_HINT &&
-	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_is_busy(priv->gpu_bo) &&
-	    region_inplace(sna, pixmap, region, priv) &&
-	    sna_pixmap_move_area_to_gpu(pixmap, &region->extents, flags)) {
+	if (operate_inplace(priv, flags) &&
+	    region_inplace(sna, pixmap, region, priv)) {
 		assert(flags & MOVE_WRITE);
 		kgem_bo_submit(&sna->kgem, priv->gpu_bo);
 
@@ -1468,8 +1480,18 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		if (pixmap->devPrivate.ptr != NULL) {
 			priv->mapped = true;
 			pixmap->devKind = priv->gpu_bo->pitch;
-			if (!DAMAGE_IS_ALL(priv->gpu_damage))
+			if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 				sna_damage_add(&priv->gpu_damage, region);
+				if (sna_damage_is_all(&priv->gpu_damage,
+						      pixmap->drawable.width,
+						      pixmap->drawable.height)) {
+					DBG(("%s: replaced entire pixmap, destroying CPU shadow\n",
+					     __FUNCTION__));
+					sna_damage_destroy(&priv->cpu_damage);
+					priv->undamaged = false;
+					list_del(&priv->list);
+				}
+			}
 			priv->clear = false;
 			return true;
 		}
@@ -3943,12 +3965,21 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					goto fallback;
 
 				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
-					RegionTranslate(&region, dst_dx, dst_dy);
-					assert_pixmap_contains_box(dst_pixmap,
-								   RegionExtents(&region));
-					sna_damage_add(&dst_priv->gpu_damage,
-						       &region);
-					RegionTranslate(&region, -dst_dx, -dst_dy);
+					if (replaces) {
+						sna_damage_destroy(&dst_priv->cpu_damage);
+						sna_damage_all(&dst_priv->gpu_damage,
+							       dst_pixmap->drawable.width,
+							       dst_pixmap->drawable.height);
+						list_del(&dst_priv->list);
+						dst_priv->undamaged = false;
+					} else {
+						RegionTranslate(&region, dst_dx, dst_dy);
+						assert_pixmap_contains_box(dst_pixmap,
+									   RegionExtents(&region));
+						sna_damage_add(&dst_priv->gpu_damage,
+							       &region);
+						RegionTranslate(&region, -dst_dx, -dst_dy);
+					}
 				}
 			}
 		}
commit 937ca8a5d8a0f70a0724db1519bb7b5fc0857425
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 17:53:58 2012 +0100

    sna: Use memset for simple clears
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ed996df..dc97812 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2979,13 +2979,18 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		}
 
-		pixman_fill(pixmap->devPrivate.ptr,
-			    pixmap->devKind/sizeof(uint32_t),
-			    pixmap->drawable.bitsPerPixel,
-			    0, 0,
-			    pixmap->drawable.width,
-			    pixmap->drawable.height,
-			    priv->clear_color);
+		if (priv->clear_color = 0) {
+			memset(pixmap->devPrivate.ptr,
+			       0, pixmap->devKind * pixmap->drawable.height);
+		} else {
+			pixman_fill(pixmap->devPrivate.ptr,
+				    pixmap->devKind/sizeof(uint32_t),
+				    pixmap->drawable.bitsPerPixel,
+				    0, 0,
+				    pixmap->drawable.width,
+				    pixmap->drawable.height,
+				    priv->clear_color);
+		}
 
 		sna_damage_all(&priv->cpu_damage,
 			       pixmap->drawable.width,
commit de4572b0b52e2fcfcad04660ee2f81ee88d500a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 13:40:26 2012 +0100

    sna: Inspect CPU damaged state when deciding upon Composite placement
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index fc5bf8a..85453d3 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -410,6 +410,21 @@ static void apply_damage(struct sna_composite_op *op, RegionPtr region)
 	sna_damage_add(op->damage, region);
 }
 
+static inline bool use_cpu(PixmapPtr pixmap, struct sna_pixmap *priv,
+			   CARD8 op, INT16 width, INT16 height)
+{
+	if (too_small(priv))
+		return true;
+
+	if (DAMAGE_IS_ALL(priv->cpu_damage) &&
+	    (op > PictOpSrc ||
+	     width  < pixmap->drawable.width ||
+	     height < pixmap->drawable.height))
+		return true;
+
+	return false;
+}
+
 void
 sna_composite(CARD8 op,
 	      PicturePtr src,
@@ -481,8 +496,9 @@ sna_composite(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(priv) && !picture_is_gpu(src) && !picture_is_gpu(mask)) {
-		DBG(("%s: fallback due to too small\n", __FUNCTION__));
+	if (use_cpu(pixmap, priv, op, width, height) &&
+	    !picture_is_gpu(src) && !picture_is_gpu(mask)) {
+		DBG(("%s: fallback, dst is too small (or completely damaged)\n", __FUNCTION__));
 		goto fallback;
 	}
 
@@ -792,15 +808,10 @@ sna_composite_rectangles(CARD8		 op,
 		goto fallback;
 	}
 
-	if (too_small(priv)) {
-		DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
-		goto fallback;
-	}
-
-	if (DAMAGE_IS_ALL(priv->cpu_damage) &&
-	    (region.extents.x2 - region.extents.x1 < pixmap->drawable.width ||
-	     region.extents.y2 - region.extents.y1 < pixmap->drawable.height)) {
-		DBG(("%s: fallback due to completely damaged CPU\n", __FUNCTION__));
+	if (use_cpu(pixmap, priv, op,
+		    region.extents.x2 - region.extents.x1,
+		    region.extents.y2 - region.extents.y1)) {
+		DBG(("%s: fallback, dst is too small (or completely damaged)\n", __FUNCTION__));
 		goto fallback;
 	}
 
commit b689cd924c500373e1e293dd9eb54a238e400381
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 16:25:51 2012 +0100

    sna: Composite traps inplace if the CPU is already all-damaged
    
    One outcome is that inspecting the usage patterns afterwards indicated
    that we were missing an opportunity to reduce unaligned boxes to an
    inplace operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index ab1cd39..fc5bf8a 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -207,26 +207,28 @@ sna_compute_composite_region(RegionPtr region,
 	}
 
 	/* clip against src */
-	if (src->pDrawable) {
-		src_x += src->pDrawable->x;
-		src_y += src->pDrawable->y;
-	}
-	if (!clip_to_src(region, src, dst_x - src_x, dst_y - src_y)) {
-		pixman_region_fini (region);
-		return FALSE;
-	}
-	DBG(("%s: clip against src: (%d, %d), (%d, %d)\n",
-	     __FUNCTION__,
-	     region->extents.x1, region->extents.y1,
-	     region->extents.x2, region->extents.y2));
-
-	if (src->alphaMap) {
-		if (!clip_to_src(region, src->alphaMap,
-				 dst_x - (src_x - src->alphaOrigin.x),
-				 dst_y - (src_y - src->alphaOrigin.y))) {
-			pixman_region_fini(region);
+	if (src) {
+		if (src->pDrawable) {
+			src_x += src->pDrawable->x;
+			src_y += src->pDrawable->y;
+		}
+		if (!clip_to_src(region, src, dst_x - src_x, dst_y - src_y)) {
+			pixman_region_fini (region);
 			return FALSE;
 		}
+		DBG(("%s: clip against src: (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     region->extents.x1, region->extents.y1,
+		     region->extents.x2, region->extents.y2));
+
+		if (src->alphaMap) {
+			if (!clip_to_src(region, src->alphaMap,
+					 dst_x - (src_x - src->alphaOrigin.x),
+					 dst_y - (src_y - src->alphaOrigin.y))) {
+				pixman_region_fini(region);
+				return FALSE;
+			}
+		}
 	}
 
 	/* clip against mask */
@@ -361,7 +363,8 @@ sna_compute_composite_extents(BoxPtr extents,
 	     extents->x1, extents->y1,
 	     extents->x2, extents->y2));
 
-	trim_source_extents(extents, src, dst_x - src_x, dst_y - src_y);
+	if (src)
+		trim_source_extents(extents, src, dst_x - src_x, dst_y - src_y);
 	if (mask)
 		trim_source_extents(extents, mask,
 				    dst_x - mask_x, dst_y - mask_y);
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index a3bdb16..61b4fb1 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2541,9 +2541,10 @@ composite_aligned_boxes(struct sna *sna,
 			PicturePtr dst,
 			PictFormatPtr maskFormat,
 			INT16 src_x, INT16 src_y,
-			int ntrap, xTrapezoid *traps)
+			int ntrap, xTrapezoid *traps,
+			bool force_fallback)
 {
-	BoxRec stack_boxes[64], *boxes, extents;
+	BoxRec stack_boxes[64], *boxes;
 	pixman_region16_t region, clip;
 	struct sna_composite_op tmp;
 	Bool ret = true;
@@ -2564,8 +2565,8 @@ composite_aligned_boxes(struct sna *sna,
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
 
-	extents.x1 = extents.y1 = 32767;
-	extents.x2 = extents.y2 = -32767;
+	region.extents.x1 = region.extents.y1 = 32767;
+	region.extents.x2 = region.extents.y2 = -32767;
 	num_boxes = 0;
 	for (n = 0; n < ntrap; n++) {
 		boxes[num_boxes].x1 = dx + pixman_fixed_to_int(traps[n].left.p1.x + pixman_fixed_1_minus_e/2);
@@ -2578,15 +2579,15 @@ composite_aligned_boxes(struct sna *sna,
 		if (boxes[num_boxes].y1 >= boxes[num_boxes].y2)
 			continue;
 
-		if (boxes[num_boxes].x1 < extents.x1)
-			extents.x1 = boxes[num_boxes].x1;
-		if (boxes[num_boxes].x2 > extents.x2)
-			extents.x2 = boxes[num_boxes].x2;
+		if (boxes[num_boxes].x1 < region.extents.x1)
+			region.extents.x1 = boxes[num_boxes].x1;
+		if (boxes[num_boxes].x2 > region.extents.x2)
+			region.extents.x2 = boxes[num_boxes].x2;
 
-		if (boxes[num_boxes].y1 < extents.y1)
-			extents.y1 = boxes[num_boxes].y1;
-		if (boxes[num_boxes].y2 > extents.y2)
-			extents.y2 = boxes[num_boxes].y2;
+		if (boxes[num_boxes].y1 < region.extents.y1)
+			region.extents.y1 = boxes[num_boxes].y1;
+		if (boxes[num_boxes].y2 > region.extents.y2)
+			region.extents.y2 = boxes[num_boxes].y2;
 
 		num_boxes++;
 	}
@@ -2596,37 +2597,96 @@ composite_aligned_boxes(struct sna *sna,
 
 	DBG(("%s: extents (%d, %d), (%d, %d) offset of (%d, %d)\n",
 	     __FUNCTION__,
-	     extents.x1, extents.y1,
-	     extents.x2, extents.y2,
-	     extents.x1 - boxes[0].x1,
-	     extents.y1 - boxes[0].y1));
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2,
+	     region.extents.x1 - boxes[0].x1,
+	     region.extents.y1 - boxes[0].y1));
 
-	src_x += extents.x1 - boxes[0].x1;
-	src_y += extents.y1 - boxes[0].y1;
+	src_x += region.extents.x1 - boxes[0].x1;
+	src_y += region.extents.y1 - boxes[0].y1;
 
 	if (!sna_compute_composite_region(&clip,
 					  src, NULL, dst,
 					  src_x,  src_y,
 					  0, 0,
-					  extents.x1 - dx, extents.y1 - dy,
-					  extents.x2 - extents.x1,
-					  extents.y2 - extents.y1)) {
+					  region.extents.x1 - dx, region.extents.y1 - dy,
+					  region.extents.x2 - region.extents.x1,
+					  region.extents.y2 - region.extents.y1)) {
 		DBG(("%s: trapezoids do not intersect drawable clips\n",
 		     __FUNCTION__)) ;
 		goto done;
 	}
 
-	memset(&tmp, 0, sizeof(tmp));
-	if (!sna->render.composite(sna, op, src, NULL, dst,
+	if (force_fallback ||
+	    !sna->render.composite(sna, op, src, NULL, dst,
 				   src_x,  src_y,
 				   0, 0,
-				   extents.x1,  extents.y1,
-				   extents.x2 - extents.x1,
-				   extents.y2 - extents.y1,
-				   &tmp)) {
+				   clip.extents.x1,  clip.extents.y1,
+				   clip.extents.x2 - clip.extents.x1,
+				   clip.extents.y2 - clip.extents.y1,
+				   memset(&tmp, 0, sizeof(tmp)))) {
+		unsigned int flags;
+		pixman_box16_t *b;
+		int i, count;
+
 		DBG(("%s: composite render op not supported\n",
 		     __FUNCTION__));
-		ret = false;
+
+		flags = MOVE_READ | MOVE_WRITE;
+		if (n == 1 && op <= PictOpSrc)
+			flags = MOVE_WRITE | MOVE_INPLACE_HINT;
+
+		if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &clip, flags))
+			goto done;
+		if (dst->alphaMap  &&
+		    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
+					      MOVE_READ | MOVE_WRITE))
+			goto done;
+		if (src->pDrawable) {
+			if (!sna_drawable_move_to_cpu(src->pDrawable,
+						      MOVE_READ))
+				goto done;
+			if (src->alphaMap &&
+			    !sna_drawable_move_to_cpu(src->alphaMap->pDrawable,
+						      MOVE_READ))
+				goto done;
+		}
+
+		DBG(("%s: fbComposite()\n", __FUNCTION__));
+		if (maskFormat) {
+			pixman_region_init_rects(&region, boxes, num_boxes);
+			RegionIntersect(&region, &region, &clip);
+
+			b = REGION_RECTS(&region);
+			count = REGION_NUM_RECTS(&region);
+			for (i = 0; i < count; i++) {
+				fbComposite(op, src, NULL, dst,
+					    src_x + b[i].x1 - boxes[0].x1,
+					    src_y + b[i].y1 - boxes[0].y1,
+					    0, 0,
+					    b[i].x1, b[i].y1,
+					    b[i].x2 - b[i].x1, b[i].y2 - b[i].y1);
+			}
+			pixman_region_fini(&region);
+		} else {
+			for (n = 0; n < num_boxes; n++) {
+				pixman_region_init_rects(&region, &boxes[n], 1);
+				RegionIntersect(&region, &region, &clip);
+				b = REGION_RECTS(&region);
+				count = REGION_NUM_RECTS(&region);
+				for (i = 0; i < count; i++) {
+					fbComposite(op, src, NULL, dst,
+						    src_x + b[i].x1 - boxes[0].x1,
+						    src_y + b[i].y1 - boxes[0].y1,
+						    0, 0,
+						    b[i].x1, b[i].y1,
+						    b[i].x2 - b[i].x1, b[i].y2 - b[i].y1);
+				}
+				pixman_region_fini(&region);
+				pixman_region_fini(&region);
+			}
+		}
+		ret = true;
 		goto done;
 	}
 
@@ -2877,15 +2937,17 @@ blt_unaligned_box_row(PixmapPtr scratch,
 			    y1, y2,
 			    covered * (grid_coverage(SAMPLES_X, trap->right.p1.x) - grid_coverage(SAMPLES_X, trap->left.p1.x)));
 	} else {
-		if (pixman_fixed_frac(trap->left.p1.x))
+		if (pixman_fixed_frac(trap->left.p1.x)) {
 			blt_opacity(scratch,
-				    x1, x1+1,
+				    x1, x1,
 				    y1, y2,
 				    covered * (SAMPLES_X - grid_coverage(SAMPLES_X, trap->left.p1.x)));
+			x1++;
+		}
 
-		if (x2 > x1 + 1) {
+		if (x2 > x1) {
 			blt_opacity(scratch,
-				    x1 + 1, x2,
+				    x1, x2,
 				    y1, y2,
 				    covered*SAMPLES_X);
 		}
@@ -2898,20 +2960,581 @@ blt_unaligned_box_row(PixmapPtr scratch,
 	}
 }
 
-static Bool
+#define ONE_HALF 0x7f
+#define RB_MASK 0x00ff00ff
+#define RB_ONE_HALF 0x007f007f
+#define RB_MASK_PLUS_ONE 0x01000100
+#define G_SHIFT 8
+
+static force_inline uint32_t
+mul8x2_8 (uint32_t a, uint8_t b)
+{
+	uint32_t t = (a & RB_MASK) * b + RB_ONE_HALF;
+	return ((t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT) & RB_MASK;
+}
+
+static force_inline uint32_t
+add8x2_8x2(uint32_t a, uint32_t b)
+{
+	uint32_t t = a + b;
+	t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK);
+	return t & RB_MASK;
+}
+
+static force_inline uint32_t
+lerp8x4(uint32_t src, uint8_t a, uint32_t dst)
+{
+	return (add8x2_8x2(mul8x2_8(src, a),
+			   mul8x2_8(dst, ~a)) |
+		add8x2_8x2(mul8x2_8(src >> G_SHIFT, a),
+			   mul8x2_8(dst >> G_SHIFT, ~a)) << G_SHIFT);
+}
+
+inline static void
+lerp32_opacity(PixmapPtr scratch,
+	       uint32_t color,
+	       int16_t x, int16_t w,
+	       int16_t y, int16_t h,
+	       uint8_t opacity)
+{
+	uint32_t *ptr;
+	int stride, i;
+
+	ptr = (uint32_t*)((uint8_t *)scratch->devPrivate.ptr + scratch->devKind * y);
+	ptr += x;
+	stride = scratch->devKind / 4;
+
+	if (opacity == 0xff) {
+		if ((w | h) == 1) {
+			*ptr = color;
+		} else {
+			if (w < 16) {
+				do {
+					for (i = 0; i < w; i++)
+						ptr[i] = color;
+					ptr += stride;
+				} while (--h);
+			} else {
+				pixman_fill(ptr, stride, 32,
+					    0, 0, w, h, color);
+			}
+		}
+	} else {
+		if ((w | h) == 1) {
+			*ptr = lerp8x4(color, opacity, *ptr);
+		} else if (w == 1) {
+			do {
+				*ptr = lerp8x4(color, opacity, *ptr);
+				ptr += stride;
+			} while (--h);
+		} else{
+			do {
+				for (i = 0; i < w; i++)
+					ptr[i] = lerp8x4(color, opacity, ptr[i]);
+				ptr += stride;
+			} while (--h);
+		}
+	}
+}
+
+static void
+lerp32_unaligned_box_row(PixmapPtr scratch, uint32_t color,
+			 const BoxRec *extents,
+			 xTrapezoid *trap, int16_t dx,
+			 int16_t y, int16_t h,
+			 uint8_t covered)
+{
+	int16_t x1 = pixman_fixed_to_int(trap->left.p1.x) + dx;
+	int16_t fx1 = grid_coverage(SAMPLES_X, trap->left.p1.x);
+	int16_t x2 = pixman_fixed_to_int(trap->right.p1.x) + dx;
+	int16_t fx2 = grid_coverage(SAMPLES_X, trap->right.p1.x);
+
+	if (x1 < extents->x1)
+		x1 = extents->x1, fx1 = 0;
+	if (x2 > extents->x2)
+		x2 = extents->x2, fx2 = 0;
+
+	if (x2 < x1) {
+		if (fx1) {
+			lerp32_opacity(scratch, color,
+				       x1, 1,
+				       y, h,
+				       covered * (SAMPLES_X - fx1));
+			x1++;
+		}
+
+		if (x2 > x1) {
+			lerp32_opacity(scratch, color,
+				       x1, x2-x1,
+				       y, h,
+				       covered*SAMPLES_X);
+		}
+
+		if (fx2) {
+			lerp32_opacity(scratch, color,
+				       x2, 1,
+				       y, h,
+				       covered * fx2);
+		}
+	} else if (x1 == x2 && fx2 > fx1) {
+		lerp32_opacity(scratch, color,
+			       x1, 1,
+			       y, h,
+			       covered * (fx2 - fx1));
+	}
+}
+
+struct pixman_inplace {
+	pixman_image_t *image, *source, *mask;
+	uint32_t color;
+	uint32_t *bits;
+	int dx, dy;
+	int sx, sy;
+	uint8_t op;
+};
+
+static force_inline uint8_t
+mul_8_8(uint8_t a, uint8_t b)
+{
+    uint16_t t = a * (uint16_t)b + 0x7f;
+    return ((t >> 8) + t) >> 8;
+}
+
+static inline uint32_t multa(uint32_t s, uint8_t a, int shift)
+{
+	return mul_8_8((s >> shift) & 0xff, a) << shift;
+}
+
+static inline uint32_t mul_4x8_8(uint32_t color, uint8_t alpha)
+{
+	uint32_t v;
+
+	v = multa(color, alpha, 24);
+	v |= multa(color, alpha, 16);
+	v |= multa(color, alpha, 8);
+	v |= multa(color, alpha, 0);
+
+	return v;
+}
+
+inline static void
+pixsolid_opacity(struct pixman_inplace *pi,
+		 int16_t x, int16_t w,
+		 int16_t y, int16_t h,
+		 uint8_t opacity)
+{
+	if (opacity == 0xff)
+		*pi->bits = pi->color;
+	else
+		*pi->bits = mul_4x8_8(pi->color, opacity);
+	pixman_image_composite(pi->op, pi->source, NULL, pi->image,
+			       0, 0, 0, 0, pi->dx + x, pi->dy + y, w, h);
+}
+
+static void
+pixsolid_unaligned_box_row(struct pixman_inplace *pi,
+			   const BoxRec *extents,
+			   xTrapezoid *trap,
+			   int16_t y, int16_t h,
+			   uint8_t covered)
+{
+	int16_t x1 = pixman_fixed_to_int(trap->left.p1.x);
+	int16_t fx1 = grid_coverage(SAMPLES_X, trap->left.p1.x);
+	int16_t x2 = pixman_fixed_to_int(trap->right.p1.x);
+	int16_t fx2 = grid_coverage(SAMPLES_X, trap->right.p1.x);
+
+	if (x1 < extents->x1)
+		x1 = extents->x1, fx1 = 0;
+	if (x2 > extents->x2)
+		x2 = extents->x2, fx2 = 0;
+
+	if (x2 < x1) {
+		if (fx1) {
+			pixsolid_opacity(pi, x1, 1, y, h,
+					 covered * (SAMPLES_X - fx1));
+			x1++;
+		}
+
+		if (x2 > x1)
+			pixsolid_opacity(pi, x1, x2-x1, y, h, covered*SAMPLES_X);
+
+		if (fx2)
+			pixsolid_opacity(pi, x2, 1, y, h, covered * fx2);
+	} else if (x1 == x2 && fx2 > fx1) {
+		pixsolid_opacity(pi, x1, 1, y, h, covered * (fx2 - fx1));
+	}
+}
+
+static bool
+composite_unaligned_boxes_inplace__solid(CARD8 op, uint32_t color,
+					 PicturePtr dst, int n, xTrapezoid *t,
+					 bool force_fallback)
+{
+	PixmapPtr pixmap;
+	int16_t dx, dy;
+
+	if (!force_fallback && is_gpu(dst->pDrawable)) {
+		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
+		     __FUNCTION__));
+
+		return false;
+	}
+
+	/* XXX a8 boxes */
+	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8) {
+		DBG(("%s: fallback -- can not perform operation in place, unhanbled format %08lx\n",
+		     __FUNCTION__, dst->format));
+		goto pixman;
+	}
+
+	pixmap = get_drawable_pixmap(dst->pDrawable);
+	get_drawable_deltas(dst->pDrawable, pixmap, &dx, &dy);
+
+	if (op == PictOpOver && (color >> 24) == 0xff)
+		op = PictOpSrc;
+	if (op == PictOpOver) {
+		struct sna_pixmap *priv = sna_pixmap(pixmap);
+		if (priv && priv->clear && priv->clear_color == 0)
+			op = PictOpSrc;
+	}
+
+	switch (op) {
+	case PictOpSrc:
+		break;
+	default:
+		DBG(("%s: fallback -- can not perform op [%d] in place\n",
+		     __FUNCTION__, op));
+		goto pixman;
+	}
+
+	do {
+		RegionRec clip;
+		BoxPtr extents;
+		int count;
+
+		clip.extents.x1 = pixman_fixed_to_int(t->left.p1.x);
+		clip.extents.x2 = pixman_fixed_to_int(t->right.p1.x + pixman_fixed_1_minus_e);
+		clip.extents.y1 = pixman_fixed_to_int(t->top);
+		clip.extents.y2 = pixman_fixed_to_int(t->bottom + pixman_fixed_1_minus_e);
+		clip.data = NULL;
+
+		if (!sna_compute_composite_region(&clip,
+						   NULL, NULL, dst,
+						   0, 0,
+						   0, 0,
+						   clip.extents.x1, clip.extents.y1,
+						   clip.extents.x2 - clip.extents.x1,
+						   clip.extents.y2 - clip.extents.y1))
+			continue;
+
+		if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &clip,
+						     MOVE_WRITE | MOVE_READ)) {
+			RegionUninit(&clip);
+			continue;
+		}
+
+		RegionTranslate(&clip, dx, dy);
+		count = REGION_NUM_RECTS(&clip);
+		extents = REGION_RECTS(&clip);
+		while (count--) {
+			int16_t y1 = dy + pixman_fixed_to_int(t->top);
+			int16_t fy1 = pixman_fixed_frac(t->top);
+			int16_t y2 = dy + pixman_fixed_to_int(t->bottom);
+			int16_t fy2 = pixman_fixed_frac(t->bottom);
+
+			if (y1 < extents->y1)
+				y1 = extents->y1, fy1 = 0;
+			if (y2 > extents->y2)
+				y2 = extents->y2, fy2 = 0;
+			if (y1 < y2) {
+				if (fy1) {
+					lerp32_unaligned_box_row(pixmap, color, extents,
+								 t, dx, y1, 1,
+								 SAMPLES_Y - grid_coverage(SAMPLES_Y, fy1));
+					y1++;
+				}
+
+				if (y2 > y1)
+					lerp32_unaligned_box_row(pixmap, color, extents,
+								 t, dx, y1, y2 - y1,
+								 SAMPLES_Y);
+
+				if (fy2)
+					lerp32_unaligned_box_row(pixmap, color,  extents,
+								 t, dx, y2, 1,
+								 grid_coverage(SAMPLES_Y, fy2));
+			} else if (y1 == y2 && fy2 > fy1) {
+				lerp32_unaligned_box_row(pixmap, color, extents,
+							 t, dx, y1, 1,
+							 grid_coverage(SAMPLES_Y, fy2) - grid_coverage(SAMPLES_Y, fy1));
+			}
+			extents++;
+		}
+
+		RegionUninit(&clip);
+	} while (--n && t++);
+
+	return true;
+
+pixman:
+	do {
+		struct pixman_inplace pi;
+		RegionRec clip;
+		BoxPtr extents;
+		int count;
+
+		clip.extents.x1 = pixman_fixed_to_int(t->left.p1.x);
+		clip.extents.x2 = pixman_fixed_to_int(t->right.p1.x + pixman_fixed_1_minus_e);
+		clip.extents.y1 = pixman_fixed_to_int(t->top);
+		clip.extents.y2 = pixman_fixed_to_int(t->bottom + pixman_fixed_1_minus_e);
+		clip.data = NULL;
+
+		if (!sna_compute_composite_region(&clip,
+						   NULL, NULL, dst,
+						   0, 0,
+						   0, 0,
+						   clip.extents.x1, clip.extents.y1,
+						   clip.extents.x2 - clip.extents.x1,
+						   clip.extents.y2 - clip.extents.y1))
+			continue;
+
+		if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &clip,
+						     MOVE_WRITE | MOVE_READ)) {
+			RegionUninit(&clip);
+			continue;
+		}
+
+		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
+		pi.source = pixman_image_create_bits(PIXMAN_a8r8g8b8, 1, 1, NULL, 0);
+		pixman_image_set_repeat(pi.source, PIXMAN_REPEAT_NORMAL);
+		pi.bits = pixman_image_get_data(pi.source);
+		pi.op = op;
+
+		count = REGION_NUM_RECTS(&clip);
+		extents = REGION_RECTS(&clip);
+		while (count--) {
+			int16_t y1 = pixman_fixed_to_int(t->top);
+			int16_t fy1 = pixman_fixed_frac(t->top);
+			int16_t y2 = pixman_fixed_to_int(t->bottom);
+			int16_t fy2 = pixman_fixed_frac(t->bottom);
+
+			if (y1 < extents->y1)
+				y1 = extents->y1, fy1 = 0;
+			if (y2 > extents->y2)
+				y2 = extents->y2, fy2 = 0;
+			if (y1 < y2) {
+				if (fy1) {
+					pixsolid_unaligned_box_row(&pi, extents, t, y1, 1,
+								   SAMPLES_Y - grid_coverage(SAMPLES_Y, fy1));
+					y1++;
+				}
+
+				if (y2 > y1)
+					pixsolid_unaligned_box_row(&pi, extents, t, y1, y2 - y1,
+								   SAMPLES_Y);
+
+				if (fy2)
+					pixsolid_unaligned_box_row(&pi, extents, t, y2, 1,
+								   grid_coverage(SAMPLES_Y, fy2));
+			} else if (y1 == y2 && fy2 > fy1) {
+				pixsolid_unaligned_box_row(&pi, extents, t, y1, 1,
+							   grid_coverage(SAMPLES_Y, fy2) - grid_coverage(SAMPLES_Y, fy1));
+			}
+			extents++;
+		}
+
+		RegionUninit(&clip);
+		pixman_image_unref(pi.image);
+		pixman_image_unref(pi.source);
+	} while (--n && t++);
+	return true;
+}
+
+inline static void
+pixmask_opacity(struct pixman_inplace *pi,
+		int16_t x, int16_t w,
+		int16_t y, int16_t h,
+		uint8_t opacity)
+{
+	if (opacity == 0xff) {
+		pixman_image_composite(pi->op, pi->source, NULL, pi->image,
+				       pi->sx + x, pi->sy + y,
+				       0, 0,
+				       pi->dx + x, pi->dy + y,
+				       w, h);
+	} else {
+		*pi->bits = opacity;
+		pixman_image_composite(pi->op, pi->source, pi->mask, pi->image,
+				       pi->sx + x, pi->sy + y,
+				       0, 0,
+				       pi->dx + x, pi->dy + y,
+				       w, h);
+	}
+}
+
+static void
+pixmask_unaligned_box_row(struct pixman_inplace *pi,
+			  const BoxRec *extents,
+			  xTrapezoid *trap,
+			  int16_t y, int16_t h,
+			  uint8_t covered)
+{
+	int16_t x1 = pixman_fixed_to_int(trap->left.p1.x);
+	int16_t fx1 = grid_coverage(SAMPLES_X, trap->left.p1.x);
+	int16_t x2 = pixman_fixed_to_int(trap->right.p1.x);
+	int16_t fx2 = grid_coverage(SAMPLES_X, trap->right.p1.x);
+
+	if (x1 < extents->x1)
+		x1 = extents->x1, fx1 = 0;
+	if (x2 > extents->x2)
+		x2 = extents->x2, fx2 = 0;
+
+	if (x2 < x1) {
+		if (fx1) {
+			pixmask_opacity(pi, x1, 1, y, h,
+					 covered * (SAMPLES_X - fx1));
+			x1++;
+		}
+
+		if (x2 > x1)
+			pixmask_opacity(pi, x1, x2-x1, y, h, covered*SAMPLES_X);
+
+		if (fx2)
+			pixmask_opacity(pi, x2, 1, y, h, covered * fx2);
+	} else if (x1 == x2 && fx2 > fx1) {
+		pixmask_opacity(pi, x1, 1, y, h, covered * (fx2 - fx1));
+	}
+}
+
+static bool
+composite_unaligned_boxes_inplace(CARD8 op,
+				  PicturePtr src, int16_t src_x, int16_t src_y,
+				  PicturePtr dst, int n, xTrapezoid *t,
+				  bool force_fallback)
+{
+	PixmapPtr pixmap;
+	int16_t dx, dy;
+
+	if (!force_fallback && is_gpu(dst->pDrawable)) {
+		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
+		     __FUNCTION__));
+
+		return false;
+	}
+
+	src_x -= pixman_fixed_to_int(t[0].left.p1.x);
+	src_y -= pixman_fixed_to_int(t[0].left.p1.y);
+
+	pixmap = get_drawable_pixmap(dst->pDrawable);
+	get_drawable_deltas(dst->pDrawable, pixmap, &dx, &dy);
+
+	do {
+		struct pixman_inplace pi;
+		RegionRec clip;
+		BoxPtr extents;
+		int count;
+
+		clip.extents.x1 = pixman_fixed_to_int(t->left.p1.x);
+		clip.extents.x2 = pixman_fixed_to_int(t->right.p1.x + pixman_fixed_1_minus_e);
+		clip.extents.y1 = pixman_fixed_to_int(t->top);
+		clip.extents.y2 = pixman_fixed_to_int(t->bottom + pixman_fixed_1_minus_e);
+		clip.data = NULL;
+
+		if (!sna_compute_composite_region(&clip,
+						   NULL, NULL, dst,
+						   0, 0,
+						   0, 0,
+						   clip.extents.x1, clip.extents.y1,
+						   clip.extents.x2 - clip.extents.x1,
+						   clip.extents.y2 - clip.extents.y1))
+			continue;
+
+		if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &clip,
+						     MOVE_WRITE | MOVE_READ)) {
+			RegionUninit(&clip);
+			continue;
+		}
+
+		pi.image = image_from_pict(dst, FALSE, &pi.dx, &pi.dy);
+		pi.source = image_from_pict(src, TRUE, &pi.sx, &pi.sy);
+		pi.sx += src_x;
+		pi.sy += src_y;
+		pi.mask = pixman_image_create_bits(PIXMAN_a8, 1, 1, NULL, 0);
+		pixman_image_set_repeat(pi.mask, PIXMAN_REPEAT_NORMAL);
+		pi.bits = pixman_image_get_data(pi.mask);
+		pi.op = op;
+
+		count = REGION_NUM_RECTS(&clip);
+		extents = REGION_RECTS(&clip);
+		while (count--) {
+			int16_t y1 = pixman_fixed_to_int(t->top);
+			int16_t fy1 = pixman_fixed_frac(t->top);
+			int16_t y2 = pixman_fixed_to_int(t->bottom);
+			int16_t fy2 = pixman_fixed_frac(t->bottom);
+
+			if (y1 < extents->y1)
+				y1 = extents->y1, fy1 = 0;
+			if (y2 > extents->y2)
+				y2 = extents->y2, fy2 = 0;
+			if (y1 < y2) {
+				if (fy1) {
+					pixmask_unaligned_box_row(&pi, extents, t, y1, 1,
+								   SAMPLES_Y - grid_coverage(SAMPLES_Y, fy1));
+					y1++;
+				}
+
+				if (y2 > y1)
+					pixmask_unaligned_box_row(&pi, extents, t, y1, y2 - y1,
+								   SAMPLES_Y);
+
+				if (fy2)
+					pixmask_unaligned_box_row(&pi, extents, t, y2, 1,
+								   grid_coverage(SAMPLES_Y, fy2));
+			} else if (y1 == y2 && fy2 > fy1) {
+				pixmask_unaligned_box_row(&pi, extents, t, y1, 1,
+							  grid_coverage(SAMPLES_Y, fy2) - grid_coverage(SAMPLES_Y, fy1));
+			}
+			extents++;
+		}
+
+		RegionUninit(&clip);
+		pixman_image_unref(pi.image);
+		pixman_image_unref(pi.source);
+		pixman_image_unref(pi.mask);
+	} while (--n && t++);
+
+	return true;
+}
+
+static bool
 composite_unaligned_boxes_fallback(CARD8 op,
 				   PicturePtr src,
 				   PicturePtr dst,
 				   INT16 src_x, INT16 src_y,
-				   int ntrap, xTrapezoid *traps)
+				   int ntrap, xTrapezoid *traps,
+				   bool force_fallback)
 {
 	ScreenPtr screen = dst->pDrawable->pScreen;
-	INT16 dst_x = pixman_fixed_to_int(traps[0].left.p1.x);
-	INT16 dst_y = pixman_fixed_to_int(traps[0].left.p1.y);
-	int dx = dst->pDrawable->x;
-	int dy = dst->pDrawable->y;
+	uint32_t color;
+	int16_t dst_x, dst_y;
+	int16_t dx, dy;
 	int n;
 
+	if (sna_picture_is_solid(src, &color) &&
+	    composite_unaligned_boxes_inplace__solid(op, color, dst,
+						     ntrap, traps,
+						     force_fallback))
+		return true;
+
+	if (composite_unaligned_boxes_inplace(op, src, src_x, src_y,
+					      dst, ntrap, traps,
+					      force_fallback))
+		return true;
+
+	dst_x = pixman_fixed_to_int(traps[0].left.p1.x);
+	dst_y = pixman_fixed_to_int(traps[0].left.p1.y);
+	dx = dst->pDrawable->x;
+	dy = dst->pDrawable->y;
 	for (n = 0; n < ntrap; n++) {
 		xTrapezoid *t = &traps[n];
 		PixmapPtr scratch;
@@ -2934,10 +3557,16 @@ composite_unaligned_boxes_fallback(CARD8 op,
 						   extents.y2 - extents.y1))
 			continue;
 
-		scratch = sna_pixmap_create_upload(screen,
-						   extents.x2 - extents.x1,
-						   extents.y2 - extents.y1,
-						   8, KGEM_BUFFER_WRITE);
+		if (force_fallback)
+			scratch = sna_pixmap_create_unattached(screen,
+							       extents.x2 - extents.x1,
+							       extents.y2 - extents.y1,
+							       8);
+		else
+			scratch = sna_pixmap_create_upload(screen,
+							   extents.x2 - extents.x1,
+							   extents.y2 - extents.y1,
+							   8, KGEM_BUFFER_WRITE);
 		if (!scratch)
 			continue;
 
@@ -2956,12 +3585,14 @@ composite_unaligned_boxes_fallback(CARD8 op,
 			blt_unaligned_box_row(scratch, &extents, t, y1, y1 + 1,
 					      grid_coverage(SAMPLES_Y, t->bottom) - grid_coverage(SAMPLES_Y, t->top));
 		} else {
-			if (pixman_fixed_frac(t->top))
+			if (pixman_fixed_frac(t->top)) {
 				blt_unaligned_box_row(scratch, &extents, t, y1, y1 + 1,
 						      SAMPLES_Y - grid_coverage(SAMPLES_Y, t->top));
+				y1++;
+			}
 
-			if (y2 > y1 + 1)
-				blt_unaligned_box_row(scratch, &extents, t, y1+1, y2,
+			if (y2 > y1)
+				blt_unaligned_box_row(scratch, &extents, t, y1, y2,
 						      SAMPLES_Y);
 
 			if (pixman_fixed_frac(t->bottom))
@@ -2985,17 +3616,18 @@ composite_unaligned_boxes_fallback(CARD8 op,
 		}
 	}
 
-	return TRUE;
+	return true;
 }
 
-static Bool
+static bool
 composite_unaligned_boxes(struct sna *sna,
 			  CARD8 op,
 			  PicturePtr src,
 			  PicturePtr dst,
 			  PictFormatPtr maskFormat,
 			  INT16 src_x, INT16 src_y,
-			  int ntrap, xTrapezoid *traps)
+			  int ntrap, xTrapezoid *traps,
+			  bool force_fallback)
 {
 	BoxRec extents;
 	struct sna_composite_spans_op tmp;
@@ -3013,9 +3645,13 @@ composite_unaligned_boxes(struct sna *sna,
 	if (ntrap > 1 && maskFormat)
 		return false;
 
-	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
-	if (priv == NULL || !sna->render.composite_spans)
-		return composite_unaligned_boxes_fallback(op, src, dst, src_x, src_y, ntrap, traps);
+	if (force_fallback || !sna->render.composite_spans) {
+fallback:
+		return composite_unaligned_boxes_fallback(op, src, dst,
+							  src_x, src_y,
+							  ntrap, traps,
+							  force_fallback);
+	}
 
 	dst_x = extents.x1 = pixman_fixed_to_int(traps[0].left.p1.x);
 	extents.x2 = pixman_fixed_to_int(traps[0].right.p1.x + pixman_fixed_1_minus_e);
@@ -3077,10 +3713,14 @@ composite_unaligned_boxes(struct sna *sna,
 	switch (op) {
 	case PictOpAdd:
 	case PictOpOver:
+		priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
+		assert(priv != NULL);
 		if (priv->clear && priv->clear_color == 0)
 			op = PictOpSrc;
 		break;
 	case PictOpIn:
+		priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
+		assert(priv != NULL);
 		if (priv->clear && priv->clear_color == 0)
 			return true;
 		break;
@@ -3097,13 +3737,13 @@ composite_unaligned_boxes(struct sna *sna,
 					 &tmp)) {
 		DBG(("%s: composite spans render op not supported\n",
 		     __FUNCTION__));
-		return false;
+		REGION_UNINIT(NULL, &clip);
+		goto fallback;
 	}
 
 	for (n = 0; n < ntrap; n++)
 		composite_unaligned_trap(sna, &tmp, &traps[n], dx, dy, c);
 	tmp.done(sna, &tmp);
-
 	REGION_UNINIT(NULL, &clip);
 	return true;
 }
@@ -3629,13 +4269,6 @@ struct inplace {
 	};
 };
 
-static force_inline uint8_t
-mul_8_8(uint8_t a, uint8_t b)
-{
-    uint16_t t = a * (uint16_t)b + 0x7f;
-    return ((t >> 8) + t) >> 8;
-}
-
 static force_inline uint8_t coverage_opacity(int coverage, uint8_t opacity)
 {
 	coverage = coverage * 256 / FAST_SAMPLES_XY;
@@ -3798,36 +4431,6 @@ tor_blt_add_clipped(struct sna *sna,
 	pixman_region_fini(&region);
 }
 
-#define ONE_HALF 0x7f
-#define RB_MASK 0x00ff00ff
-#define RB_ONE_HALF 0x007f007f
-#define RB_MASK_PLUS_ONE 0x01000100
-#define G_SHIFT 8
-
-static force_inline uint32_t
-mul8x2_8 (uint32_t a, uint8_t b)
-{
-	uint32_t t = (a & RB_MASK) * b + RB_ONE_HALF;
-	return ((t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT) & RB_MASK;
-}
-
-static force_inline uint32_t
-add8x2_8x2(uint32_t a, uint32_t b)
-{
-	uint32_t t = a + b;
-	t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK);
-	return t & RB_MASK;
-}
-
-static force_inline uint32_t
-lerp8x4(uint32_t src, uint8_t a, uint32_t dst)
-{
-	return (add8x2_8x2(mul8x2_8(src, a),
-			   mul8x2_8(dst, ~a)) |
-		add8x2_8x2(mul8x2_8(src >> G_SHIFT, a),
-			   mul8x2_8(dst >> G_SHIFT, ~a)) << G_SHIFT);
-}
-
 static void
 tor_blt_lerp32(struct sna *sna,
 	       struct sna_composite_spans_op *op,
@@ -4196,7 +4799,6 @@ unbounded_pass:
 static bool
 trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 				 uint32_t color,
-				 PicturePtr src,
 				 PicturePtr dst,
 				 PictFormatPtr maskFormat,
 				 int ntrap, xTrapezoid *traps)
@@ -4234,7 +4836,7 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 		do {
 			/* XXX unwind errors? */
 			if (!trapezoid_span_inplace__x8r8g8b8(op, color,
-							      src, dst, NULL,
+							      dst, NULL,
 							      1, traps++))
 				return false;
 		} while (--ntrap);
@@ -4252,7 +4854,7 @@ trapezoid_span_inplace__x8r8g8b8(CARD8 op,
 	     region.extents.x2, region.extents.y2));
 
 	if (!sna_compute_composite_extents(&region.extents,
-					   src, NULL, dst,
+					   NULL, NULL, dst,
 					   0, 0,
 					   0, 0,
 					   region.extents.x1, region.extents.y1,
@@ -4372,7 +4974,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)
 		return trapezoid_span_inplace__x8r8g8b8(op, color,
-							src, dst, maskFormat,
+							dst, maskFormat,
 							ntrap, traps);
 
 	if (dst->format != PICT_a8) {
@@ -4454,8 +5056,8 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	     region.extents.x2, region.extents.y2));
 
 	if (!sna_compute_composite_extents(&region.extents,
-					   src, NULL, dst,
-					   src_x, src_y,
+					   NULL, NULL, dst,
+					   0, 0,
 					   0, 0,
 					   region.extents.x1, region.extents.y1,
 					   region.extents.x2 - region.extents.x1,
@@ -4687,7 +5289,7 @@ sna_composite_trapezoids(CARD8 op,
 	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv;
-	bool rectilinear, pixel_aligned;
+	bool rectilinear, pixel_aligned, force_fallback;
 	unsigned flags;
 	int n;
 
@@ -4718,12 +5320,14 @@ sna_composite_trapezoids(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(priv) && !picture_is_gpu(src)) {
-		DBG(("%s: fallback -- dst is too small, %dx%d\n",
+	force_fallback = false;
+	if ((too_small(priv) || DAMAGE_IS_ALL(priv->cpu_damage)) &&
+	    !picture_is_gpu(src)) {
+		DBG(("%s: force fallbacks -- dst is too small, %dx%d\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width,
 		     dst->pDrawable->height));
-		goto fallback;
+		force_fallback = true;
 	}
 
 	/* scan through for fast rectangles */
@@ -4769,18 +5373,23 @@ sna_composite_trapezoids(CARD8 op,
 			if (composite_aligned_boxes(sna, op, src, dst,
 						    maskFormat,
 						    xSrc, ySrc,
-						    ntrap, traps))
+						    ntrap, traps,
+						    force_fallback))
 			    return;
 		} else {
 			if (composite_unaligned_boxes(sna, op, src, dst,
 						      maskFormat,
 						      xSrc, ySrc,
-						      ntrap, traps))
+						      ntrap, traps,
+						      force_fallback))
 				return;
 		}
 		flags |= COMPOSITE_SPANS_RECTILINEAR;
 	}
 
+	if (force_fallback)
+		goto fallback;
+
 	if (is_mono(dst, maskFormat) &&
 	    mono_trapezoids_span_converter(op, src, dst,
 					   xSrc, ySrc,
commit ae3c0963790cfb6f984ed4ad3ecbaae492775e1b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 16:25:51 2012 +0100

    sna: Composite glyphs inplace if the CPU is already all-damaged
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 17c42d5..3d24ea4 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1364,7 +1364,8 @@ sna_glyphs(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(priv) && !picture_is_gpu(src)) {
+	if ((too_small(priv) || DAMAGE_IS_ALL(priv->cpu_damage)) &&
+	    !picture_is_gpu(src)) {
 		DBG(("%s: fallback -- too small (%dx%d)\n",
 		     __FUNCTION__, dst->pDrawable->width, dst->pDrawable->height));
 		goto fallback;
commit eaed58b2baf30eaea37be06cfc1d9d81059aba27
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 16 12:55:54 2012 +0100

    sna: Tweak placement of operations
    
    Take in account busyness of the damaged GPU bo for considering placement
    of the subsequent operations. In particular, note that is_cpu is only
    used for when we feel like the following operation would be better on
    the CPU and just want to confirm that doing so will not stall.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2eee01d..ed996df 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1282,17 +1282,17 @@ static inline bool region_inplace(struct sna *sna,
 	if (wedged(sna))
 		return false;
 
-	if (priv->flush) {
-		DBG(("%s: yes, exported via dri, will flush\n", __FUNCTION__));
-		return true;
-	}
-
 	if (priv->cpu_damage &&
 	    region_overlaps_damage(region, priv->cpu_damage)) {
 		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
 		return false;
 	}
 
+	if (priv->flush) {
+		DBG(("%s: yes, exported via dri, will flush\n", __FUNCTION__));
+		return true;
+	}
+
 	if (priv->mapped) {
 		DBG(("%s: yes, already mapped, continuiung\n", __FUNCTION__));
 		return true;
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index b9acea1..15512fd 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -75,7 +75,7 @@ is_gpu(DrawablePtr drawable)
 	if (priv == NULL || priv->clear)
 		return false;
 
-	if (priv->gpu_damage || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
+	if (DAMAGE_IS_ALL(priv->gpu_damage) || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
 		return true;
 
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
@@ -85,10 +85,14 @@ static inline Bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	if (priv == NULL || priv->clear || DAMAGE_IS_ALL(priv->cpu_damage))
+	if (priv == NULL || priv->gpu_bo == NULL || priv->clear || DAMAGE_IS_ALL(priv->cpu_damage))
 		return true;
 
-	if (priv->gpu_damage || (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))
+	assert(!priv->gpu_bo->proxy); /* should be marked as cpu damaged */
+	if (priv->gpu_damage && kgem_bo_is_busy(priv->gpu_bo));
+		return false;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
 		return false;
 
 	return true;
@@ -106,7 +110,7 @@ too_small(struct sna_pixmap *priv)
 {
 	assert(priv);
 
-	if (priv->gpu_damage)
+	if (priv->gpu_bo)
 		return false;
 
 	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
@@ -120,7 +124,7 @@ unattached(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
 
-	if (priv == NULL)
+	if (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))
 		return true;
 
 	return priv->gpu_bo == NULL && priv->cpu_bo == NULL;
commit 8eac098962891a5deb7c53d36c6dec57c7f2b972
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 17 09:51:12 2012 +0100

    sna/gen3: Add another DBG for dropping vbo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index e58cdd6..62b3a8e 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1697,6 +1697,7 @@ static void gen3_vertex_close(struct sna *sna)
 			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
 			sna->render.vertices = kgem_bo_map__gtt(&sna->kgem, bo);
 			if (sna->render.vertices == NULL) {
+				DBG(("%s: discarding non-mappable vertices\n",__FUNCTION__));
 				sna->render.vbo = NULL;
 				sna->render.vertices = sna->render.vertex_data;
 				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
commit 515c8b19d638d4a811b159ef0dc7cf4059e30217
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:41:14 2012 +0100

    sna: Prefer to operate inplace if already mapped or the GPU is wholly dirty
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b0021f9..2eee01d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1283,16 +1283,26 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 
 	if (priv->flush) {
-		DBG(("%s: exported via dri, will flush\n", __FUNCTION__));
+		DBG(("%s: yes, exported via dri, will flush\n", __FUNCTION__));
 		return true;
 	}
 
 	if (priv->cpu_damage &&
 	    region_overlaps_damage(region, priv->cpu_damage)) {
-		DBG(("%s: uncovered CPU damage pending\n", __FUNCTION__));
+		DBG(("%s: no, uncovered CPU damage pending\n", __FUNCTION__));
 		return false;
 	}
 
+	if (priv->mapped) {
+		DBG(("%s: yes, already mapped, continuiung\n", __FUNCTION__));
+		return true;
+	}
+
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+		DBG(("%s: yes, already wholly damaged on the GPU\n", __FUNCTION__));
+		return true;
+	}
+
 	DBG(("%s: (%dx%d), inplace? %d\n",
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
commit d1713941e9db3e7a6d83466be1b253978fb4bf01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:29:59 2012 +0100

    sna: Tweaks for DBG missing glyphs through fallbacks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ad6f6c5..6379a18 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1476,6 +1476,9 @@ gen4_emit_state(struct sna *sna,
 	gen4_emit_drawing_rectangle(sna, op);
 
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
+		DBG(("%s: flushing dirty (%d, %d)\n", __FUNCTION__,
+		     kgem_bo_is_dirty(op->src.bo),
+		     kgem_bo_is_dirty(op->mask.bo)));
 		OUT_BATCH(MI_FLUSH);
 		kgem_clear_dirty(&sna->kgem);
 		kgem_bo_mark_dirty(op->dst.bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bc85643..b0021f9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1563,6 +1563,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			    region->extents.x2 - region->extents.x1 == 1 &&
 			    region->extents.y2 - region->extents.y1 == 1) {
 				/*  Often associated with synchronisation, KISS */
+				DBG(("%s: single pixel read\n", __FUNCTION__));
 				sna_read_boxes(sna,
 					       priv->gpu_bo, 0, 0,
 					       pixmap, 0, 0,
@@ -1571,8 +1572,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		} else {
 			if (sna_damage_contains_box__no_reduce(priv->cpu_damage,
-							       &region->extents))
+							       &region->extents)) {
+				DBG(("%s: region already in CPU damage\n",
+				     __FUNCTION__));
 				goto done;
+			}
 		}
 
 		if (sna_damage_contains_box(priv->gpu_damage,
@@ -1631,6 +1635,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				BoxPtr box;
 				int n;
 
+				DBG(("%s: region wholly contains damage\n",
+				     __FUNCTION__));
+
 				n = sna_damage_get_boxes(priv->gpu_damage,
 							 &box);
 				if (n) {
@@ -1658,6 +1665,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				int n = REGION_NUM_RECTS(r);
 				Bool ok = FALSE;
 
+				DBG(("%s: region wholly inside damage\n",
+				     __FUNCTION__));
+
 				if (use_cpu_bo_for_write(sna, priv))
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
@@ -1680,6 +1690,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					int n = REGION_NUM_RECTS(&need);
 					Bool ok = FALSE;
 
+					DBG(("%s: region intersects damage\n",
+					     __FUNCTION__));
+
 					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
@@ -1717,6 +1730,16 @@ done:
 		}
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
+#ifdef HAVE_FULL_DEBUG
+		{
+			RegionRec need;
+
+			RegionNull(&need);
+			assert(priv->gpu_damage == NULL ||
+			       !sna_damage_intersect(priv->gpu_damage, r, &need));
+			RegionUninit(&need);
+		}
+#endif
 	}
 
 	if (dx | dy)
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 97fe70e..ab1cd39 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -544,12 +544,14 @@ fallback:
 		flags = MOVE_WRITE | MOVE_INPLACE_HINT;
 	else
 		flags = MOVE_WRITE | MOVE_READ;
+	DBG(("%s: fallback -- move dst to cpu\n", __FUNCTION__));
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, flags))
 		goto out;
 	if (dst->alphaMap &&
 	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, flags))
 		goto out;
 	if (src->pDrawable) {
+		DBG(("%s: fallback -- move src to cpu\n", __FUNCTION__));
 		if (!sna_drawable_move_to_cpu(src->pDrawable,
 					      MOVE_READ))
 			goto out;
@@ -560,6 +562,7 @@ fallback:
 			goto out;
 	}
 	if (mask && mask->pDrawable) {
+		DBG(("%s: fallback -- move mask to cpu\n", __FUNCTION__));
 		if (!sna_drawable_move_to_cpu(mask->pDrawable,
 					      MOVE_READ))
 			goto out;
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index b06bcda..17c42d5 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -226,20 +226,22 @@ bail:
 }
 
 static void
-glyph_cache_upload(ScreenPtr screen,
-		   struct sna_glyph_cache *cache,
-		   GlyphPtr glyph,
+glyph_cache_upload(struct sna_glyph_cache *cache,
+		   GlyphPtr glyph, PicturePtr glyph_picture,
 		   int16_t x, int16_t y)
 {
 	DBG(("%s: upload glyph %p to cache (%d, %d)x(%d, %d)\n",
-	     __FUNCTION__, glyph, x, y, glyph->info.width, glyph->info.height));
+	     __FUNCTION__,
+	     glyph, x, y,
+	     glyph_picture->pDrawable->width,
+	     glyph_picture->pDrawable->height));
 	sna_composite(PictOpSrc,
-		      GetGlyphPicture(glyph, screen), 0, cache->picture,
+		      glyph_picture, 0, cache->picture,
 		      0, 0,
 		      0, 0,
 		      x, y,
-		      glyph->info.width,
-		      glyph->info.height);
+		      glyph_picture->pDrawable->width,
+		      glyph_picture->pDrawable->height);
 }
 
 static void
@@ -392,7 +394,7 @@ glyph_cache(ScreenPtr screen,
 		pos >>= 2;
 	}
 
-	glyph_cache_upload(screen, cache, glyph,
+	glyph_cache_upload(cache, glyph, glyph_picture,
 			   priv->coordinate.x, priv->coordinate.y);
 
 	return TRUE;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index dfa0623..3841e52 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -453,9 +453,9 @@ fallback:
 		int width  = box->x2 - box->x1;
 		int pitch = PITCH(width, cpp);
 
-		DBG(("    copy offset %lx [%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
+		DBG(("    copy offset %lx [%08x...%08x...%08x]: (%d, %d) x (%d, %d), src pitch=%d, dst pitch=%d, bpp=%d\n",
 		     (long)((char *)src - (char *)ptr),
-		     *(uint32_t*)src, *(uint32_t*)(src+pitch*height - 4),
+		     *(uint32_t*)src, *(uint32_t*)(src+pitch*height/2 + pitch/2 - 4), *(uint32_t*)(src+pitch*height - 4),
 		     box->x1 + dst_dx,
 		     box->y1 + dst_dy,
 		     width, height,
@@ -558,8 +558,8 @@ static bool upload_inplace(struct kgem *kgem,
 }
 
 bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
-		     struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-		     const void *src, int stride, int16_t src_dx, int16_t src_dy,
+		     struct kgem_bo * const dst_bo, int16_t const dst_dx, int16_t const dst_dy,
+		     const void * const src, int const stride, int16_t const src_dx, int16_t const src_dy,
 		     const BoxRec *box, int nbox)
 {
 	struct kgem *kgem = &sna->kgem;
@@ -570,7 +570,7 @@ bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 	int n, cmd, br13;
 	bool can_blt;
 
-	DBG(("%s x %d\n", __FUNCTION__, nbox));
+	DBG(("%s x %d, src stride=%d,  src dx=(%d, %d)\n", __FUNCTION__, nbox, stride, src_dx, src_dy));
 
 	if (upload_inplace(kgem, dst_bo, box, nbox, dst->drawable.bitsPerPixel)) {
 fallback:
commit 2b23605efba009fb340ec10b37d54caae159b9b1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 15:28:31 2012 +0100

    sna: Don't trim prepare for glyphs_via_mask
    
    If we pass the expected width/height without passing the per-glyph
    offset into the preparation function, we make the erroneous mistake of
    analysing the glyph cache only for the mask extents and so will miss
    glyphs that we need to upload for the operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 63a6287..b06bcda 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -978,13 +978,13 @@ next_image:
 						ok = sna->render.composite(sna, PictOpAdd,
 									   this_atlas, NULL, mask,
 									   0, 0, 0, 0, 0, 0,
-									   width, height,
+									   0, 0,
 									   &tmp);
 					} else {
 						ok = sna->render.composite(sna, PictOpAdd,
 									   sna->render.white_picture, this_atlas, mask,
 									   0, 0, 0, 0, 0, 0,
-									   width, height,
+									   0, 0,
 									   &tmp);
 					}
 					if (!ok) {
commit 9f66b27114fcc457fa5cb2d5889e875384f89e75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 13:47:33 2012 +0100

    sna: Remove mark-as-cpu after gem_pread
    
    The kernel no longer moves the read bo into the CPU domain, so remove
    the last vestiges of that tracking.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 89921d4..84475fe 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4147,8 +4147,6 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 			     bo->base.handle, (char *)bo->mem+offset,
 			     offset, length))
 			return;
-
-		kgem_bo_map__cpu(kgem, &bo->base);
 	}
 	kgem_bo_retire(kgem, &bo->base);
 }
commit bfd88d5429528b0162eafcc9496bfd1c708b60cd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 11:21:29 2012 +0100

    sna: Silence a few compiler warnings
    
    Nothing harmful, just noise.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ab451f9..bc85643 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1917,7 +1917,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
 	if (MIGRATE_ALL || region_subsumes_damage(&r, priv->cpu_damage)) {
 		int n;
 
-		n = sna_damage_get_boxes(priv->cpu_damage, &box);
+		n = sna_damage_get_boxes(priv->cpu_damage, (BoxPtr *)&box);
 		if (n) {
 			Bool ok = FALSE;
 
@@ -8781,7 +8781,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 	      (gc->fillStyle == FillTiled && gc->tileIsPixel)),
 	     gc->fillStyle, gc->tileIsPixel,
 	     gc->alu));
-	DBG(("%s: draw=%d, offset=(%d, %d), size=%dx%d\n",
+	DBG(("%s: draw=%ld, offset=(%d, %d), size=%dx%d\n",
 	     __FUNCTION__, draw->serialNumber,
 	     draw->x, draw->y, draw->width, draw->height));
 
@@ -11875,7 +11875,7 @@ static GCOps sna_gc_ops__tmp = {
 static void
 sna_validate_gc(GCPtr gc, unsigned long changes, DrawablePtr drawable)
 {
-	DBG(("%s changes=%x\n", __FUNCTION__, changes));
+	DBG(("%s changes=%lx\n", __FUNCTION__, changes));
 
 	if (changes & (GCClipMask|GCSubwindowMode) ||
 	    drawable->serialNumber != (gc->serialNumber & DRAWABLE_SERIAL_BITS) ||
@@ -12195,7 +12195,7 @@ static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 	int i;
 
 	DBG(("%s: now=%d, active=%08x, ready=%08x\n",
-	     __FUNCTION__, now, sna->timer_active, sna->timer_ready));
+	     __FUNCTION__, (int)now, sna->timer_active, sna->timer_ready));
 	active = sna->timer_active & ~sna->timer_ready;
 	if (active == 0)
 		return 0;
commit b3659beec8c4126de5da4df5cb2cc077b7af6c0b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 15 11:08:18 2012 +0100

    sna: Check for zero-sized damage regions after trimming Windows to their pixmaps
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 982f562..ab451f9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1736,6 +1736,11 @@ out:
 	return true;
 }
 
+static inline bool box_empty(const BoxRec *box)
+{
+	return box->x2 <= box->x1 || box->y2 <= box->y1;
+}
+
 bool
 sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
 {
@@ -1770,6 +1775,9 @@ sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
 	if (region.extents.y2 > pixmap->drawable.height)
 		region.extents.y2 = pixmap->drawable.height;
 
+	if (box_empty(&region.extents))
+		return true;
+
 	return sna_drawable_move_region_to_cpu(&pixmap->drawable, &region, flags);
 }
 
@@ -2714,11 +2722,6 @@ static inline void box32_add_rect(Box32Rec *box, const xRectangle *r)
 		box->y2 = v;
 }
 
-static inline bool box_empty(const BoxRec *box)
-{
-	return box->x2 <= box->x1 || box->y2 <= box->y1;
-}
-
 static Bool
 sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			 int x, int y, int w, int h, char *bits, int stride)
commit 293a867ea55d3004f5be21b1d0ad765a89c28a5a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 23:02:10 2012 +0100

    sna: Specialise the self-copy blitter to handle vertically overlapping copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index 99bdece..c0922b5 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -217,98 +217,17 @@ memcpy_blt(const void *src, void *dst, int bpp,
 }
 
 void
-memmove_blt(const void *src, void *dst, int bpp,
-	    int32_t src_stride, int32_t dst_stride,
-	    int16_t src_x, int16_t src_y,
-	    int16_t dst_x, int16_t dst_y,
-	    uint16_t width, uint16_t height)
-{
-	const uint8_t *src_bytes;
-	uint8_t *dst_bytes;
-	int byte_width;
-
-	assert(src);
-	assert(dst);
-	assert(width && height);
-	assert(bpp >= 8);
-
-	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
-	     __FUNCTION__, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
-
-	bpp /= 8;
-
-	src_bytes = (const uint8_t *)src + src_stride * src_y + src_x * bpp;
-	dst_bytes = (uint8_t *)dst + dst_stride * dst_y + dst_x * bpp;
-
-	byte_width = width * bpp;
-	if (byte_width == src_stride && byte_width == dst_stride) {
-		byte_width *= height;
-		height = 1;
-	}
-
-	switch (byte_width) {
-	case 1:
-		do {
-			*dst_bytes = *src_bytes;
-			src_bytes += src_stride;
-			dst_bytes += dst_stride;
-		} while (--height);
-		break;
-
-	case 2:
-		do {
-			*(uint16_t *)dst_bytes = *(const uint16_t *)src_bytes;
-			src_bytes += src_stride;
-			dst_bytes += dst_stride;
-		} while (--height);
-		break;
-
-	case 4:
-		do {
-			*(uint32_t *)dst_bytes = *(const uint32_t *)src_bytes;
-			src_bytes += src_stride;
-			dst_bytes += dst_stride;
-		} while (--height);
-		break;
-
-	case 8:
-		do {
-			*(uint64_t *)dst_bytes = *(const uint64_t *)src_bytes;
-			src_bytes += src_stride;
-			dst_bytes += dst_stride;
-		} while (--height);
-		break;
-
-	default:
-		if (src_stride == dst_stride) {
-			if (dst_bytes < src_bytes + byte_width &&
-			    src_bytes < dst_bytes + byte_width) {
-				do {
-					memmove(dst_bytes, src_bytes, byte_width);
-					src_bytes += src_stride;
-					dst_bytes += src_stride;
-				} while (--height);
-			} else {
-				do {
-					memcpy(dst_bytes, src_bytes, byte_width);
-					src_bytes += src_stride;
-					dst_bytes += src_stride;
-				} while (--height);
-			}
-		} else do {
-			memmove(dst_bytes, src_bytes, byte_width);
-			src_bytes += src_stride;
-			dst_bytes += dst_stride;
-		} while (--height);
-		break;
-	}
-}
-
-void
-memmove_blt__box(const void *src, void *dst,
-		 int bpp, int32_t stride,
-		 const BoxRec *box)
+memmove_box(const void *src, void *dst,
+	    int bpp, int32_t stride,
+	    const BoxRec *box,
+	    int dx, int dy)
 {
+	union {
+		uint8_t u8;
+		uint16_t u16;
+		uint32_t u32;
+		uint64_t u64;
+	} tmp;
 	const uint8_t *src_bytes;
 	uint8_t *dst_bytes;
 	int width, height;
@@ -319,8 +238,10 @@ memmove_blt__box(const void *src, void *dst,
 	assert(box->x2 > box->x1);
 	assert(box->y2 > box->y1);
 
-	DBG(("%s: box=(%d, %d), (%d, %d), pitch=%d, bpp=%d\n",
-	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2, stride, bpp));
+	DBG(("%s: box=(%d, %d), (%d, %d), pitch=%d, bpp=%d, dx=%d, dy=%d\n",
+	     __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2,
+	     stride, bpp, dx, dy));
 
 	bpp /= 8;
 	width = box->y1 * stride + box->x1 * bpp;
@@ -334,55 +255,111 @@ memmove_blt__box(const void *src, void *dst,
 		height = 1;
 	}
 
-	switch (width) {
-	case 1:
-		do {
-			*dst_bytes = *src_bytes;
-			src_bytes += stride;
-			dst_bytes += stride;
-		} while (--height);
-		break;
-
-	case 2:
-		do {
-			*(uint16_t *)dst_bytes = *(const uint16_t *)src_bytes;
-			src_bytes += stride;
-			dst_bytes += stride;
-		} while (--height);
-		break;
-
-	case 4:
-		do {
-			*(uint32_t *)dst_bytes = *(const uint32_t *)src_bytes;
-			src_bytes += stride;
-			dst_bytes += stride;
-		} while (--height);
-		break;
+	if (dy >= 0) {
+		switch (width) {
+		case 1:
+			do {
+				*dst_bytes = tmp.u8 = *src_bytes;
+				src_bytes += stride;
+				dst_bytes += stride;
+			} while (--height);
+			break;
 
-	case 8:
-		do {
-			*(uint64_t *)dst_bytes = *(const uint64_t *)src_bytes;
-			src_bytes += stride;
-			dst_bytes += stride;
-		} while (--height);
-		break;
+		case 2:
+			do {
+				*(uint16_t *)dst_bytes = tmp.u16 = *(const uint16_t *)src_bytes;
+				src_bytes += stride;
+				dst_bytes += stride;
+			} while (--height);
+			break;
 
-	default:
-		if (dst_bytes < src_bytes + width &&
-		    src_bytes < dst_bytes + width) {
+		case 4:
 			do {
-				memmove(dst_bytes, src_bytes, width);
+				*(uint32_t *)dst_bytes = tmp.u32 = *(const uint32_t *)src_bytes;
 				src_bytes += stride;
 				dst_bytes += stride;
 			} while (--height);
-		} else {
+			break;
+
+		case 8:
 			do {
-				memcpy(dst_bytes, src_bytes, width);
+				*(uint64_t *)dst_bytes = tmp.u64 = *(const uint64_t *)src_bytes;
 				src_bytes += stride;
 				dst_bytes += stride;
 			} while (--height);
+			break;
+
+		default:
+			if (dst_bytes < src_bytes + width &&
+			    src_bytes < dst_bytes + width) {
+				do {
+					memmove(dst_bytes, src_bytes, width);
+					src_bytes += stride;
+					dst_bytes += stride;
+				} while (--height);
+			} else {
+				do {
+					memcpy(dst_bytes, src_bytes, width);
+					src_bytes += stride;
+					dst_bytes += stride;
+				} while (--height);
+			}
+			break;
+		}
+	} else {
+		src_bytes += (height-1) * stride;
+		dst_bytes += (height-1) * stride;
+
+		switch (width) {
+		case 1:
+			do {
+				*dst_bytes = tmp.u8 = *src_bytes;
+				src_bytes -= stride;
+				dst_bytes -= stride;
+			} while (--height);
+			break;
+
+		case 2:
+			do {
+				*(uint16_t *)dst_bytes = tmp.u16 = *(const uint16_t *)src_bytes;
+				src_bytes -= stride;
+				dst_bytes -= stride;
+			} while (--height);
+			break;
+
+		case 4:
+			do {
+				*(uint32_t *)dst_bytes = tmp.u32 = *(const uint32_t *)src_bytes;
+				src_bytes -= stride;
+				dst_bytes -= stride;
+			} while (--height);
+			break;
+
+		case 8:
+			do {
+				*(uint64_t *)dst_bytes = tmp.u64 = *(const uint64_t *)src_bytes;
+				src_bytes -= stride;
+				dst_bytes -= stride;
+			} while (--height);
+			break;
+
+		default:
+			if (dst_bytes < src_bytes + width &&
+			    src_bytes < dst_bytes + width) {
+				do {
+					memmove(dst_bytes, src_bytes, width);
+					src_bytes -= stride;
+					dst_bytes -= stride;
+				} while (--height);
+			} else {
+				do {
+					memcpy(dst_bytes, src_bytes, width);
+					src_bytes -= stride;
+					dst_bytes -= stride;
+				} while (--height);
+			}
+			break;
 		}
-		break;
 	}
 }
 
diff --git a/src/sna/sna.h b/src/sna/sna.h
index a426e69..ee8273c 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -673,15 +673,10 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	   int16_t dst_x, int16_t dst_y,
 	   uint16_t width, uint16_t height);
 void
-memmove_blt(const void *src, void *dst, int bpp,
-	    int32_t src_stride, int32_t dst_stride,
-	    int16_t src_x, int16_t src_y,
-	    int16_t dst_x, int16_t dst_y,
-	    uint16_t width, uint16_t height);
-void
-memmove_blt__box(const void *src, void *dst,
-		 int bpp, int32_t stride,
-		 const BoxRec *box);
+memmove_box(const void *src, void *dst,
+	    int bpp, int32_t stride,
+	    const BoxRec *box,
+	    int dx, int dy);
 
 void
 memcpy_xor(const void *src, void *dst, int bpp,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a44cdb1..982f562 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3489,7 +3489,7 @@ fallback:
 
 		stride = pixmap->devKind;
 		bpp = pixmap->drawable.bitsPerPixel;
-		if (alu == GXcopy && !reverse && !upsidedown && bpp >= 8) {
+		if (alu == GXcopy && bpp >= 8) {
 			dst_bits = (FbBits *)
 				((char *)pixmap->devPrivate.ptr +
 				 ty * stride + tx * bpp / 8);
@@ -3498,8 +3498,9 @@ fallback:
 				 dy * stride + dx * bpp / 8);
 
 			do {
-				memmove_blt__box(src_bits, dst_bits,
-						 bpp, stride, box);
+				memmove_box(src_bits, dst_bits,
+					    bpp, stride, box,
+					    dx, dy);
 				box++;
 			} while (--n);
 		} else {
commit 3b76ab995bb70da5f76afead7c696cb7fd72b158
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 22:33:11 2012 +0100

    sna: Add a little bit more DBG to migration IO paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3c6044c..dfa0623 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -92,6 +92,12 @@ static void read_boxes_inplace(struct kgem *kgem,
 		return;
 
 	do {
+		DBG(("%s: copying box (%d, %d), (%d, %d)\n",
+		     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
+
+		assert(box->x2 > box->x1);
+		assert(box->y2 > box->y1);
+
 		assert(box->x1 + src_dx >= 0);
 		assert(box->y1 + src_dy >= 0);
 		assert(box->x2 + src_dx <= pixmap->drawable.width);
@@ -502,6 +508,9 @@ static bool write_boxes_inplace(struct kgem *kgem,
 		     box->x2 - box->x1, box->y2 - box->y1,
 		     bpp, stride, bo->pitch));
 
+		assert(box->x2 > box->x1);
+		assert(box->y2 > box->y1);
+
 		assert(box->x1 + dst_dx >= 0);
 		assert((box->x2 + dst_dx)*bpp <= 8*bo->pitch);
 		assert(box->y1 + dst_dy >= 0);
commit 49543522e789a0a8213c164b37a18c0642e60ac4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 20:25:21 2012 +0100

    sna: Fix reversed assertion
    
    Darn the double negative! After adding damage we want to assert that
    the region is not empty, not !not empty.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index bdd6a5e..1957177 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -503,7 +503,7 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 		pixman_region_fini(region);
 		pixman_region_init_rects(region, boxes, nboxes);
 
-		assert(!pixman_region_not_empty(region));
+		assert(pixman_region_not_empty(region));
 		assert(damage->extents.x1 == region->extents.x1 &&
 		       damage->extents.y1 == region->extents.y1 &&
 		       damage->extents.x2 == region->extents.x2 &&
commit 4e50467b4f50b522a07bedae7ff68b9c37362af8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 17:40:09 2012 +0100

    uxa: Force the outputs to off for consistency with xf86DisableUnusedFunctions()
    
    Upon a VT switch, we set the desired modes and turn off the DPMS on any
    unused output. Make this explicit so that we always maintain consistency
    between the kernel and X's list of enabled CRTCs.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50772
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index caf07bb..1555acd 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -362,6 +362,7 @@ enum {
 
 extern Bool intel_mode_pre_init(ScrnInfoPtr pScrn, int fd, int cpp);
 extern void intel_mode_init(struct intel_screen_private *intel);
+extern void intel_mode_disable_unused_functions(ScrnInfoPtr scrn);
 extern void intel_mode_remove_fb(intel_screen_private *intel);
 extern void intel_mode_fini(intel_screen_private *intel);
 
diff --git a/src/intel_display.c b/src/intel_display.c
index 8de6344..949a822 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -332,9 +332,24 @@ mode_to_kmode(ScrnInfoPtr scrn,
 }
 
 static void
-intel_crtc_dpms(xf86CrtcPtr intel_crtc, int mode)
+intel_crtc_dpms(xf86CrtcPtr crtc, int mode)
 {
+}
+
+void
+intel_mode_disable_unused_functions(ScrnInfoPtr scrn)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
+	struct intel_mode *mode = intel_get_screen_private(scrn)->modes;
+	int i;
 
+	/* Force off for consistency between kernel and ddx */
+	for (i = 0; i < xf86_config->num_crtc; i++) {
+		xf86CrtcPtr crtc = xf86_config->crtc[i];
+		if (!crtc->enabled)
+			drmModeSetCrtc(mode->fd, crtc_id(crtc->driver_private),
+				       0, 0, 0, NULL, 0, NULL);
+	}
 }
 
 static Bool
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 8962a11..3a9fe6f 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1068,6 +1068,7 @@ static Bool I830EnterVT(VT_FUNC_ARGS_DECL)
 	if (!xf86SetDesiredModes(scrn))
 		return FALSE;
 
+	intel_mode_disable_unused_functions(scrn);
 	return TRUE;
 }
 
commit 3a56a0b10eb3ab07efd28efdc79d0fd3a6fa20ff
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 19:26:42 2012 +0100

    sna: Make the disable-unused after vt switch distinct from DPMS off
    
    So that we do not lose control over dpms on/off!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index ce06e4d..a426e69 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -304,6 +304,7 @@ Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 void sna_mode_adjust_frame(struct sna *sna, int x, int y);
 extern void sna_mode_remove_fb(struct sna *sna);
 extern void sna_mode_update(struct sna *sna);
+extern void sna_mode_disable_unused(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
 extern int sna_crtc_id(xf86CrtcPtr crtc);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a198b5c..de834ae 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -515,9 +515,20 @@ sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 {
 	DBG(("%s(pipe %d, dpms mode -> %d):= active=%d\n",
 	     __FUNCTION__, to_sna_crtc(crtc)->pipe, mode, mode == DPMSModeOn));
+}
+
+void sna_mode_disable_unused(struct sna *sna)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	int i;
 
-	if (mode == DPMSModeOff)
-		sna_crtc_disable(crtc);
+	DBG(("%s\n", __FUNCTION__));
+
+	/* Force consistency between kernel and ourselves */
+	for (i = 0; i < xf86_config->num_crtc; i++) {
+		if (!xf86_config->crtc[i]->enabled)
+			sna_crtc_disable(xf86_config->crtc[i]);
+	}
 }
 
 static struct kgem_bo *sna_create_bo_for_fbcon(struct sna *sna,
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 2fa6adc..76ae24e 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -988,6 +988,8 @@ static Bool sna_enter_vt(VT_FUNC_ARGS_DECL)
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "failed to restore desired modes on VT switch\n");
 
+	sna_mode_disable_unused(sna);
+
 	return TRUE;
 }
 
commit b415ca05c2e1c4f09f85d8769d39e5369661ed3a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 17:06:19 2012 +0100

    sna: Reset damage extents after reduction goes to zero
    
    If the reduction of the damage clears all of the boxes, we need to reset
    the -infinite extents so that we continue to accumulate further damage.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50744
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 6149be7..bdd6a5e 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -164,6 +164,12 @@ reset_embedded_box(struct sna_damage *damage)
 	list_init(&damage->embedded_box.list);
 }
 
+static void reset_extents(struct sna_damage *damage)
+{
+	damage->extents.x1 = damage->extents.y1 = MAXSHORT;
+	damage->extents.x2 = damage->extents.y2 = MINSHORT;
+}
+
 static struct sna_damage *_sna_damage_create(void)
 {
 	struct sna_damage *damage;
@@ -179,8 +185,7 @@ static struct sna_damage *_sna_damage_create(void)
 	reset_embedded_box(damage);
 	damage->mode = DAMAGE_ADD;
 	pixman_region_init(&damage->region);
-	damage->extents.x1 = damage->extents.y1 = MAXSHORT;
-	damage->extents.x2 = damage->extents.y2 = MINSHORT;
+	reset_extents(damage);
 
 	return damage;
 }
@@ -498,6 +503,7 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 		pixman_region_fini(region);
 		pixman_region_init_rects(region, boxes, nboxes);
 
+		assert(!pixman_region_not_empty(region));
 		assert(damage->extents.x1 == region->extents.x1 &&
 		       damage->extents.y1 == region->extents.y1 &&
 		       damage->extents.x2 == region->extents.x2 &&
@@ -513,7 +519,10 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 		       damage->extents.y1 <= region->extents.y1 &&
 		       damage->extents.x2 >= region->extents.x2 &&
 		       damage->extents.y2 >= region->extents.y2);
-		damage->extents = region->extents;
+		if (pixman_region_not_empty(region))
+			damage->extents = region->extents;
+		else
+			reset_extents(damage);
 	}
 
 	free(free_boxes);
commit 0df7c488640d3590d2a88dc353b72167b6644eaa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 16:09:38 2012 +0100

    sna: Supporting overlapping copies for fallback blits
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50393
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index 494b413..99bdece 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -217,6 +217,176 @@ memcpy_blt(const void *src, void *dst, int bpp,
 }
 
 void
+memmove_blt(const void *src, void *dst, int bpp,
+	    int32_t src_stride, int32_t dst_stride,
+	    int16_t src_x, int16_t src_y,
+	    int16_t dst_x, int16_t dst_y,
+	    uint16_t width, uint16_t height)
+{
+	const uint8_t *src_bytes;
+	uint8_t *dst_bytes;
+	int byte_width;
+
+	assert(src);
+	assert(dst);
+	assert(width && height);
+	assert(bpp >= 8);
+
+	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d, pitch=%d/%d\n",
+	     __FUNCTION__, src_x, src_y, dst_x, dst_y, width, height, src_stride, dst_stride));
+
+	bpp /= 8;
+
+	src_bytes = (const uint8_t *)src + src_stride * src_y + src_x * bpp;
+	dst_bytes = (uint8_t *)dst + dst_stride * dst_y + dst_x * bpp;
+
+	byte_width = width * bpp;
+	if (byte_width == src_stride && byte_width == dst_stride) {
+		byte_width *= height;
+		height = 1;
+	}
+
+	switch (byte_width) {
+	case 1:
+		do {
+			*dst_bytes = *src_bytes;
+			src_bytes += src_stride;
+			dst_bytes += dst_stride;
+		} while (--height);
+		break;
+
+	case 2:
+		do {
+			*(uint16_t *)dst_bytes = *(const uint16_t *)src_bytes;
+			src_bytes += src_stride;
+			dst_bytes += dst_stride;
+		} while (--height);
+		break;
+
+	case 4:
+		do {
+			*(uint32_t *)dst_bytes = *(const uint32_t *)src_bytes;
+			src_bytes += src_stride;
+			dst_bytes += dst_stride;
+		} while (--height);
+		break;
+
+	case 8:
+		do {
+			*(uint64_t *)dst_bytes = *(const uint64_t *)src_bytes;
+			src_bytes += src_stride;
+			dst_bytes += dst_stride;
+		} while (--height);
+		break;
+
+	default:
+		if (src_stride == dst_stride) {
+			if (dst_bytes < src_bytes + byte_width &&
+			    src_bytes < dst_bytes + byte_width) {
+				do {
+					memmove(dst_bytes, src_bytes, byte_width);
+					src_bytes += src_stride;
+					dst_bytes += src_stride;
+				} while (--height);
+			} else {
+				do {
+					memcpy(dst_bytes, src_bytes, byte_width);
+					src_bytes += src_stride;
+					dst_bytes += src_stride;
+				} while (--height);
+			}
+		} else do {
+			memmove(dst_bytes, src_bytes, byte_width);
+			src_bytes += src_stride;
+			dst_bytes += dst_stride;
+		} while (--height);
+		break;
+	}
+}
+
+void
+memmove_blt__box(const void *src, void *dst,
+		 int bpp, int32_t stride,
+		 const BoxRec *box)
+{
+	const uint8_t *src_bytes;
+	uint8_t *dst_bytes;
+	int width, height;
+
+	assert(src);
+	assert(dst);
+	assert(bpp >= 8);
+	assert(box->x2 > box->x1);
+	assert(box->y2 > box->y1);
+
+	DBG(("%s: box=(%d, %d), (%d, %d), pitch=%d, bpp=%d\n",
+	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2, stride, bpp));
+
+	bpp /= 8;
+	width = box->y1 * stride + box->x1 * bpp;
+	src_bytes = (const uint8_t *)src + width;
+	dst_bytes = (uint8_t *)dst + width;
+
+	width = (box->x2 - box->x1) * bpp;
+	height = (box->y2 - box->y1);
+	if (width == stride) {
+		width *= height;
+		height = 1;
+	}
+
+	switch (width) {
+	case 1:
+		do {
+			*dst_bytes = *src_bytes;
+			src_bytes += stride;
+			dst_bytes += stride;
+		} while (--height);
+		break;
+
+	case 2:
+		do {
+			*(uint16_t *)dst_bytes = *(const uint16_t *)src_bytes;
+			src_bytes += stride;
+			dst_bytes += stride;
+		} while (--height);
+		break;
+
+	case 4:
+		do {
+			*(uint32_t *)dst_bytes = *(const uint32_t *)src_bytes;
+			src_bytes += stride;
+			dst_bytes += stride;
+		} while (--height);
+		break;
+
+	case 8:
+		do {
+			*(uint64_t *)dst_bytes = *(const uint64_t *)src_bytes;
+			src_bytes += stride;
+			dst_bytes += stride;
+		} while (--height);
+		break;
+
+	default:
+		if (dst_bytes < src_bytes + width &&
+		    src_bytes < dst_bytes + width) {
+			do {
+				memmove(dst_bytes, src_bytes, width);
+				src_bytes += stride;
+				dst_bytes += stride;
+			} while (--height);
+		} else {
+			do {
+				memcpy(dst_bytes, src_bytes, width);
+				src_bytes += stride;
+				dst_bytes += stride;
+			} while (--height);
+		}
+		break;
+	}
+}
+
+void
 memcpy_xor(const void *src, void *dst, int bpp,
 	   int32_t src_stride, int32_t dst_stride,
 	   int16_t src_x, int16_t src_y,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 634692c..ce06e4d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -671,6 +671,16 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	   int16_t src_x, int16_t src_y,
 	   int16_t dst_x, int16_t dst_y,
 	   uint16_t width, uint16_t height);
+void
+memmove_blt(const void *src, void *dst, int bpp,
+	    int32_t src_stride, int32_t dst_stride,
+	    int16_t src_x, int16_t src_y,
+	    int16_t dst_x, int16_t dst_y,
+	    uint16_t width, uint16_t height);
+void
+memmove_blt__box(const void *src, void *dst,
+		 int bpp, int32_t stride,
+		 const BoxRec *box);
 
 void
 memcpy_xor(const void *src, void *dst, int bpp,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6c48db8..a44cdb1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3498,12 +3498,8 @@ fallback:
 				 dy * stride + dx * bpp / 8);
 
 			do {
-				memcpy_blt(src_bits, dst_bits, bpp,
-					   stride, stride,
-					   box->x1, box->y1,
-					   box->x1, box->y1,
-					   box->x2 - box->x1,
-					   box->y2 - box->y1);
+				memmove_blt__box(src_bits, dst_bits,
+						 bpp, stride, box);
 				box++;
 			} while (--n);
 		} else {
commit d5b273b20b7d917d08af120815c28b1618d3342c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 14 15:35:24 2012 +0100

    sna: Explicitly disable the CRTC so that xserver/kernel agree after VT switch
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50772
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 9453c20..634692c 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -316,8 +316,6 @@ extern int sna_page_flip(struct sna *sna,
 			 int ref_crtc_hw_id,
 			 uint32_t *old_fb);
 
-extern PixmapPtr sna_set_screen_pixmap(struct sna *sna, PixmapPtr pixmap);
-
 constant static inline struct sna *
 to_sna(ScrnInfoPtr scrn)
 {
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a51abb5..a198b5c 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -95,8 +95,10 @@ struct sna_output {
 	struct list link;
 };
 
-static void
-sna_output_dpms(xf86OutputPtr output, int mode);
+static inline struct sna_crtc *to_sna_crtc(xf86CrtcPtr crtc)
+{
+	return crtc->driver_private;
+}
 
 #define BACKLIGHT_CLASS "/sys/class/backlight"
 
@@ -132,24 +134,23 @@ crtc_id(struct sna_crtc *crtc)
 
 int sna_crtc_id(xf86CrtcPtr crtc)
 {
-	return crtc_id(crtc->driver_private);
+	return to_sna_crtc(crtc)->id;
 }
 
 int sna_crtc_on(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = crtc->driver_private;
-	return sna_crtc->active;
+	return to_sna_crtc(crtc)->active;
 }
 
 int sna_crtc_to_pipe(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	return sna_crtc->pipe;
 }
 
 int sna_crtc_to_plane(xf86CrtcPtr crtc)
 {
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	return sna_crtc->plane;
 }
 
@@ -399,7 +400,7 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 	struct drm_mode_crtc mode;
 
 	VG_CLEAR(mode);
-	mode.crtc_id = crtc_id(crtc->driver_private);
+	mode.crtc_id = to_sna_crtc(crtc)->id;
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
 		return false;
 
@@ -412,7 +413,7 @@ static Bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
 	struct sna_mode *mode = &sna->mode;
 	struct drm_mode_crtc arg;
@@ -459,10 +460,10 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
 		   sna_crtc->kmode.hdisplay,
 		   sna_crtc->kmode.vdisplay,
-		   crtc_id(sna_crtc), sna_crtc->pipe);
+		   sna_crtc->id, sna_crtc->pipe);
 
 	DBG(("%s: applying crtc [%d] mode=%dx%d@%d, fb=%d%s update to %d outputs\n",
-	     __FUNCTION__, crtc_id(sna_crtc),
+	     __FUNCTION__, sna_crtc->id,
 	     sna_crtc->kmode.hdisplay,
 	     sna_crtc->kmode.vdisplay,
 	     sna_crtc->kmode.clock,
@@ -493,65 +494,30 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 }
 
 static void
-sna_crtc_restore(ScrnInfoPtr scrn)
+sna_crtc_disable(xf86CrtcPtr crtc)
 {
-	struct sna *sna = to_sna(scrn);
-	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
-	struct kgem_bo *bo;
-	int i;
-
-	DBG(("%s (fb_pixmap=%ld, front=%ld)\n", __FUNCTION__,
-	     (long)sna->mode.fb_pixmap,
-	     (long)sna->front->drawable.serialNumber));
-
-	if (sna->mode.fb_pixmap == sna->front->drawable.serialNumber)
-		return;
-
-	bo = sna_pixmap_pin(sna->front);
-	if (!bo)
-		return;
-
-	DBG(("%s: create fb %dx%d@%d/%d\n",
-	     __FUNCTION__,
-	     sna->front->drawable.width,
-	     sna->front->drawable.height,
-	     sna->front->drawable.depth,
-	     sna->front->drawable.bitsPerPixel));
-
-	sna_mode_remove_fb(sna);
-	sna->mode.fb_id = get_fb(sna, bo,
-				 sna->front->drawable.width,
-				 sna->front->drawable.height);
-	if (sna->mode.fb_id == 0)
-		return;
+	struct sna *sna = to_sna(crtc->scrn);
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
+	struct drm_mode_crtc arg;
 
-	DBG(("%s: handle %d attached to fb %d\n",
-	     __FUNCTION__, bo->handle, sna->mode.fb_id));
+	DBG(("%s: disabling crtc [%d]\n", __FUNCTION__, sna_crtc->id));
 
-	for (i = 0; i < xf86_config->num_crtc; i++) {
-		xf86CrtcPtr crtc = xf86_config->crtc[i];
-		if (crtc->enabled)
-			sna_crtc_apply(crtc);
-	}
-	sna_mode_update(sna);
-
-	kgem_bo_retire(&sna->kgem, bo);
-	scrn->displayWidth = bo->pitch / sna->mode.cpp;
-	sna->mode.fb_pixmap = sna->front->drawable.serialNumber;
+	VG_CLEAR(arg);
+	arg.crtc_id = sna_crtc->id;
+	arg.fb_id = 0;
+	arg.mode_valid = 0;
+	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
+	sna_crtc->active = false;
 }
 
 static void
 sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 {
-	struct sna_crtc *sna_crtc = crtc->driver_private;
-
 	DBG(("%s(pipe %d, dpms mode -> %d):= active=%d\n",
-	     __FUNCTION__, sna_crtc->pipe, mode, mode == DPMSModeOn));
+	     __FUNCTION__, to_sna_crtc(crtc)->pipe, mode, mode == DPMSModeOn));
 
-	if (mode != DPMSModeOff)
-		sna_crtc_restore(crtc->scrn);
-	else
-		sna_crtc->active = false;
+	if (mode == DPMSModeOff)
+		sna_crtc_disable(crtc);
 }
 
 static struct kgem_bo *sna_create_bo_for_fbcon(struct sna *sna,
@@ -604,7 +570,7 @@ void sna_copy_fbcon(struct sna *sna)
 	VG_CLEAR(fbcon);
 	fbcon.fb_id = 0;
 	for (i = 0; i < xf86_config->num_crtc; i++) {
-		struct sna_crtc *crtc = xf86_config->crtc[i]->driver_private;
+		struct sna_crtc *crtc = to_sna_crtc(xf86_config->crtc[i]);
 		struct drm_mode_crtc mode;
 
 		VG_CLEAR(mode);
@@ -704,7 +670,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 {
 	ScrnInfoPtr scrn = crtc->scrn;
 	struct sna *sna = to_sna(scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct sna_mode *sna_mode = &sna->mode;
 	int saved_x, saved_y;
 	Rotation saved_rotation;
@@ -779,10 +745,10 @@ static void
 sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_cursor arg;
 
-	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, sna_crtc->id));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_BO;
@@ -790,17 +756,17 @@ sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 	arg.width = arg.height = 64;
 	arg.handle = 0;
 
-	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
+	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
 sna_crtc_show_cursor(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_cursor arg;
 
-	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, sna_crtc->id));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_BO;
@@ -808,24 +774,24 @@ sna_crtc_show_cursor(xf86CrtcPtr crtc)
 	arg.width = arg.height = 64;
 	arg.handle = sna_crtc->cursor;
 
-	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
+	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
 sna_crtc_set_cursor_colors(xf86CrtcPtr crtc, int bg, int fg)
 {
 	__DBG(("%s: CRTC:%d (bg=%x, fg=%x)\n", __FUNCTION__,
-	       crtc_id(crtc->driver_private), bg, fg));
+	       to_sna_crtc(crtc)->id, bg, fg));
 }
 
 static void
 sna_crtc_set_cursor_position(xf86CrtcPtr crtc, int x, int y)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_mode_cursor arg;
 
-	__DBG(("%s: CRTC:%d (%d, %d)\n", __FUNCTION__, crtc_id(sna_crtc), x, y));
+	__DBG(("%s: CRTC:%d (%d, %d)\n", __FUNCTION__, sna_crtc->id, x, y));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_MOVE;
@@ -834,17 +800,17 @@ sna_crtc_set_cursor_position(xf86CrtcPtr crtc, int x, int y)
 	arg.y = y;
 	arg.handle = sna_crtc->cursor;
 
-	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
+	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
 sna_crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 *image)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	struct drm_i915_gem_pwrite pwrite;
 
-	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, sna_crtc->id));
 
 	VG_CLEAR(pwrite);
 	pwrite.handle = sna_crtc->cursor;
@@ -859,7 +825,7 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 {
 	ScrnInfoPtr scrn = crtc->scrn;
 	struct sna *sna = to_sna(scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 	PixmapPtr shadow;
 	struct kgem_bo *bo;
 
@@ -897,7 +863,7 @@ sna_crtc_shadow_create(xf86CrtcPtr crtc, void *data, int width, int height)
 static void
 sna_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr pixmap, void *data)
 {
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 
 	/* We may have not called shadow_create() on the data yet and
 	 * be cleaning up a NULL shadow_pixmap.
@@ -918,9 +884,9 @@ sna_crtc_gamma_set(xf86CrtcPtr crtc,
 		       CARD16 *red, CARD16 *green, CARD16 *blue, int size)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 
-	drmModeCrtcSetGamma(sna->kgem.fd, crtc_id(sna_crtc),
+	drmModeCrtcSetGamma(sna->kgem.fd, sna_crtc->id,
 			    size, red, green, blue);
 }
 
@@ -928,7 +894,7 @@ static void
 sna_crtc_destroy(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 
 	sna_crtc_hide_cursor(crtc);
 	gem_close(sna->kgem.fd, sna_crtc->cursor);
@@ -1014,9 +980,12 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	VG_CLEAR(get_pipe);
 	get_pipe.pipe = 0;
 	get_pipe.crtc_id = sna_crtc->id;
-	drmIoctl(sna->kgem.fd,
-		 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
-		 &get_pipe);
+	if (drmIoctl(sna->kgem.fd,
+		     DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
+		     &get_pipe)) {
+		free(sna_crtc);
+		return;
+	}
 	sna_crtc->pipe = get_pipe.pipe;
 	sna_crtc->plane = sna_crtc_find_plane(sna, sna_crtc->pipe);
 
@@ -1904,18 +1873,18 @@ static int do_page_flip(struct sna *sna, void *data, int ref_crtc_hw_id)
 		evdata |= crtc->pipe == ref_crtc_hw_id;
 
 		DBG(("%s: crtc %d [ref? %d] --> fb %d\n",
-		     __FUNCTION__, crtc_id(crtc),
+		     __FUNCTION__, crtc->id,
 		     crtc->pipe == ref_crtc_hw_id,
 		     sna->mode.fb_id));
 		if (drmModePageFlip(sna->kgem.fd,
-				    crtc_id(crtc),
+				    crtc->id,
 				    sna->mode.fb_id,
 				    DRM_MODE_PAGE_FLIP_EVENT,
 				    (void*)evdata)) {
 			int err = errno;
 			DBG(("%s: flip [fb=%d] on crtc %d [%d] failed - %d\n",
 			     __FUNCTION__, sna->mode.fb_id,
-			     i, crtc_id(crtc), err));
+			     i, crtc->id, err));
 			xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 				   "flip queue failed: %s\n", strerror(err));
 			continue;
@@ -1927,22 +1896,6 @@ static int do_page_flip(struct sna *sna, void *data, int ref_crtc_hw_id)
 	return count;
 }
 
-PixmapPtr sna_set_screen_pixmap(struct sna *sna, PixmapPtr pixmap)
-{
-	PixmapPtr old = sna->front;
-	ScrnInfoPtr scrn = sna->scrn;
-
-	assert(sna->front != pixmap);
-
-	sna->front = pixmap;
-	pixmap->refcnt++;
-
-	sna_redirect_screen_pixmap(scrn, old, pixmap);
-	scrn->displayWidth = sna_pixmap_get_bo(pixmap)->pitch / sna->mode.cpp;
-
-	return old;
-}
-
 int
 sna_page_flip(struct sna *sna,
 	      struct kgem_bo *bo,
@@ -2334,7 +2287,7 @@ void sna_mode_update(struct sna *sna)
 	/* Validate CRTC attachments */
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
-		struct sna_crtc *sna_crtc = crtc->driver_private;
+		struct sna_crtc *sna_crtc = to_sna_crtc(crtc);
 		if (crtc->enabled)
 			sna_crtc->active = sna_crtc_is_bound(sna, crtc);
 		else
commit 221534abe2dc04fae8b8fc332104bca275d4863b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 22:48:43 2012 +0100

    sna: Only reduce damage addition to a region operation if clean
    
    Some paths bypass operating upon the region as they do not have an
    YX-banded set of boxes and so prefer to defer the costly construction of
    the region till later. As a result, we have to be careful not to
    overwrite any existing information if we do operate on the region after
    setting the dirty boxes.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50744
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 1ea9e3c..6149be7 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -574,14 +574,9 @@ static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 		break;
 	}
 
-	switch (REGION_NUM_RECTS(&damage->region)) {
-	case 0:
-		pixman_region_init_rects(&damage->region, box, 1);
-		damage->extents = *box;
-		return damage;
-	case 1:
+	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		_pixman_region_union_box(&damage->region, box);
-		damage->extents = damage->region.extents;
+		damage_union(damage, box);
 		return damage;
 	}
 
@@ -616,7 +611,7 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		pixman_region_union(&damage->region, &damage->region, region);
-		damage->extents = damage->region.extents;
+		damage_union(damage, &region->extents);
 		return damage;
 	}
 
commit 0db789e180b6b01fb8aff547879387058f52a0b9
Author: Zdenek Kabelac <zkabelac at redhat.com>
Date:   Wed Jun 13 14:26:37 2012 +0200

    sna: Constification
    
    Adding preserving const modifiers to decrease amount of const warnings
    
    Signed-off-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index e9d06eb..494b413 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -164,7 +164,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 
 	bpp /= 8;
 
-	src_bytes = (uint8_t *)src + src_stride * src_y + src_x * bpp;
+	src_bytes = (const uint8_t *)src + src_stride * src_y + src_x * bpp;
 	dst_bytes = (uint8_t *)dst + dst_stride * dst_y + dst_x * bpp;
 
 	byte_width = width * bpp;
@@ -184,7 +184,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 
 	case 2:
 		do {
-			*(uint16_t *)dst_bytes = *(uint16_t *)src_bytes;
+			*(uint16_t *)dst_bytes = *(const uint16_t *)src_bytes;
 			src_bytes += src_stride;
 			dst_bytes += dst_stride;
 		} while (--height);
@@ -192,7 +192,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 
 	case 4:
 		do {
-			*(uint32_t *)dst_bytes = *(uint32_t *)src_bytes;
+			*(uint32_t *)dst_bytes = *(const uint32_t *)src_bytes;
 			src_bytes += src_stride;
 			dst_bytes += dst_stride;
 		} while (--height);
@@ -200,7 +200,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 
 	case 8:
 		do {
-			*(uint64_t *)dst_bytes = *(uint64_t *)src_bytes;
+			*(uint64_t *)dst_bytes = *(const uint64_t *)src_bytes;
 			src_bytes += src_stride;
 			dst_bytes += dst_stride;
 		} while (--height);
@@ -224,7 +224,7 @@ memcpy_xor(const void *src, void *dst, int bpp,
 	   uint16_t width, uint16_t height,
 	   uint32_t and, uint32_t or)
 {
-	uint8_t *src_bytes;
+	const uint8_t *src_bytes;
 	uint8_t *dst_bytes;
 	int i;
 
@@ -239,7 +239,7 @@ memcpy_xor(const void *src, void *dst, int bpp,
 	     bpp, and, or));
 
 	bpp /= 8;
-	src_bytes = (uint8_t *)src + src_stride * src_y + src_x * bpp;
+	src_bytes = (const uint8_t *)src + src_stride * src_y + src_x * bpp;
 	dst_bytes = (uint8_t *)dst + dst_stride * dst_y + dst_x * bpp;
 
 	if (and == 0xffffffff) {
@@ -262,7 +262,7 @@ memcpy_xor(const void *src, void *dst, int bpp,
 			if (width & 1) {
 				do {
 					uint16_t *d = (uint16_t *)dst_bytes;
-					uint16_t *s = (uint16_t *)src_bytes;
+					const uint16_t *s = (const uint16_t *)src_bytes;
 
 					for (i = 0; i < width; i++)
 						d[i] = s[i] | or;
@@ -285,7 +285,7 @@ memcpy_xor(const void *src, void *dst, int bpp,
 			if (have_sse2()) {
 				do {
 					uint32_t *d = (uint32_t *)dst_bytes;
-					uint32_t *s = (uint32_t *)src_bytes;
+					const uint32_t *s = (const uint32_t *)src_bytes;
 					__m128i mask = xmm_create_mask_32(or);
 
 					i = width;
diff --git a/src/sna/kgem_debug.c b/src/sna/kgem_debug.c
index e46ffca..2dc1b45 100644
--- a/src/sna/kgem_debug.c
+++ b/src/sna/kgem_debug.c
@@ -225,7 +225,7 @@ decode_2d(struct kgem *kgem, uint32_t offset)
 	};
 
 	unsigned int op, len;
-	char *format = NULL;
+	const char *format = NULL;
 	uint32_t *data = kgem->batch + offset;
 	struct drm_i915_gem_relocation_entry *reloc;
 
diff --git a/src/sna/kgem_debug_gen2.c b/src/sna/kgem_debug_gen2.c
index e00cd81..09f3873 100644
--- a/src/sna/kgem_debug_gen2.c
+++ b/src/sna/kgem_debug_gen2.c
@@ -245,14 +245,14 @@ decode_3d_1d(struct kgem *kgem, uint32_t offset)
 {
     uint32_t *data = kgem->batch + offset;
     unsigned int len, i, idx, word, map;
-    char *format, *zformat, *type;
+    const char *format, *zformat, *type;
     uint32_t opcode;
 
     static const struct {
 	uint32_t opcode;
 	int min_len;
 	int max_len;
-	char *name;
+	const char *name;
     } opcodes_3d_1d[] = {
 	{ 0x86, 4, 4, "3DSTATE_CHROMA_KEY" },
 	{ 0x88, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index de9d217..5d6d175 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -344,7 +344,7 @@ gen3_get_instruction_dst(uint32_t *data, int i, char *dstname, int do_mask)
     uint32_t a0 = data[i];
     int dst_nr = (a0 >> 14) & 0xf;
     char dstmask[8];
-    char *sat;
+    const char *sat;
 
     if (do_mask) {
 	if (((a0 >> 10) & 0xf) == 0xf) {
@@ -396,7 +396,7 @@ gen3_get_instruction_dst(uint32_t *data, int i, char *dstname, int do_mask)
     }
 }
 
-static char *
+static const char *
 gen3_get_channel_swizzle(uint32_t select)
 {
     switch (select & 0x7) {
@@ -468,10 +468,10 @@ gen3_get_instruction_src0(uint32_t *data, int i, char *srcname)
     uint32_t a0 = data[i];
     uint32_t a1 = data[i + 1];
     int src_nr = (a0 >> 2) & 0x1f;
-    char *swizzle_x = gen3_get_channel_swizzle((a1 >> 28) & 0xf);
-    char *swizzle_y = gen3_get_channel_swizzle((a1 >> 24) & 0xf);
-    char *swizzle_z = gen3_get_channel_swizzle((a1 >> 20) & 0xf);
-    char *swizzle_w = gen3_get_channel_swizzle((a1 >> 16) & 0xf);
+    const char *swizzle_x = gen3_get_channel_swizzle((a1 >> 28) & 0xf);
+    const char *swizzle_y = gen3_get_channel_swizzle((a1 >> 24) & 0xf);
+    const char *swizzle_z = gen3_get_channel_swizzle((a1 >> 20) & 0xf);
+    const char *swizzle_w = gen3_get_channel_swizzle((a1 >> 16) & 0xf);
     char swizzle[100];
 
     gen3_get_instruction_src_name((a0 >> 7) & 0x7, src_nr, srcname);
@@ -486,10 +486,10 @@ gen3_get_instruction_src1(uint32_t *data, int i, char *srcname)
     uint32_t a1 = data[i + 1];
     uint32_t a2 = data[i + 2];
     int src_nr = (a1 >> 8) & 0x1f;
-    char *swizzle_x = gen3_get_channel_swizzle((a1 >> 4) & 0xf);
-    char *swizzle_y = gen3_get_channel_swizzle((a1 >> 0) & 0xf);
-    char *swizzle_z = gen3_get_channel_swizzle((a2 >> 28) & 0xf);
-    char *swizzle_w = gen3_get_channel_swizzle((a2 >> 24) & 0xf);
+    const char *swizzle_x = gen3_get_channel_swizzle((a1 >> 4) & 0xf);
+    const char *swizzle_y = gen3_get_channel_swizzle((a1 >> 0) & 0xf);
+    const char *swizzle_z = gen3_get_channel_swizzle((a2 >> 28) & 0xf);
+    const char *swizzle_w = gen3_get_channel_swizzle((a2 >> 24) & 0xf);
     char swizzle[100];
 
     gen3_get_instruction_src_name((a1 >> 13) & 0x7, src_nr, srcname);
@@ -503,10 +503,10 @@ gen3_get_instruction_src2(uint32_t *data, int i, char *srcname)
 {
     uint32_t a2 = data[i + 2];
     int src_nr = (a2 >> 16) & 0x1f;
-    char *swizzle_x = gen3_get_channel_swizzle((a2 >> 12) & 0xf);
-    char *swizzle_y = gen3_get_channel_swizzle((a2 >> 8) & 0xf);
-    char *swizzle_z = gen3_get_channel_swizzle((a2 >> 4) & 0xf);
-    char *swizzle_w = gen3_get_channel_swizzle((a2 >> 0) & 0xf);
+    const char *swizzle_x = gen3_get_channel_swizzle((a2 >> 12) & 0xf);
+    const char *swizzle_y = gen3_get_channel_swizzle((a2 >> 8) & 0xf);
+    const char *swizzle_z = gen3_get_channel_swizzle((a2 >> 4) & 0xf);
+    const char *swizzle_w = gen3_get_channel_swizzle((a2 >> 0) & 0xf);
     char swizzle[100];
 
     gen3_get_instruction_src_name((a2 >> 21) & 0x7, src_nr, srcname);
@@ -554,7 +554,7 @@ gen3_get_instruction_addr(uint32_t src_type, uint32_t src_nr, char *name)
 
 static void
 gen3_decode_alu1(uint32_t *data, uint32_t offset,
-		 int i, char *instr_prefix, char *op_name)
+		 int i, char *instr_prefix, const char *op_name)
 {
     char dst[100], src0[100];
 
@@ -569,7 +569,7 @@ gen3_decode_alu1(uint32_t *data, uint32_t offset,
 
 static void
 gen3_decode_alu2(uint32_t *data, uint32_t offset,
-		 int i, char *instr_prefix, char *op_name)
+		 int i, char *instr_prefix, const char *op_name)
 {
     char dst[100], src0[100], src1[100];
 
@@ -585,7 +585,7 @@ gen3_decode_alu2(uint32_t *data, uint32_t offset,
 
 static void
 gen3_decode_alu3(uint32_t *data, uint32_t offset,
-		 int i, char *instr_prefix, char *op_name)
+		 int i, char *instr_prefix, const char *op_name)
 {
     char dst[100], src0[100], src1[100], src2[100];
 
@@ -602,7 +602,7 @@ gen3_decode_alu3(uint32_t *data, uint32_t offset,
 
 static void
 gen3_decode_tex(uint32_t *data, uint32_t offset, int i, char *instr_prefix,
-		char *tex_name)
+		const char *tex_name)
 {
     uint32_t t0 = data[i];
     uint32_t t1 = data[i + 1];
@@ -626,12 +626,12 @@ static void
 gen3_decode_dcl(uint32_t *data, uint32_t offset, int i, char *instr_prefix)
 {
 	uint32_t d0 = data[i];
-	char *sampletype;
+	const char *sampletype;
 	int dcl_nr = (d0 >> 14) & 0xf;
-	char *dcl_x = d0 & (1 << 10) ? "x" : "";
-	char *dcl_y = d0 & (1 << 11) ? "y" : "";
-	char *dcl_z = d0 & (1 << 12) ? "z" : "";
-	char *dcl_w = d0 & (1 << 13) ? "w" : "";
+	const char *dcl_x = d0 & (1 << 10) ? "x" : "";
+	const char *dcl_y = d0 & (1 << 11) ? "y" : "";
+	const char *dcl_z = d0 & (1 << 12) ? "z" : "";
+	const char *dcl_w = d0 & (1 << 13) ? "w" : "";
 	char dcl_mask[10];
 
 	switch ((d0 >> 19) & 0x3) {
@@ -790,7 +790,7 @@ gen3_decode_instruction(uint32_t *data, uint32_t offset,
     }
 }
 
-static char *
+static const char *
 gen3_decode_compare_func(uint32_t op)
 {
 	switch (op&0x7) {
@@ -806,7 +806,7 @@ gen3_decode_compare_func(uint32_t op)
 	return "";
 }
 
-static char *
+static const char *
 gen3_decode_stencil_op(uint32_t op)
 {
 	switch (op&0x7) {
@@ -824,7 +824,7 @@ gen3_decode_stencil_op(uint32_t op)
 
 #if 0
 /* part of MODES_4 */
-static char *
+static const char *
 gen3_decode_logic_op(uint32_t op)
 {
 	switch (op&0xf) {
@@ -849,7 +849,7 @@ gen3_decode_logic_op(uint32_t op)
 }
 #endif
 
-static char *
+static const char *
 gen3_decode_blend_fact(uint32_t op)
 {
 	switch (op&0xf) {
@@ -872,7 +872,7 @@ gen3_decode_blend_fact(uint32_t op)
 	return "";
 }
 
-static char *
+static const char *
 decode_tex_coord_mode(uint32_t mode)
 {
     switch (mode&0x7) {
@@ -886,7 +886,7 @@ decode_tex_coord_mode(uint32_t mode)
     return "";
 }
 
-static char *
+static const char *
 gen3_decode_sample_filter(uint32_t mode)
 {
 	switch (mode&0x7) {
@@ -949,8 +949,8 @@ gen3_decode_load_state_immediate_1(struct kgem *kgem, uint32_t offset)
 				break;
 			case 4:
 				{
-					char *cullmode = "";
-					char *vfmt_xyzw = "";
+					const char *cullmode = "";
+					const char *vfmt_xyzw = "";
 					switch((data[i]>>13)&0x3) {
 					case 0: cullmode = "both"; break;
 					case 1: cullmode = "none"; break;
@@ -1050,13 +1050,13 @@ gen3_decode_3d_1d(struct kgem *kgem, uint32_t offset)
 {
 	uint32_t *data = kgem->batch + offset;
 	unsigned int len, i, c, idx, word, map, sampler, instr;
-	char *format, *zformat, *type;
+	const char *format, *zformat, *type;
 	uint32_t opcode;
-	const struct {
+	static const struct {
 		uint32_t opcode;
 		int min_len;
 		int max_len;
-		char *name;
+		const char *name;
 	} opcodes_3d_1d[] = {
 		{ 0x86, 4, 4, "3DSTATE_CHROMA_KEY" },
 		{ 0x88, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
@@ -1310,7 +1310,7 @@ gen3_decode_3d_1d(struct kgem *kgem, uint32_t offset)
 		for (sampler = 0; sampler <= 15; sampler++) {
 			if (data[1] & (1 << sampler)) {
 				uint32_t dword;
-				char *mip_filter = "";
+				const char *mip_filter = "";
 				dword = data[i];
 				switch ((dword>>20)&0x3) {
 				case 0: mip_filter = "none"; break;
@@ -1483,7 +1483,7 @@ gen3_decode_3d_primitive(struct kgem *kgem, uint32_t offset)
 	uint32_t *data = kgem->batch + offset;
 	char immediate = (data[0] & (1 << 23)) == 0;
 	unsigned int len, i, ret;
-	char *primtype;
+	const char *primtype;
 	unsigned int vertex = 0;
 
 	switch ((data[0] >> 18) & 0xf) {
@@ -1553,11 +1553,11 @@ out:
 
 int kgem_gen3_decode_3d(struct kgem *kgem, uint32_t offset)
 {
-    struct {
+    static const struct {
 	uint32_t opcode;
 	int min_len;
 	int max_len;
-	char *name;
+	const char *name;
     } opcodes[] = {
 	{ 0x06, 1, 1, "3DSTATE_ANTI_ALIASING" },
 	{ 0x08, 1, 1, "3DSTATE_BACKFACE_STENCIL_OPS" },
diff --git a/src/sna/kgem_debug_gen4.c b/src/sna/kgem_debug_gen4.c
index 53c350b..9b80dc8 100644
--- a/src/sna/kgem_debug_gen4.c
+++ b/src/sna/kgem_debug_gen4.c
@@ -256,7 +256,7 @@ static void primitive_out(struct kgem *kgem, uint32_t *data)
 
 static void
 state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
-	       char *name)
+	       const char *name)
 {
     if (data[index] & 1)
 	kgem_debug_print(data, offset, index,
@@ -270,7 +270,7 @@ state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
 
 static void
 state_max_out(uint32_t *data, uint32_t offset, unsigned int index,
-	      char *name)
+	      const char *name)
 {
 	if (data[index] == 1)
 		kgem_debug_print(data, offset, index,
@@ -460,7 +460,7 @@ int kgem_gen4_decode_3d(struct kgem *kgem, uint32_t offset)
 	uint32_t op;
 	unsigned int len;
 	int i;
-	char *desc1 = NULL;
+	const char *desc1 = NULL;
 
 	len = (data[0] & 0xff) + 2;
 	op = (data[0] & 0xffff0000) >> 16;
diff --git a/src/sna/kgem_debug_gen5.c b/src/sna/kgem_debug_gen5.c
index 9e7360a..e23ceb1 100644
--- a/src/sna/kgem_debug_gen5.c
+++ b/src/sna/kgem_debug_gen5.c
@@ -230,7 +230,7 @@ static void primitive_out(struct kgem *kgem, uint32_t *data)
 
 static void
 state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
-	       char *name)
+	       const char *name)
 {
     if (data[index] & 1)
 	kgem_debug_print(data, offset, index,
@@ -244,7 +244,7 @@ state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
 
 static void
 state_max_out(uint32_t *data, uint32_t offset, unsigned int index,
-	      char *name)
+	      const char *name)
 {
 	if (data[index] == 1)
 		kgem_debug_print(data, offset, index,
@@ -434,7 +434,7 @@ int kgem_gen5_decode_3d(struct kgem *kgem, uint32_t offset)
 	uint32_t op;
 	unsigned int len;
 	int i;
-	char *desc1 = NULL;
+	const char *desc1 = NULL;
 
 	len = (data[0] & 0xff) + 2;
 	op = (data[0] & 0xffff0000) >> 16;
diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c
index 961aa00..e0b09d5 100644
--- a/src/sna/kgem_debug_gen6.c
+++ b/src/sna/kgem_debug_gen6.c
@@ -298,7 +298,7 @@ static void finish_state(struct kgem *kgem)
 
 static void
 state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
-	       char *name)
+	       const char *name)
 {
     if (data[index] & 1)
 	kgem_debug_print(data, offset, index,
@@ -312,7 +312,7 @@ state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
 
 static void
 state_max_out(uint32_t *data, uint32_t offset, unsigned int index,
-	      char *name)
+	      const char *name)
 {
 	if (data[index] == 1)
 		kgem_debug_print(data, offset, index,
@@ -635,7 +635,7 @@ int kgem_gen6_decode_3d(struct kgem *kgem, uint32_t offset)
 	uint32_t op;
 	unsigned int len;
 	int i, j;
-	char *desc1 = NULL;
+	const char *desc1 = NULL;
 
 	len = (data[0] & 0xff) + 2;
 	op = (data[0] & 0xffff0000) >> 16;
diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c
index 78eae01..1bc014b 100644
--- a/src/sna/kgem_debug_gen7.c
+++ b/src/sna/kgem_debug_gen7.c
@@ -302,7 +302,7 @@ static void finish_state(struct kgem *kgem)
 
 static void
 state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
-	       char *name)
+	       const char *name)
 {
     if (data[index] & 1)
 	kgem_debug_print(data, offset, index,
@@ -316,7 +316,7 @@ state_base_out(uint32_t *data, uint32_t offset, unsigned int index,
 
 static void
 state_max_out(uint32_t *data, uint32_t offset, unsigned int index,
-	      char *name)
+	      const char *name)
 {
 	if (data[index] == 1)
 		kgem_debug_print(data, offset, index,
@@ -595,8 +595,7 @@ int kgem_gen7_decode_3d(struct kgem *kgem, uint32_t offset)
 	uint32_t *data = kgem->batch + offset;
 	uint32_t op;
 	unsigned int len;
-	int i, j;
-	char *desc1 = NULL;
+	int i;
 	const char *name;
 
 	len = (data[0] & 0xff) + 2;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 07806b2..6c48db8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4221,7 +4221,7 @@ sna_fill_spans__cpu(DrawablePtr drawable,
 		    GCPtr gc, int n,
 		    DDXPointPtr pt, int *width, int sorted)
 {
-	const RegionRec *clip = sna_gc(gc)->priv;
+	RegionRec *clip = sna_gc(gc)->priv;
 
 	DBG(("%s x %d\n", __FUNCTION__, n));
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 0cfa8b1..83bcd69 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -790,7 +790,7 @@ inline static void _sna_blt_fill_box(struct sna *sna,
 	kgem->nbatch += 3;
 
 	b[0] = blt->cmd;
-	*(uint64_t *)(b+1) = *(uint64_t *)box;
+	*(uint64_t *)(b+1) = *(const uint64_t *)box;
 }
 
 inline static void _sna_blt_fill_boxes(struct sna *sna,
@@ -818,31 +818,31 @@ inline static void _sna_blt_fill_boxes(struct sna *sna,
 
 		kgem->nbatch += 3 * nbox_this_time;
 		while (nbox_this_time >= 8) {
-			b[0] = cmd; *(uint64_t *)(b+1) = *(uint64_t *)box++;
-			b[3] = cmd; *(uint64_t *)(b+4) = *(uint64_t *)box++;
-			b[6] = cmd; *(uint64_t *)(b+7) = *(uint64_t *)box++;
-			b[9] = cmd; *(uint64_t *)(b+10) = *(uint64_t *)box++;
-			b[12] = cmd; *(uint64_t *)(b+13) = *(uint64_t *)box++;
-			b[15] = cmd; *(uint64_t *)(b+16) = *(uint64_t *)box++;
-			b[18] = cmd; *(uint64_t *)(b+19) = *(uint64_t *)box++;
-			b[21] = cmd; *(uint64_t *)(b+22) = *(uint64_t *)box++;
+			b[0] = cmd; *(uint64_t *)(b+1) = *(const uint64_t *)box++;
+			b[3] = cmd; *(uint64_t *)(b+4) = *(const uint64_t *)box++;
+			b[6] = cmd; *(uint64_t *)(b+7) = *(const uint64_t *)box++;
+			b[9] = cmd; *(uint64_t *)(b+10) = *(const uint64_t *)box++;
+			b[12] = cmd; *(uint64_t *)(b+13) = *(const uint64_t *)box++;
+			b[15] = cmd; *(uint64_t *)(b+16) = *(const uint64_t *)box++;
+			b[18] = cmd; *(uint64_t *)(b+19) = *(const uint64_t *)box++;
+			b[21] = cmd; *(uint64_t *)(b+22) = *(const uint64_t *)box++;
 			b += 24;
 			nbox_this_time -= 8;
 		}
 		if (nbox_this_time & 4) {
-			b[0] = cmd; *(uint64_t *)(b+1) = *(uint64_t *)box++;
-			b[3] = cmd; *(uint64_t *)(b+4) = *(uint64_t *)box++;
-			b[6] = cmd; *(uint64_t *)(b+7) = *(uint64_t *)box++;
-			b[9] = cmd; *(uint64_t *)(b+10) = *(uint64_t *)box++;
+			b[0] = cmd; *(uint64_t *)(b+1) = *(const uint64_t *)box++;
+			b[3] = cmd; *(uint64_t *)(b+4) = *(const uint64_t *)box++;
+			b[6] = cmd; *(uint64_t *)(b+7) = *(const uint64_t *)box++;
+			b[9] = cmd; *(uint64_t *)(b+10) = *(const uint64_t *)box++;
 			b += 12;
 		}
 		if (nbox_this_time & 2) {
-			b[0] = cmd; *(uint64_t *)(b+1) = *(uint64_t *)box++;
-			b[3] = cmd; *(uint64_t *)(b+4) = *(uint64_t *)box++;
+			b[0] = cmd; *(uint64_t *)(b+1) = *(const uint64_t *)box++;
+			b[3] = cmd; *(uint64_t *)(b+4) = *(const uint64_t *)box++;
 			b += 6;
 		}
 		if (nbox_this_time & 1) {
-			b[0] = cmd; *(uint64_t *)(b+1) = *(uint64_t *)box++;
+			b[0] = cmd; *(uint64_t *)(b+1) = *(const uint64_t *)box++;
 		}
 
 		if (!nbox)
@@ -1848,7 +1848,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	overwrites = alu == GXcopy || alu == GXclear || alu == GXset;
 	if (overwrites && kgem->nbatch >= 6 &&
 	    kgem->batch[kgem->nbatch-6] == cmd &&
-	    *(uint64_t *)&kgem->batch[kgem->nbatch-4] == *(uint64_t *)box &&
+	    *(uint64_t *)&kgem->batch[kgem->nbatch-4] == *(const uint64_t *)box &&
 	    kgem->reloc[kgem->nreloc-1].target_handle == bo->handle) {
 		DBG(("%s: replacing last fill\n", __FUNCTION__));
 		kgem->batch[kgem->nbatch-5] = br13;
@@ -1857,7 +1857,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	}
 	if (overwrites && kgem->nbatch >= 8 &&
 	    (kgem->batch[kgem->nbatch-8] & 0xffc0000f) == XY_SRC_COPY_BLT_CMD &&
-	    *(uint64_t *)&kgem->batch[kgem->nbatch-6] == *(uint64_t *)box &&
+	    *(uint64_t *)&kgem->batch[kgem->nbatch-6] == *(const uint64_t *)box &&
 	    kgem->reloc[kgem->nreloc-2].target_handle == bo->handle) {
 		DBG(("%s: replacing last copy\n", __FUNCTION__));
 		kgem->batch[kgem->nbatch-8] = cmd;
@@ -1893,7 +1893,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	b = kgem->batch + kgem->nbatch;
 	b[0] = cmd;
 	b[1] = br13;
-	*(uint64_t *)(b+2) = *(uint64_t *)box;
+	*(uint64_t *)(b+2) = *(const uint64_t *)box;
 	b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, bo,
 			      I915_GEM_DOMAIN_RENDER << 16 |
 			      I915_GEM_DOMAIN_RENDER |
commit 33998a7080aa7f50ba922c764c6a93fe951c5b64
Author: Zdenek Kabelac <zkabelac at redhat.com>
Date:   Wed Jun 13 14:26:36 2012 +0200

    sna: Fix typo and compare y1 to y2
    
    It seems 'y1' was meant to be compared here
    
    Signed-off-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9da0198..07806b2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7630,7 +7630,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 			if (seg[i].y1 < seg[i].y2) {
 				rect[i].y = seg[i].y1;
 				rect[i].height = seg[i].y2 - seg[i].y1 + 1;
-			} else if (seg[i].x1 > seg[i].y2) {
+			} else if (seg[i].y1 > seg[i].y2) {
 				rect[i].y = seg[i].y2;
 				rect[i].height = seg[i].y1 - seg[i].y2 + 1;
 			} else {
commit 0ade32fbad4014efcc8aa8ed4cd0f8a39f9ab107
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 14:28:43 2012 +0100

    sna: Fix operator precedence around a | b & c | d
    
    Tell the compiler what we really mean is a | (b & (c | d))
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 9ac6720..0cfa8b1 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -431,7 +431,7 @@ static void sna_blt_copy_one(struct sna *sna,
 	/* Compare against a previous fill */
 	if (kgem->nbatch >= 6 &&
 	    blt->overwrites &&
-	    kgem->batch[kgem->nbatch-6] == (XY_COLOR_BLT | (blt->cmd & BLT_WRITE_ALPHA | BLT_WRITE_RGB)) &&
+	    kgem->batch[kgem->nbatch-6] == (XY_COLOR_BLT | (blt->cmd & (BLT_WRITE_ALPHA | BLT_WRITE_RGB))) &&
 	    kgem->batch[kgem->nbatch-4] == ((uint32_t)dst_y << 16 | (uint16_t)dst_x) &&
 	    kgem->batch[kgem->nbatch-3] == ((uint32_t)(dst_y+height) << 16 | (uint16_t)(dst_x+width)) &&
 	    kgem->reloc[kgem->nreloc-1].target_handle == blt->bo[1]->handle) {
commit 20f3114ccd63052b4fd06ef1d87efaaabbbac7ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 14:27:10 2012 +0100

    sna: Use long for simplicity when printing size_t values for debugging
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 153f261..9da0198 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12372,14 +12372,14 @@ static bool sna_accel_do_debug_memory(struct sna *sna)
 
 static void sna_accel_debug_memory(struct sna *sna)
 {
-	ErrorF("Allocated shadow pixels: %d, %d bytes, as CPU bo: %d, %d bytes\n",
+	ErrorF("Allocated shadow pixels: %d, %ld bytes, as CPU bo: %d, %ld bytes\n",
 	       sna->debug_memory.shadow_pixels_allocs,
-	       sna->debug_memory.shadow_pixels_bytes,
+	       (long)sna->debug_memory.shadow_pixels_bytes,
 	       sna->debug_memory.cpu_bo_allocs,
-	       sna->debug_memory.cpu_bo_bytes);
-	ErrorF("Allocated bo: %d, %d bytes\n",
+	       (long)sna->debug_memory.cpu_bo_bytes);
+	ErrorF("Allocated bo: %d, %ld bytes\n",
 	       sna->kgem.debug_memory.bo_allocs,
-	       sna->kgem.debug_memory.bo_bytes);
+	       (long)sna->kgem.debug_memory.bo_bytes);
 }
 
 #else
commit 6f75b8da6a468188dd0c00015395424598b3f502
Author: Zdenek Kabelac <zkabelac at redhat.com>
Date:   Wed Jun 13 14:26:33 2012 +0200

    sna: Valgrind updates
    
    Add some missing initialization for unknown ioctl
    
    Signed-off-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 398ffbb..153f261 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10804,6 +10804,7 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
 	if (out->bits == NULL)
 		return false;
 
+	VG(memset(out->bits, 0, (w*h + 7) & ~7));
 	src = (uint8_t *)in->bits;
 	dst = (uint8_t *)out->bits;
 	stride -= w;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 05d97b6..a51abb5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1144,6 +1144,7 @@ sna_output_attach_edid(xf86OutputPtr output)
 		if (tmp == NULL)
 			continue;
 
+		VG(memset(tmp, 0, blob.length));
 		blob.data = (uintptr_t)tmp;
 		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob)) {
 			free(tmp);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index e585205..2fa6adc 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -335,6 +335,8 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	}
 
 	val = FALSE;
+
+	VG_CLEAR(gp);
 	gp.param = I915_PARAM_HAS_BLT;
 	gp.value = &val;
 	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp)) {
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 61b02b2..612711f 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -111,6 +111,7 @@ static Bool sna_has_overlay(struct sna *sna)
 	int has_overlay = 0;
 	int ret;
 
+	VG_CLEAR(gp);
 	gp.param = I915_PARAM_HAS_OVERLAY;
 	gp.value = &has_overlay;
 	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp);
commit 70cf637ca47c9e67758e09c7e1d10eef51db1f98
Author: Zdenek Kabelac <zkabelac at redhat.com>
Date:   Wed Jun 13 14:26:32 2012 +0200

    legacy/i810: Fix declaration to match definition
    
    Signed-off-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 519a4f0..141c19c 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -78,7 +78,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "../legacy.h"
 
 static Bool I810PreInit(ScrnInfoPtr pScrn, int flags);
-static Bool I810ScreenInit(SCREEN_INIT_ARGS);
+static Bool I810ScreenInit(SCREEN_INIT_ARGS_DECL);
 static Bool I810EnterVT(VT_FUNC_ARGS_DECL);
 static void I810LeaveVT(VT_FUNC_ARGS_DECL);
 static Bool I810CloseScreen(CLOSE_SCREEN_ARGS_DECL);
commit 5fea2478cd5ab4156c182210d28c5e27e5f67403
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 14:10:01 2012 +0100

    Lots of trivial semantic fixes
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index 0ad991a..acbd82c 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -344,7 +344,6 @@ static Bool i915_texture_setup(PicturePtr picture, PixmapPtr pixmap, int unit)
 		    (FILTER_LINEAR << SS2_MIN_FILTER_SHIFT);
 		break;
 	default:
-		filter = 0;
 		intel_debug_fallback(scrn, "Bad filter 0x%x\n",
 				     picture->filter);
 		return FALSE;
@@ -486,13 +485,10 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 			      int w, int h)
 {
 	Bool is_affine_src = TRUE, is_affine_mask = TRUE;
-	int per_vertex;
 	int tex_unit = 0;
 	int src_unit = -1, mask_unit = -1;
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
 
-	per_vertex = 2;		/* dest x/y */
-
 	src_unit = tex_unit++;
 
 	is_affine_src = intel_transform_is_affine(intel->transform[src_unit]);
@@ -517,8 +513,6 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 						      &src_x[2],
 						      &src_y[2]))
 			return;
-
-		per_vertex += 2;	/* src x/y */
 	} else {
 		if (!intel_get_transformed_coordinates_3d(srcX, srcY,
 							 intel->
@@ -543,8 +537,6 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 							 &src_y[2],
 							 &src_w[2]))
 			return;
-
-		per_vertex += 4;	/* src x/y/z/w */
 	}
 
 	if (intel->render_mask) {
@@ -572,8 +564,6 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 							      &mask_x[2],
 							      &mask_y[2]))
 				return;
-
-			per_vertex += 2;	/* mask x/y */
 		} else {
 			if (!intel_get_transformed_coordinates_3d(maskX, maskY,
 								 intel->
@@ -598,8 +588,6 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 								 &mask_y[2],
 								 &mask_w[2]))
 				return;
-
-			per_vertex += 4;	/* mask x/y/z/w */
 		}
 	}
 
@@ -723,7 +711,8 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 	else
 		floats_per_vertex += 4;	/* src x/y/z/w */
 
-	if (mask != NULL) {
+	if (mask_picture != NULL) {
+		assert(mask != NULL);
 		if (!i915_texture_setup(mask_picture, mask, tex_unit++)) {
 			intel_debug_fallback(scrn,
 					     "fail to setup mask texture\n");
diff --git a/src/i965_render.c b/src/i965_render.c
index 75c99e2..9d45944 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2087,11 +2087,12 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 	intel->transform[0] = source_picture->transform;
 	composite_op->is_affine = intel_transform_is_affine(intel->transform[0]);
 
-	if (!mask) {
+	if (mask_picture == NULL) {
 		intel->transform[1] = NULL;
 		intel->scale_units[1][0] = -1;
 		intel->scale_units[1][1] = -1;
 	} else {
+		assert(mask != NULL);
 		intel->transform[1] = mask_picture->transform;
 		intel->scale_units[1][0] = 1. / mask->drawable.width;
 		intel->scale_units[1][1] = 1. / mask->drawable.height;
diff --git a/src/intel_video.c b/src/intel_video.c
index 09674e5..235845f 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -451,7 +451,6 @@ static XF86VideoAdaptorPtr I830SetupImageVideoOverlay(ScreenPtr screen)
 	if (INTEL_INFO(intel)->gen >= 30) {
 		memcpy((char *)att, (char *)GammaAttributes,
 		       sizeof(XF86AttributeRec) * GAMMA_ATTRIBUTES);
-		att += GAMMA_ATTRIBUTES;
 	}
 	adapt->nImages = NUM_IMAGES - XVMC_IMAGE;
 
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8d7b1d6..89921d4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1914,6 +1914,7 @@ void _kgem_submit(struct kgem *kgem)
 				if (i != -1) {
 					ret = write(i, kgem->batch, batch_end*sizeof(uint32_t));
 					close(i);
+					(void)ret;
 				}
 
 				for (i = 0; i < kgem->nexec; i++) {
diff --git a/src/sna/kgem_debug_gen3.c b/src/sna/kgem_debug_gen3.c
index 969b1d1..de9d217 100644
--- a/src/sna/kgem_debug_gen3.c
+++ b/src/sna/kgem_debug_gen3.c
@@ -945,7 +945,7 @@ gen3_decode_load_state_immediate_1(struct kgem *kgem, uint32_t offset)
 
 				break;
 			case 3:
-				kgem_debug_print(data, offset, i, "S3: not documented\n", word);
+				kgem_debug_print(data, offset, i, "S3: not documented\n");
 				break;
 			case 4:
 				{
@@ -968,7 +968,7 @@ gen3_decode_load_state_immediate_1(struct kgem *kgem, uint32_t offset)
 					case 4<<6 | 1<<2: vfmt_xyzw = "XYWF,"; break;
 					}
 					kgem_debug_print(data, offset, i, "S4: point_width=%i, line_width=%.1f,"
-						  "%s%s%s%s%s cullmode=%s, vfmt=%s%s%s%s%s%s "
+						  "%s%s%s%s%s cullmode=%s, vfmt=%s%s%s%s%s%s%s%s "
 						  "%s%s\n",
 						  (data[i]>>23)&0x1ff,
 						  ((data[i]>>19)&0xf) / 2.0,
@@ -1344,8 +1344,7 @@ gen3_decode_3d_1d(struct kgem *kgem, uint32_t offset)
 					  dword&(1<<0)?" deinterlacer,":"");
 				dword = data[i];
 				kgem_debug_print(data, offset, i++, "sampler %d SS4: border color\n",
-					  sampler, ((dword>>24)&0xff)/(0x10*1.0),
-					  dword);
+					  sampler);
 			}
 		}
 		assert(len == i);
diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c
index 72ed299..961aa00 100644
--- a/src/sna/kgem_debug_gen6.c
+++ b/src/sna/kgem_debug_gen6.c
@@ -555,6 +555,16 @@ gen6_blend_function_to_string(uint32_t v)
 	}
 }
 
+static float unpack_float(uint32_t dw)
+{
+	union {
+		float f;
+		uint32_t dw;
+	} u;
+	u.dw = dw;
+	return u.f;
+}
+
 static void
 gen6_decode_blend(struct kgem *kgem, const uint32_t *reloc)
 {
@@ -883,9 +893,9 @@ int kgem_gen6_decode_3d(struct kgem *kgem, uint32_t offset)
 			  (data[4] & (1 << 31)) != 0 ? "en" : "dis",
 			  (data[4] & (1 << 12)) != 0 ? 4 : 8,
 			  (data[4] & (1 << 11)) != 0);
-		kgem_debug_print(data, offset, 5, "Global Depth Offset Constant %f\n", data[5]);
-		kgem_debug_print(data, offset, 6, "Global Depth Offset Scale %f\n", data[6]);
-		kgem_debug_print(data, offset, 7, "Global Depth Offset Clamp %f\n", data[7]);
+		kgem_debug_print(data, offset, 5, "Global Depth Offset Constant %f\n", unpack_float(data[5]));
+		kgem_debug_print(data, offset, 6, "Global Depth Offset Scale %f\n", unpack_float(data[6]));
+		kgem_debug_print(data, offset, 7, "Global Depth Offset Clamp %f\n", unpack_float(data[7]));
 		for (i = 0, j = 0; i < 8; i++, j+=2)
 			kgem_debug_print(data, offset, i+8, "Attrib %d (Override %s%s%s%s, Const Source %d, Swizzle Select %d, "
 				  "Source %d); Attrib %d (Override %s%s%s%s, Const Source %d, Swizzle Select %d, Source %d)\n",
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c26dc45..398ffbb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -214,7 +214,7 @@ typedef struct box32 {
 	(((_pm) & FbFullMask((_draw)->depth)) == FbFullMask((_draw)->depth))
 
 #if DEBUG_ACCEL
-static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char *function)
+static void _assert_pixmap_contains_box(PixmapPtr pixmap, const BoxRec *box, const char *function)
 {
 	if (box->x1 < 0 || box->y1 < 0 ||
 	    box->x2 > pixmap->drawable.width ||
@@ -229,7 +229,7 @@ static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char
 	}
 }
 
-static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, BoxPtr box, int n, int dx, int dy, const char *function)
+static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, const BoxRec *box, int n, int dx, int dy, const char *function)
 {
 	BoxRec extents;
 
@@ -255,7 +255,7 @@ static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, BoxPtr box, int n, i
 }
 
 
-static void _assert_pixmap_contains_points(PixmapPtr pixmap, DDXPointRec *pt, int n, int dx, int dy, const char *function)
+static void _assert_pixmap_contains_points(PixmapPtr pixmap, const DDXPointRec *pt, int n, int dx, int dy, const char *function)
 {
 	BoxRec extents;
 
@@ -416,16 +416,16 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
+#ifdef DEBUG_MEMORY
+		sna->debug_memory.cpu_bo_allocs--;
+		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
+#endif
 		if (priv->cpu_bo->sync) {
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 			sna_accel_watch_flush(sna, -1);
 		}
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
-#ifdef DEBUG_MEMORY
-		sna->debug_memory.cpu_bo_allocs--;
-		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
-#endif
 	} else
 		free(priv->ptr);
 
@@ -928,7 +928,7 @@ static inline bool pixmap_inplace(struct sna *sna,
 }
 
 static bool
-sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned flags);
+sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned flags);
 
 static bool
 sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
@@ -1834,7 +1834,7 @@ inline static unsigned drawable_gc_flags(DrawablePtr draw,
 }
 
 static bool
-sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
+sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int flags)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
@@ -2151,6 +2151,7 @@ move_to_gpu:
 	}
 
 done:
+	assert(priv->gpu_bo != NULL);
 	if (sna_damage_is_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height))
@@ -2164,6 +2165,7 @@ done:
 	return priv->gpu_bo;
 
 use_gpu_bo:
+	assert(priv->gpu_bo != NULL);
 	priv->clear = false;
 	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive,
@@ -3916,6 +3918,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		goto out;
 	} else if (dst_priv->cpu_bo &&
 		   src_priv && DAMAGE_IS_ALL(src_priv->gpu_damage) && !src_priv->clear) {
+		assert(src_priv->gpu_bo != NULL); /* guaranteed by gpu_damage */
 		if (!sna->render.copy_boxes(sna, alu,
 					    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
 					    dst_pixmap, dst_priv->cpu_bo, dst_dx, dst_dy,
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index e84c87f..9ac6720 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -2008,7 +2008,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 		nbox -= nbox_this_time;
 
 		do {
-			uint32_t *b = kgem->batch + kgem->nbatch;
+			uint32_t *b;
 
 			DBG(("%s: (%d, %d), (%d, %d): %08x\n",
 			     __FUNCTION__,
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 8528217..97fe70e 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -330,6 +330,7 @@ sna_compute_composite_extents(BoxPtr extents,
 	v = dst_y + height;
 	if (v > dst->pDrawable->height)
 		v = dst->pDrawable->height;
+	extents->y2 = v;
 
 	DBG(("%s: initial clip against dst->pDrawable: (%d, %d), (%d, %d)\n",
 	     __FUNCTION__,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 8d61f40..4dc7062 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -387,9 +387,13 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		return NULL;
 	}
 
-	if (DBG_FORCE_UPLOAD < 0)
-		return sna_pixmap_force_to_gpu(pixmap,
-					       MOVE_SOURCE_HINT | MOVE_READ);
+	if (DBG_FORCE_UPLOAD < 0) {
+		if (!sna_pixmap_force_to_gpu(pixmap,
+					       MOVE_SOURCE_HINT | MOVE_READ))
+			return NULL;
+
+		return priv->gpu_bo;
+	}
 
 	w = box->x2 - box->x1;
 	h = box->y2 - box->y1;
@@ -584,9 +588,9 @@ sna_render_pixmap_bo(struct sna *sna,
 static int sna_render_picture_downsample(struct sna *sna,
 					 PicturePtr picture,
 					 struct sna_composite_channel *channel,
-					 int16_t x, int16_t y,
-					 int16_t w, int16_t h,
-					 int16_t dst_x, int16_t dst_y)
+					 const int16_t x, const int16_t y,
+					 const int16_t w, const int16_t h,
+					 const int16_t dst_x, const int16_t dst_y)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
 	ScreenPtr screen = pixmap->drawable.pScreen;
@@ -596,15 +600,12 @@ static int sna_render_picture_downsample(struct sna *sna,
 	pixman_transform_t t;
 	PixmapPtr tmp;
 	int width, height, size;
-	int sx, sy, ox, oy, ow, oh;
+	int sx, sy, sw, sh;
 	int error, ret = 0;
 	BoxRec box, b;
 
-	ow = w;
-	oh = h;
-
-	ox = box.x1 = x;
-	oy = box.y1 = y;
+	box.x1 = x;
+	box.y1 = y;
 	box.x2 = x + w;
 	box.y2 = y + h;
 	if (channel->transform) {
@@ -612,12 +613,10 @@ static int sna_render_picture_downsample(struct sna *sna,
 
 		pixman_transform_bounds(channel->transform, &box);
 
-		v.vector[0] = ox << 16;
-		v.vector[1] = oy << 16;
-		v.vector[2] =  1 << 16;
+		v.vector[0] = x << 16;
+		v.vector[1] = y << 16;
+		v.vector[2] = 1 << 16;
 		pixman_transform_point(channel->transform, &v);
-		ox = v.vector[0] / v.vector[2];
-		oy = v.vector[1] / v.vector[2];
 	}
 
 	if (channel->repeat == RepeatNone || channel->repeat == RepeatPad) {
@@ -645,33 +644,33 @@ static int sna_render_picture_downsample(struct sna *sna,
 				return sna_render_picture_fixup(sna,
 								picture,
 								channel,
-								x, y, ow, oh,
+								x, y, w, h,
 								dst_x, dst_y);
 			}
 		}
 	}
 
-	w = box.x2 - box.x1;
-	h = box.y2 - box.y1;
+	sw = box.x2 - box.x1;
+	sh = box.y2 - box.y1;
 
 	DBG(("%s: sample (%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
-	sx = (w + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
-	sy = (h + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
+	sx = (sw + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
+	sy = (sh + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
 
 	DBG(("%s: scaling (%d, %d) down by %dx%d\n",
-	     __FUNCTION__, w, h, sx, sy));
+	     __FUNCTION__, sw, sh, sx, sy));
 
-	width  = w / sx;
-	height = h / sy;
+	width  = sw / sx;
+	height = sh / sy;
 
 	DBG(("%s: creating temporary GPU bo %dx%d\n",
 	     __FUNCTION__, width, height));
 
 	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_SOURCE_HINT | MOVE_READ))
 		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, ow, oh,
+						x, y, w, h,
 						dst_x, dst_y);
 
 	tmp = screen->CreatePixmap(screen,
@@ -706,9 +705,9 @@ static int sna_render_picture_downsample(struct sna *sna,
 	 */
 	tmp_src->filter = PictFilterNearest;
 	memset(&t, 0, sizeof(t));
-	t.matrix[0][0] = (w << 16) / width;
+	t.matrix[0][0] = (sw << 16) / width;
 	t.matrix[0][2] = box.x1 << 16;
-	t.matrix[1][1] = (h << 16) / height;
+	t.matrix[1][1] = (sh << 16) / height;
 	t.matrix[1][2] = box.y1 << 16;
 	t.matrix[2][2] = 1 << 16;
 	tmp_src->transform = &t;
@@ -721,20 +720,20 @@ static int sna_render_picture_downsample(struct sna *sna,
 	while (size * size * 4 > sna->kgem.max_copy_tile_size)
 		size /= 2;
 
-	w = size / sx - 2 * sx;
-	h = size / sy - 2 * sy;
+	sw = size / sx - 2 * sx;
+	sh = size / sy - 2 * sy;
 	DBG(("%s %d:%d downsampling using %dx%d GPU tiles\n",
-	     __FUNCTION__, (width + w-1)/w, (height + h-1)/h, w, h));
+	     __FUNCTION__, (width + sw-1)/sw, (height + sh-1)/sh, sw, sh));
 
 	for (b.y1 = 0; b.y1 < height; b.y1 = b.y2) {
-		b.y2 = b.y1 + h;
+		b.y2 = b.y1 + sh;
 		if (b.y2 > height)
 			b.y2 = height;
 
 		for (b.x1 = 0; b.x1 < width; b.x1 = b.x2) {
 			struct sna_composite_op op;
 
-			b.x2 = b.x1 + w;
+			b.x2 = b.x1 + sw;
 			if (b.x2 > width)
 				b.x2 = width;
 
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index 945818a..61b02b2 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -627,7 +627,6 @@ XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna,
 {
 	XF86VideoAdaptorPtr adaptor;
 	struct sna_video *video;
-	XF86AttributePtr att;
 
 	if (!sna_has_overlay(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_INFO,
@@ -664,17 +663,15 @@ XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna,
 	adaptor->nAttributes = NUM_ATTRIBUTES;
 	if (HAS_GAMMA(sna))
 		adaptor->nAttributes += GAMMA_ATTRIBUTES;
-	adaptor->pAttributes =
+
+	 adaptor->pAttributes =
 	    xnfalloc(sizeof(XF86AttributeRec) * adaptor->nAttributes);
 	/* Now copy the attributes */
-	att = adaptor->pAttributes;
-	memcpy(att, Attributes, sizeof(XF86AttributeRec) * NUM_ATTRIBUTES);
-	att += NUM_ATTRIBUTES;
-	if (HAS_GAMMA(sna)) {
-		memcpy(att, GammaAttributes,
+	memcpy(adaptor->pAttributes, Attributes, sizeof(XF86AttributeRec) * NUM_ATTRIBUTES);
+	if (HAS_GAMMA(sna))
+		memcpy(adaptor->pAttributes + NUM_ATTRIBUTES, GammaAttributes,
 		       sizeof(XF86AttributeRec) * GAMMA_ATTRIBUTES);
-		att += GAMMA_ATTRIBUTES;
-	}
+
 	adaptor->nImages = NUM_IMAGES;
 	adaptor->pImages = (XF86ImagePtr)Images;
 	adaptor->PutVideo = NULL;
commit e499f207c161d1b3cd75f065dc89021ff5f40b63
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 13:11:17 2012 +0100

    sna: Fix memleak from sna_crtc_find_plane()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 37709ee..05d97b6 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -957,31 +957,41 @@ static const xf86CrtcFuncsRec sna_crtc_funcs = {
 static uint32_t
 sna_crtc_find_plane(struct sna *sna, int pipe)
 {
-	drmModePlaneRes *resources;
-	uint32_t id = 0;
+	struct drm_mode_get_plane_res r;
+	uint32_t *planes, id = 0;
 	int i;
 
-	resources = drmModeGetPlaneResources(sna->kgem.fd);
-	if (!resources) {
-		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
-			   "failed to get plane resources: %s\n",
-			   strerror(errno));
+	VG_CLEAR(r);
+	r.count_planes = 0;
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &r))
+		return 0;
+
+	if (!r.count_planes)
 		return 0;
-	}
 
-	for (i = 0; id == 0 && i < resources->count_planes; i++) {
-		drmModePlane *p;
+	planes = malloc(sizeof(uint32_t)*r.count_planes);
+	if (planes == NULL)
+		return 0;
+
+	r.plane_id_ptr = (uintptr_t)planes;
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &r))
+		r.count_planes = 0;
 
-		p = drmModeGetPlane(sna->kgem.fd, resources->planes[i]);
-		if (p) {
-			if (p->possible_crtcs & (1 << pipe))
-				id = p->plane_id;
+	for (i = 0; i < r.count_planes; i++) {
+		struct drm_mode_get_plane p;
 
-			drmModeFreePlane(p);
+		VG_CLEAR(p);
+		p.plane_id = planes[i];
+		p.count_format_types = 0;
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPLANE, &p) == 0) {
+			if (p.possible_crtcs & (1 << pipe)) {
+				id = p.plane_id;
+				break;
+			}
 		}
 	}
+	free(planes);
 
-	free(resources);
 	return id;
 }
 
commit 49da55da518348fc6b88e09d5132dd1b1d751304
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 11:20:41 2012 +0100

    sna: Free the reversed glyph bits along with the font
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 34f25ef..c26dc45 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10503,6 +10503,9 @@ struct sna_font {
 	CharInfoRec glyphs8[256];
 	CharInfoRec *glyphs16[256];
 };
+#define GLYPH_INVALID (void *)1
+#define GLYPH_EMPTY (void *)2
+#define GLYPH_CLEAR (void *)3
 
 static Bool
 sna_realize_font(ScreenPtr screen, FontPtr font)
@@ -10525,15 +10528,28 @@ static Bool
 sna_unrealize_font(ScreenPtr screen, FontPtr font)
 {
 	struct sna_font *priv = FontGetPrivate(font, sna_font_key);
-	int n;
+	int i, j;
 
-	if (priv) {
-		for (n = 0; n < 256; n++)
-			free(priv->glyphs16[n]);
-		free(priv);
-		FontSetPrivate(font, sna_font_key, NULL);
+	if (priv == NULL)
+		return TRUE;
+
+	for (i = 0; i < 256; i++) {
+		if ((uintptr_t)priv->glyphs8[i].bits & ~3)
+			free(priv->glyphs8[i].bits);
 	}
+	for (j = 0; j < 256; j++) {
+		if (priv->glyphs16[j] == NULL)
+			continue;
+
+		for (i = 0; i < 256; i++) {
+			if ((uintptr_t)priv->glyphs16[j][i].bits & ~3)
+				free(priv->glyphs16[j][i].bits);
+		}
+		free(priv->glyphs16[j]);
+	}
+	free(priv);
 
+	FontSetPrivate(font, sna_font_key, NULL);
 	return TRUE;
 }
 
@@ -10634,10 +10650,10 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			int w8 = (w + 7) >> 3;
 			int x1, y1, len;
 
-			if (c->bits == (void *)1)
+			if (c->bits == GLYPH_EMPTY)
 				goto skip;
 
-			if (!transparent && c->bits == (void *)2)
+			if (!transparent && c->bits == GLYPH_CLEAR)
 				goto skip;
 
 			len = (w8 * h + 7) >> 3 << 1;
@@ -10684,7 +10700,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			b[0] = br00 | (1 + len);
 			b[1] = (uint16_t)y1 << 16 | (uint16_t)x1;
 			b[2] = (uint16_t)(y1+h) << 16 | (uint16_t)(x1+w);
-			if (c->bits == (void *)2) {
+			if (c->bits == GLYPH_CLEAR) {
 				memset(b+3, 0, len*4);
 			} else {
 				uint64_t *src = (uint64_t *)c->bits;
@@ -10775,7 +10791,7 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
 
 	/* Skip empty glyphs */
 	if (w == 0 || h == 0 || ((w|h) == 1 && (in->bits[0] & 1) == 0)) {
-		out->bits = (void *)1;
+		out->bits = GLYPH_EMPTY;
 		return true;
 	}
 
@@ -10799,7 +10815,7 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
 
 	if (clear) {
 		free(out->bits);
-		out->bits = (void *)2;
+		out->bits = GLYPH_CLEAR;
 	}
 
 	return true;
@@ -10814,12 +10830,12 @@ inline static bool sna_get_glyph8(FontPtr font, struct sna_font *priv,
 	p = &priv->glyphs8[g];
 	if (p->bits) {
 		*out = p;
-		return p->bits != (void*)-1;
+		return p->bits != GLYPH_INVALID;
 	}
 
 	font->get_glyphs(font, 1, &g, Linear8Bit, &n, &ret);
 	if (n == 0) {
-		p->bits = (void*)-1;
+		p->bits = GLYPH_INVALID;
 		return false;
 	}
 
@@ -10839,14 +10855,14 @@ inline static bool sna_get_glyph16(FontPtr font, struct sna_font *priv,
 	p = &page[g&0xff];
 	if (p->bits) {
 		*out = p;
-		return p->bits != (void*)-1;
+		return p->bits != GLYPH_INVALID;
 	}
 
 	font->get_glyphs(font, 1, (unsigned char *)&g,
 			 FONTLASTROW(font) ? TwoD16Bit : Linear16Bit,
 			 &n, &ret);
 	if (n == 0) {
-		p->bits = (void*)-1;
+		p->bits = GLYPH_INVALID;
 		return false;
 	}
 
commit 9f68c7fe6aa34ab17d82489fca5f63a3ce335444
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 11:15:42 2012 +0100

    sna: Free clip rectangles after intersection with PolyRectangle extents
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e39f4a4..34f25ef 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -8031,6 +8031,7 @@ zero_clipped:
 				}
 			} while (--n);
 		}
+		RegionUninit(&clip);
 	}
 	goto done;
 
@@ -8208,6 +8209,7 @@ wide_clipped:
 				}
 			} while (--n);
 		}
+		RegionUninit(&clip);
 	}
 	goto done;
 
commit ab3b7fe31b5a9d7924e959f21d29c4f7352ec8cb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 10:35:14 2012 +0100

    sna: Only reuse a write buffer if all external references have been dropped
    
    This avoids the unhappy situation of overwriting an upload buffer that
    we intend to use for a fallback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0c7b56c..8d7b1d6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3651,7 +3651,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
-		    !bo->mmapped && size <= bytes(&bo->base)) {
+		    bo->base.refcnt == 1 && !bo->mmapped &&
+		    size <= bytes(&bo->base)) {
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 			gem_write(kgem->fd, bo->base.handle,
commit e8cb656dc4bab1489df9dfb4cb64704b2a9fa34b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 13 08:10:10 2012 +0100

    sna: Free clip intersection with line extents after use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8498b3c..e39f4a4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6238,6 +6238,7 @@ Y2_continue:
 
 done:
 	fill.done(sna, &fill);
+	RegionUninit(&clip);
 	return true;
 
 damage:
@@ -7402,6 +7403,7 @@ Y2_continue:
 
 done:
 	fill.done(sna, &fill);
+	RegionUninit(&clip);
 	return true;
 
 damage:
commit 572745eb243a96819c8fa248f4ffdd703ed13b0f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 12 22:49:05 2012 +0100

    sna/damage: Remove unused return value
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index e563db2..1ea9e3c 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -154,7 +154,7 @@ static const char *_debug_describe_damage(char *buf, int max,
 
 #endif
 
-static struct sna_damage_box *
+static void
 reset_embedded_box(struct sna_damage *damage)
 {
 	damage->dirty = false;
@@ -162,8 +162,6 @@ reset_embedded_box(struct sna_damage *damage)
 	damage->embedded_box.size =
 		damage->remain = ARRAY_SIZE(damage->embedded_box.box);
 	list_init(&damage->embedded_box.list);
-
-	return (struct sna_damage_box *)&damage->embedded_box;
 }
 
 static struct sna_damage *_sna_damage_create(void)
commit aefc0417dde8b2137a787459a69d91c14902f22b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 12 22:47:41 2012 +0100

    sna/damage: Simplify initialisation of damage extents
    
    Just use the already computed region->extents when adding the first box.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 1cdfe22..e563db2 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -251,7 +251,7 @@ _sna_damage_create_elt(struct sna_damage *damage,
 	}
 	assert(damage->remain >= 0);
 
-	 return damage;
+	return damage;
 }
 
 static struct sna_damage *
@@ -530,7 +530,12 @@ done:
 
 static void damage_union(struct sna_damage *damage, const BoxRec *box)
 {
-	assert(box->x2 > box->x1 && box->y2 > box>y1);
+	DBG(("%s: extending damage (%d, %d), (%d, %d) by (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     damage->extents.x1, damage->extents.y1,
+	     damage->extents.x2, damage->extents.y2,
+	     box->x1, box->y1, box->x2, box->y2));
+	assert(box->x2 > box->x1 && box->y2 > box->y1);
 	if (damage->extents.x2 < damage->extents.x1) {
 		damage->extents = *box;
 	} else {
@@ -574,11 +579,11 @@ static struct sna_damage *__sna_damage_add_box(struct sna_damage *damage,
 	switch (REGION_NUM_RECTS(&damage->region)) {
 	case 0:
 		pixman_region_init_rects(&damage->region, box, 1);
-		damage_union(damage, box);
+		damage->extents = *box;
 		return damage;
 	case 1:
 		_pixman_region_union_box(&damage->region, box);
-		damage_union(damage, box);
+		damage->extents = damage->region.extents;
 		return damage;
 	}
 
@@ -613,7 +618,7 @@ inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
 
 	if (REGION_NUM_RECTS(&damage->region) <= 1) {
 		pixman_region_union(&damage->region, &damage->region, region);
-		damage_union(damage, &region->extents);
+		damage->extents = damage->region.extents;
 		return damage;
 	}
 
commit 78a96e812b912c5b25a02670f603f455f93e9d00
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 12 21:43:31 2012 +0100

    sna: Refresh the damage self-tests
    
    They had bitrotted with the revamp and needed some massaging to update
    to the new interfaces.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index dd67364..1cdfe22 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -236,7 +236,7 @@ _sna_damage_create_elt(struct sna_damage *damage,
 		damage->box += n;
 		damage->remain -= n;
 
-		count -=n;
+		count -= n;
 		boxes += n;
 		if (count == 0)
 			return damage;
@@ -249,6 +249,7 @@ _sna_damage_create_elt(struct sna_damage *damage,
 		damage->box += count;
 		damage->remain -= count;
 	}
+	assert(damage->remain >= 0);
 
 	 return damage;
 }
@@ -276,7 +277,7 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 		damage->box += n;
 		damage->remain -= n;
 
-		count -=n;
+		count -= n;
 		boxes += n;
 		if (count == 0)
 			return damage;
@@ -295,6 +296,7 @@ _sna_damage_create_elt_from_boxes(struct sna_damage *damage,
 	}
 	damage->box += count;
 	damage->remain -= count;
+	assert(damage->remain >= 0);
 
 	return damage;
 }
@@ -323,7 +325,7 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 		damage->box += n;
 		damage->remain -= n;
 
-		count -=n;
+		count -= n;
 		r += n;
 		if (count == 0)
 			return damage;
@@ -342,6 +344,7 @@ _sna_damage_create_elt_from_rectangles(struct sna_damage *damage,
 	}
 	damage->box += count;
 	damage->remain -= count;
+	assert(damage->remain >= 0);
 
 	return damage;
 }
@@ -370,7 +373,7 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 		damage->box += n;
 		damage->remain -= n;
 
-		count -=n;
+		count -= n;
 		p += n;
 		if (count == 0)
 			return damage;
@@ -389,6 +392,7 @@ _sna_damage_create_elt_from_points(struct sna_damage *damage,
 	}
 	damage->box += count;
 	damage->remain -= count;
+	assert(damage->remain >= 0);
 
 	return damage;
 }
@@ -526,6 +530,7 @@ done:
 
 static void damage_union(struct sna_damage *damage, const BoxRec *box)
 {
+	assert(box->x2 > box->x1 && box->y2 > box>y1);
 	if (damage->extents.x2 < damage->extents.x1) {
 		damage->extents = *box;
 	} else {
@@ -1004,7 +1009,6 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 	if (!sna_damage_overlaps_box(damage, &region->extents))
 		return damage;
 
-
 	if (region_is_singular(region) &&
 	    box_contains(&region->extents, &damage->extents)) {
 		__sna_damage_destroy(damage);
@@ -1481,7 +1485,8 @@ static void st_damage_add(struct sna_damage_selftest *test,
 
 	st_damage_init_random_region1(test, &tmp);
 
-	sna_damage_add(damage, &tmp);
+	if (!DAMAGE_IS_ALL(*damage))
+		sna_damage_add(damage, &tmp);
 	pixman_region_union(region, region, &tmp);
 }
 
@@ -1489,15 +1494,14 @@ static void st_damage_add_box(struct sna_damage_selftest *test,
 			      struct sna_damage **damage,
 			      pixman_region16_t *region)
 {
-	BoxRec box;
+	RegionRec r;
 
-	st_damage_init_random_box(test, &box);
+	st_damage_init_random_box(test, &r.extents);
+	r.data = NULL;
 
-	sna_damage_add_box(damage, &box);
-	pixman_region_union_rectangle(region, region,
-				      box.x1, box.y2,
-				      box.x2 - box.x1,
-				      box.y2 - box.y1);
+	if (!DAMAGE_IS_ALL(*damage))
+		sna_damage_add_box(damage, &r.extents);
+	pixman_region_union(region, region, &r);
 }
 
 static void st_damage_subtract(struct sna_damage_selftest *test,
@@ -1512,6 +1516,19 @@ static void st_damage_subtract(struct sna_damage_selftest *test,
 	pixman_region_subtract(region, region, &tmp);
 }
 
+static void st_damage_subtract_box(struct sna_damage_selftest *test,
+				   struct sna_damage **damage,
+				   pixman_region16_t *region)
+{
+	RegionRec r;
+
+	st_damage_init_random_box(test, &r.extents);
+	r.data = NULL;
+
+	sna_damage_subtract_box(damage, &r.extents);
+	pixman_region_subtract(region, region, &r);
+}
+
 static void st_damage_all(struct sna_damage_selftest *test,
 			  struct sna_damage **damage,
 			  pixman_region16_t *region)
@@ -1520,7 +1537,8 @@ static void st_damage_all(struct sna_damage_selftest *test,
 
 	pixman_region_init_rect(&tmp, 0, 0, test->width, test->height);
 
-	sna_damage_all(damage, test->width, test->height);
+	if (!DAMAGE_IS_ALL(*damage))
+		sna_damage_all(damage, test->width, test->height);
 	pixman_region_union(region, region, &tmp);
 }
 
@@ -1531,7 +1549,7 @@ static bool st_check_equal(struct sna_damage_selftest *test,
 	int d_num, r_num;
 	BoxPtr d_boxes, r_boxes;
 
-	d_num = sna_damage_get_boxes(*damage, &d_boxes);
+	d_num = *damage ? sna_damage_get_boxes(*damage, &d_boxes) : 0;
 	r_boxes = pixman_region_rectangles(region, &r_num);
 
 	if (d_num != r_num) {
@@ -1557,6 +1575,7 @@ void sna_damage_selftest(void)
 		st_damage_add,
 		st_damage_add_box,
 		st_damage_subtract,
+		st_damage_subtract_box,
 		st_damage_all
 	};
 	bool (*const check[])(struct sna_damage_selftest *test,
@@ -1569,13 +1588,14 @@ void sna_damage_selftest(void)
 	char damage_buf[1000];
 	int pass;
 
-	for (pass = 0; pass < 1024; pass++) {
+	for (pass = 0; pass < 16384; pass++) {
 		struct sna_damage_selftest test;
 		struct sna_damage *damage;
 		pixman_region16_t ref;
 		int iter, i;
 
-		iter = rand() % 1024;
+		iter = 1 + rand() % (1 + (pass / 64));
+		ErrorF("%s: pass %d, iters=%d\n", __FUNCTION__, pass, iter);
 
 		test.width = 1 + rand() % 2048;
 		test.height = 1 + rand() % 2048;
commit 6138f7434a6fb014299a7e9a8392ef962c8cba30
Author: Cyril Brulebois <kibi at debian.org>
Date:   Tue Jun 12 21:19:14 2012 +0100

    Fix up braindamage in previous commit.
    
    ickle: Fixing up my idiotic change, obviously too much birthday cake.

diff --git a/src/intel_module.c b/src/intel_module.c
index d6b4eb9..af82cff 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -298,11 +298,8 @@ extern XF86ConfigPtr xf86configptr;
 static XF86ConfDevicePtr
 _xf86findDriver(const char *ident, XF86ConfDevicePtr p)
 {
-	if (p->dev_driver == NULL)
-		return NULL;
-
 	while (p) {
-		if (xf86nameCompare(ident, p->dev_driver) == 0)
+		if (p->dev_driver && xf86nameCompare(ident, p->dev_driver) == 0)
 			return p;
 
 		p = p->list.next;
commit 224d631a232d3536e0f1f2780bfbe1218ee7590f
Author: Cyril Brulebois <kibi at debian.org>
Date:   Tue Jun 12 21:14:53 2012 +0100

    Avoid calling xf86nameCompare() with a NULL string
    
    Device sections without a Driver property would lead to a server
    segfault because of a NULL pointer's being passed as the second
    argument of xf86nameCompare().
    
    Debian bug #677206 <http://bugs.debian.org/677206>
    
    Signed-off-by: Cyril Brulebois <kibi at debian.org>

diff --git a/src/intel_module.c b/src/intel_module.c
index 4430ac6..d6b4eb9 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -298,12 +298,16 @@ extern XF86ConfigPtr xf86configptr;
 static XF86ConfDevicePtr
 _xf86findDriver(const char *ident, XF86ConfDevicePtr p)
 {
+	if (p->dev_driver == NULL)
+		return NULL;
+
 	while (p) {
 		if (xf86nameCompare(ident, p->dev_driver) == 0)
 			return p;
 
 		p = p->list.next;
 	}
+
 	return NULL;
 }
 
commit 3b9b64c7c9b5b0bfaafb97c9a9fe5849bbb412da
Author: Dave Airlie <airlied at gmail.com>
Date:   Tue Jun 12 10:26:34 2012 +0100

    uxa: do copy fb at startup.
    
    Copy the current framebuffer for smooth wayland->gdm handoff.
    
    This has been hanging around in Fedora for too long now, and we've
    dropped the feature a few times, and yes I know the Simpsons did it^W^W^W
    SNA does it.
    
    I've updated the code to have some of the better fixes from nouveau.
    
    I've no idea who wrote this code either, krh or ajax. [ickle: The
    earliest version I've found had krh's fingerprints on it, though it may
    still have been a joint effort.]
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>
    [ickle: improve error handling, only copy the fb during initial takeover]

diff --git a/src/intel.h b/src/intel.h
index 20d8282..caf07bb 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -368,6 +368,7 @@ extern void intel_mode_fini(intel_screen_private *intel);
 extern int intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, xf86CrtcPtr crtc);
 extern int intel_crtc_id(xf86CrtcPtr crtc);
 extern int intel_output_dpms_status(xf86OutputPtr output);
+extern void intel_copy_fb(ScrnInfoPtr scrn);
 
 enum DRI2FrameEventType {
 	DRI2_SWAP,
diff --git a/src/intel_display.c b/src/intel_display.c
index 6f3f7e6..8de6344 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -35,6 +35,7 @@
 #include <unistd.h>
 #include <errno.h>
 #include <poll.h>
+#include <sys/ioctl.h>
 
 #include "xorgVersion.h"
 
@@ -45,6 +46,8 @@
 #include "X11/Xatom.h"
 #include "X11/extensions/dpmsconst.h"
 #include "xf86DDC.h"
+#include "fb.h"
+#include "uxa.h"
 
 #include "intel_glamor.h"
 
@@ -1748,3 +1751,129 @@ Bool intel_crtc_on(xf86CrtcPtr crtc)
 
 	return ret;
 }
+
+static PixmapPtr
+intel_create_pixmap_for_bo(ScreenPtr pScreen, dri_bo *bo,
+			   int width, int height,
+			   int depth, int bpp,
+			   int pitch)
+{
+	PixmapPtr pixmap;
+
+	pixmap = pScreen->CreatePixmap(pScreen, 0, 0, depth, 0);
+	if (pixmap == NullPixmap)
+		return pixmap;
+
+	if (!pScreen->ModifyPixmapHeader(pixmap,
+					 width, height,
+					 depth, bpp,
+					 pitch, NULL)) {
+		pScreen->DestroyPixmap(pixmap);
+		return NullPixmap;
+	}
+
+	intel_set_pixmap_bo(pixmap, bo);
+	return pixmap;
+}
+
+static PixmapPtr
+intel_create_pixmap_for_fbcon(ScrnInfoPtr scrn, int fbcon_id)
+{
+	ScreenPtr pScreen = xf86ScrnToScreen(scrn);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	struct intel_mode *mode = intel->modes;
+	int fd = mode->fd;
+	drmModeFBPtr fbcon;
+	struct drm_gem_flink flink;
+	drm_intel_bo *bo;
+	PixmapPtr pixmap = NullPixmap;
+
+	fbcon = drmModeGetFB(fd, fbcon_id);
+	if (fbcon == NULL)
+		return NULL;
+
+	if (fbcon->depth != scrn->depth ||
+	    fbcon->width != scrn->virtualX ||
+	    fbcon->height != scrn->virtualY)
+		goto out_free_fb;
+
+	flink.handle = fbcon->handle;
+	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) < 0) {
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			   "Couldn't flink fbcon handle\n");
+		goto out_free_fb;
+	}
+
+	bo = drm_intel_bo_gem_create_from_name(intel->bufmgr,
+					       "fbcon", flink.name);
+	if (bo == NULL) {
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			   "Couldn't allocate bo for fbcon handle\n");
+		goto out_free_fb;
+	}
+
+	pixmap = intel_create_pixmap_for_bo(pScreen, bo,
+					    fbcon->width, fbcon->height,
+					    fbcon->depth, fbcon->bpp,
+					    fbcon->pitch);
+	if (pixmap == NullPixmap)
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			   "Couldn't allocate pixmap fbcon contents\n");
+	drm_intel_bo_unreference(bo);
+out_free_fb:
+	drmModeFreeFB(fbcon);
+
+	return pixmap;
+}
+
+void intel_copy_fb(ScrnInfoPtr scrn)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
+	ScreenPtr pScreen = xf86ScrnToScreen(scrn);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	PixmapPtr src, dst;
+	unsigned int pitch = scrn->displayWidth * intel->cpp;
+	struct intel_crtc *intel_crtc;
+	int i, fbcon_id;
+
+	if (intel->force_fallback)
+		return;
+
+	fbcon_id = 0;
+	for (i = 0; i < xf86_config->num_crtc; i++) {
+		intel_crtc = xf86_config->crtc[i]->driver_private;
+		if (intel_crtc->mode_crtc->buffer_id)
+			fbcon_id = intel_crtc->mode_crtc->buffer_id;
+	}
+	if (!fbcon_id)
+		return;
+
+	src = intel_create_pixmap_for_fbcon(scrn, fbcon_id);
+	if (src == NULL)
+		return;
+
+	/* We dont have a screen Pixmap yet */
+	dst = intel_create_pixmap_for_bo(pScreen, intel->front_buffer,
+					 scrn->virtualX, scrn->virtualY,
+					 scrn->depth, scrn->bitsPerPixel,
+					 pitch);
+	if (dst == NullPixmap)
+		goto cleanup_src;
+
+	if (!intel->uxa_driver->prepare_copy(src, dst,
+					     -1, -1,
+					     GXcopy, FB_ALLONES))
+		goto cleanup_dst;
+
+	intel->uxa_driver->copy(dst,
+				0, 0,
+				0, 0,
+				scrn->virtualX, scrn->virtualY);
+	intel->uxa_driver->done_copy(dst);
+	pScreen->canDoBGNoneRoot = TRUE;
+
+cleanup_dst:
+	(*pScreen->DestroyPixmap)(dst);
+cleanup_src:
+	(*pScreen->DestroyPixmap)(src);
+}
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 67cec48..8962a11 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -167,7 +167,11 @@ static Bool i830CreateScreenResources(ScreenPtr screen)
 	if (!(*screen->CreateScreenResources) (screen))
 		return FALSE;
 
-	return intel_uxa_create_screen_resources(screen);
+	if (!intel_uxa_create_screen_resources(screen))
+		return FALSE;
+
+	intel_copy_fb(scrn);
+	return TRUE;
 }
 
 static void PreInitCleanup(ScrnInfoPtr scrn)
commit b6525702b9ffd21beb8ea6bb10a8ad5ce7f9de14
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 12 11:59:31 2012 +0100

    sna: Hook up AdjustFrame()
    
    Looks to be unused by the core, just a solitary invocation in an obscure
    extension it seems. However the implementation looks trivial so
    incorporate it until it is finally removed, just in case.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 16bdf13..9453c20 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -301,6 +301,7 @@ struct sna {
 };
 
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
+void sna_mode_adjust_frame(struct sna *sna, int x, int y);
 extern void sna_mode_remove_fb(struct sna *sna);
 extern void sna_mode_update(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 7926f8f..37709ee 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -765,6 +765,16 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	return TRUE;
 }
 
+void sna_mode_adjust_frame(struct sna *sna, int x, int y)
+{
+	xf86CrtcConfigPtr config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	xf86OutputPtr output = config->output[config->compat_output];
+	xf86CrtcPtr crtc = output->crtc;
+
+	if (crtc && crtc->enabled)
+		sna_crtc_set_mode_major(crtc, &crtc->mode, crtc->rotation, x, y);
+}
+
 static void
 sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 {
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 64132b5..e585205 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -943,6 +943,9 @@ sna_screen_init(SCREEN_INIT_ARGS_DECL)
 
 static void sna_adjust_frame(ADJUST_FRAME_ARGS_DECL)
 {
+	SCRN_INFO_PTR(arg);
+	DBG(("%s(%d, %d)\n", __FUNCTION__, x, y));
+	sna_mode_adjust_frame(to_sna(scrn), x, y);
 }
 
 static void sna_free_screen(FREE_SCREEN_ARGS_DECL)
commit 7b281d1300127d65392aaadbbe2299fa9e1749a9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 12 10:37:04 2012 +0100

    sna: Remove a pair of empty stub functions
    
    The export wrappers for the glyph cache constructor/destructor existed
    in case there was a need to add more routines. Since that never
    happened, remove the extra step of indirection.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index ca70e76..63a6287 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -114,7 +114,7 @@ static inline struct sna_glyph *sna_glyph(GlyphPtr glyph)
 
 #define NeedsComponent(f) (PICT_FORMAT_A(f) != 0 && PICT_FORMAT_RGB(f) != 0)
 
-static void unrealize_glyph_caches(struct sna *sna)
+void sna_glyphs_close(struct sna *sna)
 {
 	struct sna_render *render = &sna->render;
 	unsigned int i;
@@ -150,7 +150,7 @@ static void unrealize_glyph_caches(struct sna *sna)
  * This function allocates the storage pixmap, and then fills in the
  * rest of the allocated structures for all caches with the given format.
  */
-static Bool realize_glyph_caches(struct sna *sna)
+Bool sna_glyphs_create(struct sna *sna)
 {
 	ScreenPtr screen = sna->scrn->pScreen;
 	pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff };
@@ -221,7 +221,7 @@ static Bool realize_glyph_caches(struct sna *sna)
 	return sna->render.white_image && sna->render.white_picture;
 
 bail:
-	unrealize_glyph_caches(sna);
+	sna_glyphs_close(sna);
 	return FALSE;
 }
 
@@ -1032,11 +1032,6 @@ next_glyph:
 	return TRUE;
 }
 
-Bool sna_glyphs_create(struct sna *sna)
-{
-	return realize_glyph_caches(sna);
-}
-
 static PictFormatPtr
 glyphs_format(int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 {
@@ -1424,8 +1419,3 @@ sna_glyph_unrealize(ScreenPtr screen, GlyphPtr glyph)
 		priv->atlas = NULL;
 	}
 }
-
-void sna_glyphs_close(struct sna *sna)
-{
-	unrealize_glyph_caches(sna);
-}
commit 392e33a62d729c64c57699505220b4029e015470
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 11 09:51:41 2012 +0100

    sna: Instrument memory/bo allocations for monitoring over time
    
    Hide it behind --enable-debug=memory to avoid incurring the cost for
    everybody.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index a9e8fa5..0b93a2f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -315,7 +315,11 @@ if test "x$DEBUG" != xno; then
 		AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
 	fi
 fi
+if test "x$DEBUG" = xmemory; then
+	AC_DEFINE(DEBUG_MEMORY,1,[Enable memory debugging])
+fi
 if test "x$DEBUG" = xfull; then
+	AC_DEFINE(DEBUG_MEMORY,1,[Enable memory debugging])
 	AC_DEFINE(HAS_DEBUG_FULL,1,[Enable all debugging])
         CFLAGS="$CFLAGS -O0 -ggdb3"
 fi
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d1ba753..0c7b56c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -116,6 +116,21 @@ static inline int bytes(struct kgem_bo *bo)
 #define bucket(B) (B)->size.pages.bucket
 #define num_pages(B) (B)->size.pages.count
 
+#ifdef DEBUG_MEMORY
+static void debug_alloc(struct kgem *kgem, size_t size)
+{
+	kgem->debug_memory.bo_allocs++;
+	kgem->debug_memory.bo_bytes += size;
+}
+static void debug_alloc__bo(struct kgem *kgem, struct kgem_bo *bo)
+{
+	debug_alloc(kgem, bytes(bo));
+}
+#else
+#define debug_alloc(k, b)
+#define debug_alloc__bo(k, b)
+#endif
+
 static void kgem_sna_reset(struct kgem *kgem)
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
@@ -1008,6 +1023,11 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->exec == NULL);
 	assert(!bo->vmap || bo->rq == NULL);
 
+#ifdef DEBUG_MEMORY
+	kgem->debug_memory.bo_allocs--;
+	kgem->debug_memory.bo_bytes -= bytes(bo);
+#endif
+
 	kgem_bo_binding_free(kgem, bo);
 
 	if (IS_VMAP_MAP(bo->map)) {
@@ -2316,6 +2336,8 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 
 	bo->reusable = false;
 	bo->flush = true;
+
+	debug_alloc__bo(kgem, bo);
 	return bo;
 }
 
@@ -2349,6 +2371,7 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
 		return NULL;
 	}
 
+	debug_alloc__bo(kgem, bo);
 	return bo;
 }
 
@@ -2882,6 +2905,8 @@ create:
 
 	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
 
+	debug_alloc__bo(kgem, bo);
+
 	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d\n",
 	     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
 	return bo;
@@ -3449,6 +3474,8 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	bo->reusable = false;
 	bo->vmap = true;
 
+	debug_alloc__bo(kgem, bo);
+
 	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
 	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle));
 	return bo;
@@ -3729,6 +3756,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 			DBG(("%s: created handle=%d for buffer\n",
 			     __FUNCTION__, bo->base.handle));
+
+			debug_alloc(kgem, alloc);
 		}
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
@@ -3916,6 +3945,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 			DBG(("%s: created handle=%d for buffer\n",
 			     __FUNCTION__, bo->base.handle));
+
+			debug_alloc(kgem, alloc * PAGE_SIZE);
 		}
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
@@ -4195,6 +4226,8 @@ kgem_replace_bo(struct kgem *kgem,
 			gem_close(kgem->fd, handle);
 			return NULL;
 		}
+
+		debug_alloc__bo(kgem, dst);
 	}
 	dst->pitch = pitch;
 	dst->unique_id = kgem_get_unique_id(kgem);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index e4a5b4c..b747dc7 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -173,6 +173,13 @@ struct kgem {
 	uint32_t batch[64*1024-8];
 	struct drm_i915_gem_exec_object2 exec[256];
 	struct drm_i915_gem_relocation_entry reloc[4096];
+
+#ifdef DEBUG_MEMORY
+	struct {
+		int bo_allocs;
+		size_t bo_bytes;
+	} debug_memory;
+#endif
 };
 
 #define KGEM_BATCH_RESERVED 1
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6aa54d1..16bdf13 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -195,9 +195,11 @@ enum {
 	THROTTLE_TIMER,
 	EXPIRE_TIMER,
 	INACTIVE_TIMER,
+#if DEBUG_MEMORY
+	DEBUG_MEMORY_TIMER,
+#endif
 	NUM_TIMERS
 };
-#define NUM_FINE_TIMERS 1
 
 struct sna {
 	ScrnInfoPtr scrn;
@@ -287,6 +289,15 @@ struct sna {
 
 	struct kgem kgem;
 	struct sna_render render;
+
+#if DEBUG_MEMORY
+	struct {
+	       int shadow_pixels_allocs;
+	       int cpu_bo_allocs;
+	       size_t shadow_pixels_bytes;
+	       size_t cpu_bo_bytes;
+	} debug_memory;
+#endif
 };
 
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0840056..8498b3c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -362,6 +362,11 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
+#ifdef DEBUG_MEMORY
+	sna->debug_memory.shadow_pixels_allocs++;
+	sna->debug_memory.shadow_pixels_bytes += priv->stride * pixmap->drawable.height;
+#endif
+
 	if (priv->create & KGEM_CAN_CREATE_CPU) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
@@ -377,6 +382,10 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
 			priv->stride = priv->cpu_bo->pitch;
+#ifdef DEBUG_MEMORY
+			sna->debug_memory.cpu_bo_allocs++;
+			sna->debug_memory.cpu_bo_bytes += kgem_bo_size(priv->cpu_bo);
+#endif
 		}
 	}
 
@@ -400,6 +409,10 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 	assert(priv->cpu_damage == NULL);
 	assert(list_is_empty(&priv->list));
 
+#ifdef DEBUG_MEMORY
+	sna->debug_memory.shadow_pixels_allocs--;
+	sna->debug_memory.shadow_pixels_bytes -= priv->stride * priv->pixmap->drawable.height;
+#endif
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
@@ -409,6 +422,10 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 		}
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
+#ifdef DEBUG_MEMORY
+		sna->debug_memory.cpu_bo_allocs--;
+		sna->debug_memory.cpu_bo_bytes -= kgem_bo_size(priv->cpu_bo);
+#endif
 	} else
 		free(priv->ptr);
 
@@ -12317,6 +12334,35 @@ static void sna_accel_inactive(struct sna *sna)
 		sna_accel_disarm_timer(sna, INACTIVE_TIMER);
 }
 
+#ifdef DEBUG_MEMORY
+static bool sna_accel_do_debug_memory(struct sna *sna)
+{
+	int32_t delta = sna->timer_expire[DEBUG_MEMORY_TIMER] - sna->time;
+
+	if (delta <= 3) {
+		sna->timer_expire[DEBUG_MEMORY_TIMER] = sna->time + 10 * 1000;
+		return true;
+	} else
+		return false;
+}
+
+static void sna_accel_debug_memory(struct sna *sna)
+{
+	ErrorF("Allocated shadow pixels: %d, %d bytes, as CPU bo: %d, %d bytes\n",
+	       sna->debug_memory.shadow_pixels_allocs,
+	       sna->debug_memory.shadow_pixels_bytes,
+	       sna->debug_memory.cpu_bo_allocs,
+	       sna->debug_memory.cpu_bo_bytes);
+	ErrorF("Allocated bo: %d, %d bytes\n",
+	       sna->kgem.debug_memory.bo_allocs,
+	       sna->kgem.debug_memory.bo_bytes);
+}
+
+#else
+#define sna_accel_do_debug_memory(x) 0
+static void sna_accel_debug_memory(struct sna *sna) { }
+#endif
+
 Bool sna_accel_pre_init(struct sna *sna)
 {
 	sna->timer = TimerSet(NULL, 0, 0, sna_timeout, sna);
@@ -12340,6 +12386,10 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 
 	AddGeneralSocket(sna->kgem.fd);
 
+#ifdef DEBUG_MEMORY
+	sna->timer_expire[DEBUG_MEMORY_TIMER] = GetTimeInMillis()+ 10 * 1000;
+#endif
+
 	screen->CreateGC = sna_create_gc;
 	screen->GetImage = sna_get_image;
 	screen->GetSpans = sna_get_spans;
@@ -12476,6 +12526,9 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 	if (sna_accel_do_inactive(sna))
 		sna_accel_inactive(sna);
 
+	if (sna_accel_do_debug_memory(sna))
+		sna_accel_debug_memory(sna);
+
 	if (sna->flush == 0 && sna->watch_flush == 1) {
 		DBG(("%s: removing watchers\n", __FUNCTION__));
 		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 32b96e1..64132b5 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -1074,10 +1074,14 @@ Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled: %s\n", BUILDER_DESCRIPTION);
 #endif
-#if HAVE_EXTRA_DEBUG
+#if HAS_EXTRA_DEBUG
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled with debugging enabled\n");
 #endif
+#if DEBUG_MEMORY
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "SNA compiled with memory allocation reporting enabled\n");
+#endif
 
 	DBG(("%s\n", __FUNCTION__));
 	DBG(("pixman version: %s\n", pixman_version_string()));
commit 4e984b79cce038ba5026d8bdd93dec9b5a6ced8a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 11 09:02:03 2012 +0100

    configure: Correct the help string for --with-default-accel
    
    Prior to finalizing the defaults I changed my mind and realised that the
    default had to reflect the current behaviour of someone enabling SNA for
    the first time, and not the previous behaviour of --enable-sna to
    override UXA. This is so that distro's could offer an SNA enabled DDX
    for the brave whilst not affecting their typical no-xorg.conf users.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 79df7e4..a9e8fa5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -160,7 +160,7 @@ fi
 
 AC_ARG_WITH(default-accel,
 	    AS_HELP_STRING([--with-default-accel],
-			   [Select the default acceleration method [default=sna if enabled, otherwise uxa]]),
+			   [Select the default acceleration method [default=uxa if enabled, otherwise sna]]),
 			   [accel="$withval"],
 			   [accel=auto])
 if test "x$accel" = xyes; then
commit 7614a541886dd9eb5c9d441e2618785e9cc53a65
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jun 10 09:36:38 2012 +0100

    configure: Harden --with-default-accel against simple mistakes
    
    If the user specifies no options, assume automatic selection. Then
    double check we found a valid backend and so avoid later breaking the
    build.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index cb740a7..79df7e4 100644
--- a/configure.ac
+++ b/configure.ac
@@ -163,6 +163,10 @@ AC_ARG_WITH(default-accel,
 			   [Select the default acceleration method [default=sna if enabled, otherwise uxa]]),
 			   [accel="$withval"],
 			   [accel=auto])
+if test "x$accel" = xyes; then
+	AC_MSG_WARN([No default acceleration specified, choosing automatic selection])
+	accel="auto"
+fi
 
 AC_MSG_CHECKING([which acceleration method to use by default])
 if test "x$accel" = xauto; then
@@ -178,9 +182,11 @@ if test "x$accel" = xauto; then
 	fi
 fi
 
+have_accel=no
 if test "x$accel" = xsna; then
 	if test "x$SNA" != "xno"; then
 		AC_DEFINE(DEFAULT_ACCEL_METHOD, SNA, [Default acceleration method])
+		have_accel=yes
 	else
 		AC_MSG_ERROR([SNA requested as default, but is not enabled])
 	fi
@@ -189,11 +195,15 @@ fi
 if test "x$accel" = xuxa; then
 	if test "x$UXA" != "xno"; then
 		AC_DEFINE(DEFAULT_ACCEL_METHOD, UXA, [Default acceleration method])
+		have_accel=yes
 	else
 		AC_MSG_ERROR([UXA requested as default, but is not enabled])
 	fi
 fi
 AC_MSG_RESULT($accel)
+if test "x$have_accel" = xno; then
+	AC_MSG_ERROR([No default acceleration option])
+fi
 
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
commit ea0209fbc12fca282bd8ef72223791bf163801ce
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 8 22:44:23 2012 +0100

    sna: Remove option to disable vmap
    
    It works everywhere, so when it is finally enabled, let it be.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index 56f1ae9..78575a6 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -20,7 +20,6 @@ const OptionInfoRec intel_options[] = {
 #endif
 #ifdef USE_SNA
 	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	1},
-	{OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	1},
 	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	0},
 	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	1},
 #endif
diff --git a/src/intel_options.h b/src/intel_options.h
index ba2fc4b..05a2ad1 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -26,7 +26,6 @@ enum intel_options {
 #endif
 #ifdef USE_SNA
 	OPTION_THROTTLE,
-	OPTION_VMAP,
 	OPTION_ZAPHOD,
 	OPTION_DELAYED_FLUSH,
 #endif
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 8da4f52..32b96e1 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -501,14 +501,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 			   "Disabling use of relaxed fencing\n");
 		sna->kgem.has_relaxed_fencing = 0;
 	}
-	if (!xf86ReturnOptValBool(sna->Options,
-				  OPTION_VMAP,
-				  sna->kgem.has_vmap)) {
-		xf86DrvMsg(scrn->scrnIndex,
-			   sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
-			   "Disabling use of vmap\n");
-		sna->kgem.has_vmap = 0;
-	}
 
 	/* Enable tiling by default */
 	sna->tiling = SNA_TILING_ALL;
commit 18baa1428b1c4c0a49927d4c32db091db9e4e97b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 8 22:43:01 2012 +0100

    Make the enum list of options consistent with the actual table.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.h b/src/intel_options.h
index 42a9e56..ba2fc4b 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -15,21 +15,15 @@ enum intel_options {
 	OPTION_DRI,
 	OPTION_VIDEO_KEY,
 	OPTION_COLOR_KEY,
-	OPTION_TILING_FB,
 	OPTION_TILING_2D,
-	OPTION_SHADOW,
+	OPTION_TILING_FB,
 	OPTION_SWAPBUFFERS_WAIT,
-	OPTION_TRIPLE_BUFFER,
-#ifdef INTEL_XVMC
-	OPTION_XVMC,
-#endif
 	OPTION_PREFER_OVERLAY,
-	OPTION_DEBUG_FLUSH_BATCHES,
-	OPTION_DEBUG_FLUSH_CACHES,
-	OPTION_DEBUG_WAIT,
 	OPTION_HOTPLUG,
 	OPTION_RELAXED_FENCING,
-	OPTION_USE_SNA,
+#ifdef INTEL_XVMC
+	OPTION_XVMC,
+#endif
 #ifdef USE_SNA
 	OPTION_THROTTLE,
 	OPTION_VMAP,
@@ -38,7 +32,12 @@ enum intel_options {
 #endif
 #ifdef USE_UXA
 	OPTION_FALLBACKDEBUG,
+	OPTION_DEBUG_FLUSH_BATCHES,
+	OPTION_DEBUG_FLUSH_CACHES,
+	OPTION_DEBUG_WAIT,
 	OPTION_BUFFER_CACHE,
+	OPTION_SHADOW,
+	OPTION_TRIPLE_BUFFER,
 #endif
 	NUM_OPTIONS,
 };
commit adc872a9654dc18d778323ca0721704878ad3851
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 8 06:44:20 2012 +0100

    sna: Add DBG spew to flink()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d2ca995..d1ba753 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1092,7 +1092,8 @@ static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(bo->proxy == NULL);
 
-	DBG(("%s: handle=%d, fb=%d\n", __FUNCTION__, bo->handle, bo->delta));
+	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
+	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
 	if (bo->delta) {
 		drmModeRmFB(kgem->fd, bo->delta);
 		bo->delta = 0;
@@ -3381,6 +3382,9 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 	if (ret)
 		return 0;
 
+	DBG(("%s: flinked handle=%d to name=%d, marking non-reusable\n",
+	     __FUNCTION__, flink.handle, flink.name));
+
 	/* Ordinarily giving the name aware makes the buffer non-reusable.
 	 * However, we track the lifetime of all clients and their hold
 	 * on the buffer, and *presuming* they do not pass it on to a third
commit a62ad4e80722db187766c83a16fa84ec236cc5eb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 23:37:59 2012 +0100

    sna: Skip flushing the active queue if there is not a suitable bo pending
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3078f49..d2ca995 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2136,6 +2136,11 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			return NULL;
 		}
 
+		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE))) {
+			DBG(("%s: active cache bucket empty\n", __FUNCTION__));
+			return NULL;
+		}
+
 		if (!__kgem_throttle_retire(kgem, 0)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
commit 318982566bbc7145847bd03601087150eef7a8d8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 21:56:04 2012 +0100

    sna/dri: Disable experimental code by default
    
    Since these require non-upstream patches to other components, we don't
    want it enabled by default and randomly breaking builds.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index a025521..cb740a7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -197,7 +197,7 @@ AC_MSG_RESULT($accel)
 
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
-			     [Enable use of vmap [default=no]]),
+			     [Enable use of vmap (experimental) [default=no]]),
 	      [VMAP="$enableval"],
 	      [VMAP=no])
 AM_CONDITIONAL(USE_VMAP, test x$VMAP = xyes)
@@ -205,6 +205,15 @@ if test "x$VMAP" = xyes; then
 	AC_DEFINE(USE_VMAP,1,[Assume VMAP support])
 fi
 
+AC_ARG_ENABLE(async-swap,
+	      AS_HELP_STRING([--enable-async-swap],
+			     [Enable use of asynchronous swaps (experimental) [default=no]]),
+	      [ASYNC_SWAP="$enableval"],
+	      [ASYNC_SWAP=no])
+AM_CONDITIONAL(USE_ASYNC_SWAP, test x$ASYNC_SWAP = xyes)
+if test "x$ASYNC_SWAP" = xyes; then
+	AC_DEFINE(USE_ASYNC_SWAP,1,[Assume asynchronous swap support])
+fi
 
 AC_ARG_ENABLE(debug,
 	      AS_HELP_STRING([--enable-debug],
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index da49e12..b9f9b85 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1232,7 +1232,7 @@ static void sna_dri_flip_event(struct sna *sna,
 			sna_dri_frame_event_info_free(flip);
 		break;
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 	case DRI2_ASYNC_FLIP:
 		DBG(("%s: async swap flip completed on pipe %d, pending? %d, new? %d\n",
 		     __FUNCTION__, flip->pipe,
@@ -1700,7 +1700,7 @@ blit_fallback:
 	return TRUE;
 }
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 static void
 sna_dri_exchange_attachment(DRI2BufferPtr front, DRI2BufferPtr back)
 {
@@ -2070,7 +2070,7 @@ Bool sna_dri_open(struct sna *sna, ScreenPtr screen)
 	info.ReuseBufferNotify = NULL;
 #endif
 
-#if DRI2INFOREC_VERSION >= 7
+#if USE_ASYNC_SWAP && DRI2INFOREC_VERSION >= 7
 	info.version = 7;
 	info.AsyncSwap = sna_dri_async_swap;
 #endif
commit ebf84b8e572b5cb1a509000d412dfa5be3d1aca3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 20:15:53 2012 +0100

    sna/trapezoids: Micro-optimise cell allocation
    
    The pool is a fixed size so we can delete the generic code to handle
    variable sizes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 780a0fa..a3bdb16 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -163,8 +163,6 @@ struct quorem {
 
 struct _pool_chunk {
 	size_t size;
-	size_t capacity;
-
 	struct _pool_chunk *prev_chunk;
 	/* Actual data starts here.	 Well aligned for pointers. */
 };
@@ -177,9 +175,6 @@ struct pool {
 	struct _pool_chunk *current;
 	struct _pool_chunk *first_free;
 
-	/* The default capacity of a chunk. */
-	size_t default_capacity;
-
 	/* Header for the sentinel chunk.  Directly following the pool
 	 * struct should be some space for embedded elements from which
 	 * the sentinel chunk allocates from. */
@@ -292,7 +287,7 @@ struct cell_list {
 	 * allocated from this pool.  */
 	struct {
 		struct pool base[1];
-		struct cell embedded[32];
+		struct cell embedded[256];
 	} cell_pool;
 };
 
@@ -351,42 +346,34 @@ floored_muldivrem(int x, int a, int b)
 	return qr;
 }
 
-static void
-_pool_chunk_init(
-    struct _pool_chunk *p,
-    struct _pool_chunk *prev_chunk,
-    size_t capacity)
+static inline void
+_pool_chunk_init(struct _pool_chunk *p,
+		 struct _pool_chunk *prev_chunk)
 {
 	p->prev_chunk = prev_chunk;
-	p->size = 0;
-	p->capacity = capacity;
+	p->size = sizeof(*p);
 }
 
 static struct _pool_chunk *
-_pool_chunk_create(struct _pool_chunk *prev_chunk, size_t size)
+_pool_chunk_create(struct _pool_chunk *prev_chunk)
 {
+	size_t size = 256*sizeof(struct cell);
 	struct _pool_chunk *p;
-	size_t size_with_head = size + sizeof(struct _pool_chunk);
-
-	if (size_with_head < size)
-		return NULL;
 
-	p = malloc(size_with_head);
-	if (p)
-		_pool_chunk_init(p, prev_chunk, size);
+	p = malloc(size + sizeof(struct _pool_chunk));
+	if (unlikely (p == NULL))
+		abort();
 
+	_pool_chunk_init(p, prev_chunk);
 	return p;
 }
 
 static void
-pool_init(struct pool *pool,
-	  size_t default_capacity,
-	  size_t embedded_capacity)
+pool_init(struct pool *pool)
 {
 	pool->current = pool->sentinel;
 	pool->first_free = NULL;
-	pool->default_capacity = default_capacity;
-	_pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
+	_pool_chunk_init(pool->sentinel, NULL);
 }
 
 static void
@@ -403,57 +390,39 @@ pool_fini(struct pool *pool)
 		p = pool->first_free;
 		pool->first_free = NULL;
 	} while (NULL != p);
-	pool_init(pool, 0, 0);
 }
 
-/* Satisfy an allocation by first allocating a new large enough chunk
- * and adding it to the head of the pool's chunk list. This function
- * is called as a fallback if pool_alloc() couldn't do a quick
- * allocation from the current chunk in the pool. */
 static void *
-_pool_alloc_from_new_chunk(struct pool *pool, size_t size)
+_pool_alloc_from_new_chunk(struct pool *pool)
 {
 	struct _pool_chunk *chunk;
 	void *obj;
-	size_t capacity;
-
-	/* If the allocation is smaller than the default chunk size then
-	 * try getting a chunk off the free list.  Force alloc of a new
-	 * chunk for large requests. */
-	capacity = size;
-	chunk = NULL;
-	if (size < pool->default_capacity) {
-		capacity = pool->default_capacity;
-		chunk = pool->first_free;
-		if (chunk) {
-			pool->first_free = chunk->prev_chunk;
-			_pool_chunk_init(chunk, pool->current, chunk->capacity);
-		}
-	}
 
-	if (NULL == chunk) {
-		chunk = _pool_chunk_create (pool->current, capacity);
-		if (unlikely (NULL == chunk))
-			return NULL;
+	chunk = pool->first_free;
+	if (chunk) {
+		pool->first_free = chunk->prev_chunk;
+		_pool_chunk_init(chunk, pool->current);
+	} else {
+		chunk = _pool_chunk_create(pool->current);
 	}
 	pool->current = chunk;
 
-	obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
-	chunk->size += size;
+	obj = (unsigned char*)chunk + chunk->size;
+	chunk->size += sizeof(struct cell);
 	return obj;
 }
 
 inline static void *
-pool_alloc(struct pool *pool, size_t size)
+pool_alloc(struct pool *pool)
 {
 	struct _pool_chunk *chunk = pool->current;
 
-	if (size <= chunk->capacity - chunk->size) {
-		void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
-		chunk->size += size;
+	if (chunk->size < 256*sizeof(struct cell)+sizeof(*chunk)) {
+		void *obj = (unsigned char*)chunk + chunk->size;
+		chunk->size += sizeof(struct cell);
 		return obj;
 	} else
-		return _pool_alloc_from_new_chunk(pool, size);
+		return _pool_alloc_from_new_chunk(pool);
 }
 
 static void
@@ -471,7 +440,7 @@ pool_reset(struct pool *pool)
 
 	/* Reset the sentinel as the current chunk. */
 	pool->current = pool->sentinel;
-	pool->sentinel->size = 0;
+	pool->sentinel->size = sizeof(*chunk);
 }
 
 /* Rewinds the cell list's cursor to the beginning.  After rewinding
@@ -485,9 +454,7 @@ cell_list_rewind(struct cell_list *cells)
 static void
 cell_list_init(struct cell_list *cells)
 {
-	pool_init(cells->cell_pool.base,
-		  256*sizeof(struct cell),
-		  sizeof(cells->cell_pool.embedded));
+	pool_init(cells->cell_pool.base);
 	cells->tail.next = NULL;
 	cells->tail.x = INT_MAX;
 	cells->head.x = INT_MIN;
@@ -516,9 +483,7 @@ cell_list_alloc(struct cell_list *cells,
 {
 	struct cell *cell;
 
-	cell = pool_alloc(cells->cell_pool.base, sizeof (struct cell));
-	if (unlikely(NULL == cell))
-		abort();
+	cell = pool_alloc(cells->cell_pool.base);
 
 	cell->next = tail->next;
 	tail->next = cell;
commit 0a25fc68c5cd82cad4b99b0f2357f430c8783c3f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 19:13:09 2012 +0100

    sna: Actually create Y-tiled source pixmaps
    
    An inconsistency highlighted by 7c51cabaecac revealed that we had a
    mismatch between the check in move_to_gpu() and how we created the
    pixmap. This mismatch resulted in us creating and uploading tiled
    pixmaps for single shot textures, and the increase aperture pressure was
    causing a regression in firefox-fishbowl on pnv, for example.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index f8ec796..6aa54d1 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -270,7 +270,6 @@ struct sna {
 		struct gen7_render_state gen7;
 	} render_state;
 	uint32_t have_render;
-	uint32_t default_tiling;
 
 	Bool directRenderingOpen;
 	char *deviceName;
@@ -418,6 +417,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_READ 0x2
 #define MOVE_INPLACE_HINT 0x4
 #define MOVE_ASYNC_HINT 0x8
+#define MOVE_SOURCE_HINT 0x10
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4a7d55e..0840056 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -59,6 +59,8 @@
 #define FORCE_FALLBACK 0
 #define FORCE_FLUSH 0
 
+#define DEFAULT_TILING I915_TILING_X
+
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
@@ -442,7 +444,8 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	return true;
 }
 
-static inline uint32_t default_tiling(PixmapPtr pixmap)
+static inline uint32_t default_tiling(PixmapPtr pixmap,
+				      uint32_t tiling)
 {
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -471,20 +474,21 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 		return I915_TILING_Y;
 	}
 
-	return sna->default_tiling;
+	return tiling;
 }
 
-constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap)
+constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap,
+						  uint32_t tiling)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	uint32_t tiling, bit;
+	uint32_t bit;
 
 	/* Use tiling by default, but disable per user request */
 	if (pixmap->usage_hint == SNA_CREATE_FB) {
 		tiling = -I915_TILING_X;
 		bit = SNA_TILING_FB;
 	} else {
-		tiling = default_tiling(pixmap);
+		tiling = default_tiling(pixmap, tiling);
 		bit = SNA_TILING_2D;
 	}
 	if ((sna->tiling && (1 << bit)) == 0)
@@ -924,7 +928,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 			       pixmap->drawable.width,
 			       pixmap->drawable.height,
 			       pixmap->drawable.bitsPerPixel,
-			       sna_pixmap_choose_tiling(pixmap),
+			       sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING),
 			       CREATE_GTT_MAP | CREATE_INACTIVE);
 
 	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
@@ -1396,7 +1400,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		}
 
 		if (priv->gpu_bo == NULL && priv->stride &&
-		    sna_pixmap_choose_tiling(pixmap) != I915_TILING_NONE &&
+		    sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) != I915_TILING_NONE &&
 		    region_inplace(sna, pixmap, region, priv) &&
 		    sna_pixmap_create_mappable_gpu(pixmap)) {
 			pixmap->devPrivate.ptr =
@@ -1833,7 +1837,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 	if (priv->gpu_bo == NULL) {
-		unsigned create;
+		unsigned create, tiling;
 
 		create = 0;
 		if (priv->cpu_damage)
@@ -1841,11 +1845,14 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 		if (pixmap->usage_hint == SNA_CREATE_FB)
 			create |= CREATE_EXACT | CREATE_SCANOUT;
 
+		tiling = (flags & MOVE_SOURCE_HINT) ? I915_TILING_Y : DEFAULT_TILING;
+
 		priv->gpu_bo = kgem_create_2d(&sna->kgem,
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap),
+					      sna_pixmap_choose_tiling(pixmap,
+								       tiling),
 					      create);
 		if (priv->gpu_bo == NULL)
 			return false;
@@ -2307,7 +2314,8 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
-					      sna_pixmap_choose_tiling(pixmap),
+					      sna_pixmap_choose_tiling(pixmap,
+								       DEFAULT_TILING),
 					      mode);
 		if (priv->gpu_bo == NULL)
 			return NULL;
@@ -2401,7 +2409,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel,
-					       sna_pixmap_choose_tiling(pixmap),
+					       sna_pixmap_choose_tiling(pixmap,
+									DEFAULT_TILING),
 					       (priv->cpu_damage && priv->cpu_bo == NULL) ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
 		}
 		if (priv->gpu_bo == NULL) {
@@ -3387,7 +3396,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 		return FALSE;
 
 	if (priv->cpu_bo) {
-		if (sna_pixmap_choose_tiling(pixmap) == I915_TILING_NONE)
+		if (sna_pixmap_choose_tiling(pixmap, DEFAULT_TILING) == I915_TILING_NONE)
 			return FALSE;
 
 		return (priv->source_count++-SOURCE_BIAS) * w*h >=
@@ -3636,7 +3645,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (dst_priv->gpu_bo == NULL &&
 	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
 	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
-		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap);
+		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap,
+							   DEFAULT_TILING);
 
 		DBG(("%s: create dst GPU bo for upload\n", __FUNCTION__));
 
@@ -12362,7 +12372,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 
 	backend = "no";
 	sna->have_render = false;
-	sna->default_tiling = I915_TILING_X;
 	no_render_init(sna);
 
 #if !DEBUG_NO_RENDER
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index e28823f..8d61f40 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -388,49 +388,38 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 	}
 
 	if (DBG_FORCE_UPLOAD < 0)
-		migrate = true;
+		return sna_pixmap_force_to_gpu(pixmap,
+					       MOVE_SOURCE_HINT | MOVE_READ);
 
 	w = box->x2 - box->x1;
 	h = box->y2 - box->y1;
 	if (w == pixmap->drawable.width && h == pixmap->drawable.height) {
-		migrate = true;
-		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
-		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-				       I915_TILING_Y,
-				       pixmap->drawable.width,
-				       pixmap->drawable.height,
-				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-			migrate = priv->source_count++ > SOURCE_BIAS;
+		migrate = priv->source_count++ > SOURCE_BIAS;
 
 		DBG(("%s: migrating whole pixmap (%dx%d) for source (%d,%d),(%d,%d), count %d? %d\n",
 		     __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height,
 		     box->x1, box->y1, box->x2, box->y2, priv->source_count,
 		     migrate));
-	} else {
-		/* ignore tiny fractions */
-		if (64*w*h > pixmap->drawable.width * pixmap->drawable.height) {
-			count = priv->source_count++;
-			if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
-			    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-					       I915_TILING_Y,
-					       pixmap->drawable.width,
-					       pixmap->drawable.height,
-					       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-				count -= SOURCE_BIAS;
-
-			DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
-			     __FUNCTION__,
-			     box->x1, box->y1, box->x2, box->y2,
-			     count, w*h,
-			     pixmap->drawable.width * pixmap->drawable.height,
-			     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
-
-			migrate =  count*w*h > pixmap->drawable.width * pixmap->drawable.height;
-		}
+	} else if (kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
+				      I915_TILING_Y, w, h,
+				      pixmap->drawable.bitsPerPixel) != I915_TILING_NONE) {
+		count = priv->source_count++;
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
+			count -= SOURCE_BIAS;
+
+		DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
+		     __FUNCTION__,
+		     box->x1, box->y1, box->x2, box->y2,
+		     count, w*h,
+		     pixmap->drawable.width * pixmap->drawable.height,
+		     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
+
+		migrate = count*w*h > pixmap->drawable.width * pixmap->drawable.height;
 	}
 
-	if (migrate && !sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+	if (migrate && !sna_pixmap_force_to_gpu(pixmap,
+						MOVE_SOURCE_HINT | MOVE_READ))
 		return NULL;
 
 	return priv->gpu_bo;
@@ -680,7 +669,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 	DBG(("%s: creating temporary GPU bo %dx%d\n",
 	     __FUNCTION__, width, height));
 
-	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_SOURCE_HINT | MOVE_READ))
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, ow, oh,
 						dst_x, dst_y);
@@ -943,7 +932,8 @@ sna_render_picture_partial(struct sna *sna,
 
 		bo = sna_pixmap(pixmap)->cpu_bo;
 	} else {
-		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+		if (!sna_pixmap_force_to_gpu(pixmap,
+					     MOVE_SOURCE_HINT | MOVE_READ))
 			return 0;
 
 		bo = sna_pixmap(pixmap)->gpu_bo;
commit c58d137d3eeb0e97bfd53e68404e04d9012b5697
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jun 7 11:18:27 2012 +0100

    sna: Suppress DBG prints along potential SIGIO paths
    
    As ErrorF/fprintf is not re-entrant due to its mutex we can not use DBG
    from code that could be called by a signal handler. X's SIGIO handler
    attempts to move the cursor from within the handler (eek!) and so we
    need to be careful not to take any locks, such as in the aforementioned
    fprintf, along the cursor paths.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50744
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index fbf35cc..7926f8f 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -53,6 +53,12 @@
 #define DBG(x) ErrorF x
 #endif
 
+#if 0
+#define __DBG DBG
+#else
+#define __DBG(x)
+#endif
+
 struct sna_crtc {
 	struct drm_mode_modeinfo kmode;
 	PixmapPtr shadow;
@@ -766,7 +772,7 @@ sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 	struct drm_mode_cursor arg;
 
-	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_BO;
@@ -784,7 +790,7 @@ sna_crtc_show_cursor(xf86CrtcPtr crtc)
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 	struct drm_mode_cursor arg;
 
-	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_BO;
@@ -798,6 +804,8 @@ sna_crtc_show_cursor(xf86CrtcPtr crtc)
 static void
 sna_crtc_set_cursor_colors(xf86CrtcPtr crtc, int bg, int fg)
 {
+	__DBG(("%s: CRTC:%d (bg=%x, fg=%x)\n", __FUNCTION__,
+	       crtc_id(crtc->driver_private), bg, fg));
 }
 
 static void
@@ -807,7 +815,7 @@ sna_crtc_set_cursor_position(xf86CrtcPtr crtc, int x, int y)
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 	struct drm_mode_cursor arg;
 
-	DBG(("%s: CRTC:%d (%d, %d)\n", __FUNCTION__, crtc_id(sna_crtc), x, y));
+	__DBG(("%s: CRTC:%d (%d, %d)\n", __FUNCTION__, crtc_id(sna_crtc), x, y));
 
 	VG_CLEAR(arg);
 	arg.flags = DRM_MODE_CURSOR_MOVE;
@@ -826,6 +834,8 @@ sna_crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 *image)
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 	struct drm_i915_gem_pwrite pwrite;
 
+	__DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+
 	VG_CLEAR(pwrite);
 	pwrite.handle = sna_crtc->cursor;
 	pwrite.offset = 0;
@@ -1005,6 +1015,8 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	crtc->driver_private = sna_crtc;
 
 	sna_crtc->cursor = gem_create(sna->kgem.fd, 64*64*4);
+	DBG(("%s: created handle=%d for cursor on CRTC:%d\n",
+	     __FUNCTION__, sna_crtc->cursor, sna_crtc->id));
 
 	list_add(&sna_crtc->link, &mode->crtcs);
 
commit 3f5b94f3d1625b06840c6441a0b175604ee3d2f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:24:07 2012 +0100

    sna: Check against integer overflows when computing cache size
    
    Even with a 1nm process, I doubt we will see 4+GiB cache sizes ;-)
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3d1e5be..3078f49 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -523,7 +523,9 @@ cpu_cache_size(void)
 		while (getline(&line, &len, file) != -1) {
 			int mb;
 			if (sscanf(line, "cache size : %d KB", &mb) == 1) {
-				size = mb * 1024;
+				/* Paranoid check against gargantuan caches */
+				if (mb <= 1<<20)
+					size = mb * 1024;
 				break;
 			}
 		}
commit 902391bd798775e1a7a53503d4dd1756162f737f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:20:39 2012 +0100

    uxa: Remove dead-code for SourcePictures
    
    All SourcePictures are now converted into Drawables, which had been
    assumed by the driver backend. However, the code still existed to
    attempt to pass procedural pictures onwards and so set pSrcPix to NULL
    which was being flagged by the static analyser as a potential NULL
    dereference.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-render.c b/uxa/uxa-render.c
index 1e88c5d..4463dc2 100644
--- a/uxa/uxa-render.c
+++ b/uxa/uxa-render.c
@@ -1064,10 +1064,25 @@ uxa_try_driver_composite(CARD8 op,
 		return 1;
 	}
 
-	if (localSrc->pDrawable) {
-		pSrcPix = uxa_get_offscreen_pixmap(localSrc->pDrawable,
-						   &src_off_x, &src_off_y);
-		if (!pSrcPix) {
+	pSrcPix = uxa_get_offscreen_pixmap(localSrc->pDrawable,
+					   &src_off_x, &src_off_y);
+	if (!pSrcPix) {
+		REGION_UNINIT(screen, &region);
+
+		if (localSrc != pSrc)
+			FreePicture(localSrc, 0);
+		if (localMask && localMask != pMask)
+			FreePicture(localMask, 0);
+		if (localDst != pDst)
+			FreePicture(localDst, 0);
+
+		return 0;
+	}
+
+	if (localMask) {
+		pMaskPix = uxa_get_offscreen_pixmap(localMask->pDrawable,
+						    &mask_off_x, &mask_off_y);
+		if (!pMaskPix) {
 			REGION_UNINIT(screen, &region);
 
 			if (localSrc != pSrc)
@@ -1079,29 +1094,6 @@ uxa_try_driver_composite(CARD8 op,
 
 			return 0;
 		}
-	} else {
-		pSrcPix = NULL;
-	}
-
-	if (localMask) {
-		if (localMask->pDrawable) {
-			pMaskPix = uxa_get_offscreen_pixmap(localMask->pDrawable,
-							    &mask_off_x, &mask_off_y);
-			if (!pMaskPix) {
-				REGION_UNINIT(screen, &region);
-
-				if (localSrc != pSrc)
-					FreePicture(localSrc, 0);
-				if (localMask && localMask != pMask)
-					FreePicture(localMask, 0);
-				if (localDst != pDst)
-					FreePicture(localDst, 0);
-
-				return 0;
-			}
-		} else {
-			pMaskPix = NULL;
-		}
 	}
 
 	if (!(*uxa_screen->info->prepare_composite)
commit 3d8a1f7176877975a31ad7a6548fa8309065f617
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:15:42 2012 +0100

    sna/gen4+: Add missing "fall through" comments
    
    Reported-by: <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index aafe6d9..ad6f6c5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2409,6 +2409,7 @@ gen4_render_composite(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen4_composite_solid_init(sna, &tmp->src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen4_composite_channel_convert(&tmp->src);
 		break;
@@ -2455,6 +2456,7 @@ gen4_render_composite(struct sna *sna,
 				goto cleanup_src;
 			case 0:
 				gen4_composite_solid_init(sna, &tmp->mask, 0);
+				/* fall through to fixup */
 			case 1:
 				gen4_composite_channel_convert(&tmp->mask);
 				break;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index d51a83b..e31ac5c 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2452,6 +2452,7 @@ gen5_render_composite(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen5_composite_solid_init(sna, &tmp->src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen5_composite_channel_convert(&tmp->src);
 		break;
@@ -2497,6 +2498,7 @@ gen5_render_composite(struct sna *sna,
 				goto cleanup_src;
 			case 0:
 				gen5_composite_solid_init(sna, &tmp->mask, 0);
+				/* fall through to fixup */
 			case 1:
 				gen5_composite_channel_convert(&tmp->mask);
 				break;
@@ -2809,6 +2811,7 @@ gen5_render_composite_spans(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen5_composite_solid_init(sna, &tmp->base.src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen5_composite_channel_convert(&tmp->base.src);
 		break;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d3b3e2a..d613fe1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2665,6 +2665,7 @@ gen6_render_composite(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen6_composite_solid_init(sna, &tmp->src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen6_composite_channel_convert(&tmp->src);
 		break;
@@ -2722,6 +2723,7 @@ gen6_render_composite(struct sna *sna,
 				goto cleanup_src;
 			case 0:
 				gen6_composite_solid_init(sna, &tmp->mask, 0);
+				/* fall through to fixup */
 			case 1:
 				gen6_composite_channel_convert(&tmp->mask);
 				break;
@@ -3112,6 +3114,7 @@ gen6_render_composite_spans(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen6_composite_solid_init(sna, &tmp->base.src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen6_composite_channel_convert(&tmp->base.src);
 		break;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 0029895..a7c498e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2758,6 +2758,7 @@ gen7_render_composite(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen7_composite_solid_init(sna, &tmp->src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen7_composite_channel_convert(&tmp->src);
 		break;
@@ -2815,6 +2816,7 @@ gen7_render_composite(struct sna *sna,
 				goto cleanup_src;
 			case 0:
 				gen7_composite_solid_init(sna, &tmp->mask, 0);
+				/* fall through to fixup */
 			case 1:
 				gen7_composite_channel_convert(&tmp->mask);
 				break;
@@ -3196,6 +3198,7 @@ gen7_render_composite_spans(struct sna *sna,
 		goto cleanup_dst;
 	case 0:
 		gen7_composite_solid_init(sna, &tmp->base.src, 0);
+		/* fall through to fixup */
 	case 1:
 		gen7_composite_channel_convert(&tmp->base.src);
 		break;
commit 8ae4407c43e6a8d26784508f61b416138f908132
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:13:05 2012 +0100

    sna: Silence static analyser complaining about potential NULL pointer
    
    Add an assert to prove that it is not.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2ead80a..4a7d55e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12169,8 +12169,9 @@ static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 static void sna_accel_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
-	bool busy = priv->cpu_damage || priv->gpu_bo->rq;
+	bool busy;
 
+	assert(priv != NULL);
 	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
 	     __FUNCTION__, (long)sna->time,
 	     priv->cpu_damage,
@@ -12178,6 +12179,7 @@ static void sna_accel_flush(struct sna *sna)
 	     sna->kgem.nbatch,
 	     sna->kgem.busy));
 
+	busy = priv->cpu_damage || priv->gpu_bo->rq;
 	if (!sna->kgem.busy && !busy)
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 	sna->kgem.busy = busy;
commit 08010b23a3e2bc37d202251923fac814b18fa501
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:10:35 2012 +0100

    uxa/dri: Silence static analyser for potential NULL intel_pixmap
    
    If the intel_pixmap was NULL we should have failed to create the DRI2
    buffer, so we can safely assert here to keep the analyser quiet.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 2ed5559..6bf76d0 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -919,6 +919,13 @@ intel_glamor_create_back_pixmap(ScreenPtr screen,
 	return back_pixmap;
 }
 
+static drm_intel_bo *get_pixmap_bo(I830DRI2BufferPrivatePtr priv)
+{
+	drm_intel_bo *bo = intel_get_pixmap_bo(priv->pixmap);
+	assert(bo != NULL); /* guaranteed by construction of the DRI2 buffer */
+	return bo;
+}
+
 /*
  * Our internal swap routine takes care of actually exchanging, blitting, or
  * flipping buffers as necessary.
@@ -935,7 +942,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 	if (!intel->use_triple_buffer) {
 		info->type = DRI2_SWAP;
 		if (!intel_do_pageflip(intel,
-				       intel_get_pixmap_bo(priv->pixmap),
+				       get_pixmap_bo(priv),
 				       info, info->pipe))
 			return FALSE;
 
@@ -990,7 +997,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 		intel->back_buffer = NULL;
 	}
 
-	old_back = intel_get_pixmap_bo(priv->pixmap);
+	old_back = get_pixmap_bo(priv);
 	if (!intel_do_pageflip(intel, old_back, info, info->pipe)) {
 		intel->back_buffer = new_back;
 		return FALSE;
commit 06b1b875ba13227ddaf7f28dbdcdaa3eb49f0857
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:05:13 2012 +0100

    uxa/i915: check for failure to allocate temporary destination
    
    If the target drawable is too large for the render pipeline, we need to
    create a temporary surface. This may fail, so abort if it does.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_video.c b/src/i915_video.c
index c73615e..ae2e6bb 100644
--- a/src/i915_video.c
+++ b/src/i915_video.c
@@ -74,6 +74,8 @@ I915DisplayVideoTextured(ScrnInfoPtr scrn,
 					      dstRegion->extents.y2 - dyo,
 					      pixmap->drawable.depth,
 					      CREATE_PIXMAP_USAGE_SCRATCH);
+		if (target == NULL)
+			return;
 
 		pix_xoff = -dxo;
 		pix_yoff = -dyo;
commit c553dcae2dd714cac413ffc7c7779cd78c9a3e61
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 17:04:01 2012 +0100

    sna: Silence a few unused function warnings
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 000738b..da49e12 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -900,19 +900,6 @@ sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 	free(info);
 }
 
-static void
-sna_dri_exchange_attachment(DRI2BufferPtr front, DRI2BufferPtr back)
-{
-	int tmp;
-
-	DBG(("%s(%d <--> %d)\n",
-	     __FUNCTION__, front->attachment, back->attachment));
-
-	tmp = front->attachment;
-	front->attachment = back->attachment;
-	back->attachment = tmp;
-}
-
 /*
  * Our internal swap routine takes care of actually exchanging, blitting, or
  * flipping buffers as necessary.
@@ -1714,6 +1701,19 @@ blit_fallback:
 }
 
 #if DRI2INFOREC_VERSION >= 7
+static void
+sna_dri_exchange_attachment(DRI2BufferPtr front, DRI2BufferPtr back)
+{
+	int tmp;
+
+	DBG(("%s(%d <--> %d)\n",
+	     __FUNCTION__, front->attachment, back->attachment));
+
+	tmp = front->attachment;
+	front->attachment = back->attachment;
+	back->attachment = tmp;
+}
+
 static Bool
 sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 		   DRI2BufferPtr front, DRI2BufferPtr back,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index d7e6d40..33fee42 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -265,12 +265,6 @@ done:
 	free(tile);
 }
 
-static inline int split(int x, int y)
-{
-	int n = x / y + 1;
-	return (x + n - 1) / n;
-}
-
 Bool
 sna_tiling_composite(uint32_t op,
 		     PicturePtr src,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index bfb4e0c..780a0fa 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1540,14 +1540,6 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 	}
 }
 
-static inline uint8_t clip255(int x)
-{
-	if (x > 255)
-		return 255;
-
-	return x;
-}
-
 inline static void
 inplace_subrow(struct active_list *active, int8_t *row,
 	       int width, int *min, int *max)
commit 536033b4edb40db18210e5272eb8ce9703e5e364
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 16:02:47 2012 +0100

    legacy/i810/dri: Check for malloc failure for BusIdString
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 24632d4..ba11245 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -240,10 +240,16 @@ I810DRIScreenInit(ScreenPtr pScreen)
       pDRIInfo->busIdString = DRICreatePCIBusID(pI810->PciInfo);
    } else {
       pDRIInfo->busIdString = malloc(64);
-      sprintf(pDRIInfo->busIdString, "PCI:%d:%d:%d",
-	      ((pI810->PciInfo->domain << 8) | pI810->PciInfo->bus),
-	      pI810->PciInfo->dev, pI810->PciInfo->func
-	      );
+      if (pDRIInfo->busIdString)
+	 sprintf(pDRIInfo->busIdString, "PCI:%d:%d:%d",
+		 ((pI810->PciInfo->domain << 8) | pI810->PciInfo->bus),
+		 pI810->PciInfo->dev, pI810->PciInfo->func
+		);
+   }
+   if (!pDRIInfo->busIdString) {
+      DRIDestroyInfoRec(pI810->pDRIInfo);
+      pI810->pDRIInfo = NULL;
+      return FALSE;
    }
    pDRIInfo->ddxDriverMajorVersion = I810_MAJOR_VERSION;
    pDRIInfo->ddxDriverMinorVersion = I810_MINOR_VERSION;
commit b2da80c1d1058dca5d3d85693918e53d9fa61dd3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 15:59:54 2012 +0100

    sna: Add some missing 'fall through' comments
    
    Static analysers are dumb and presume readers are too.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 0037ba0..e58cdd6 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1065,6 +1065,7 @@ gen3_composite_emit_shader(struct sna *sna,
 		case SHADER_LINEAR:
 		case SHADER_RADIAL:
 			gen3_fs_dcl(FS_S0 + t);
+			/* fall through */
 		case SHADER_OPACITY:
 			gen3_fs_dcl(FS_T0 + t);
 			break;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index c26c128..000738b 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1075,6 +1075,7 @@ static void sna_dri_vblank_handle(int fd,
 						 get_private(info->back)->bo,
 						 true);
 		info->type = DRI2_SWAP_THROTTLE;
+		/* fall through to SwapComplete */
 	case DRI2_SWAP_THROTTLE:
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
 		     __FUNCTION__, info->type, frame, tv_sec, tv_usec));
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 9db1891..4898223 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -589,6 +589,7 @@ sna_get_pixel_from_rgba(uint32_t * pixel,
 	switch (format) {
 	case PICT_x8r8g8b8:
 		alpha = 0xffff;
+		/* fall through to re-use a8r8g8b8 expansion */
 	case PICT_a8r8g8b8:
 		*pixel = ((alpha >> 8 << 24) |
 			  (red >> 8 << 16) |
commit c433fb4521e2fb65a555489bd09ba6bb9448b72f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 15:17:30 2012 +0100

    uxa/dri: Fix up typo from f2513cb0f
    
    s/true/FALSE/? Wrong.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index a30c62d..2ed5559 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -406,7 +406,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 		pixmap = get_front_buffer(drawable);
 
 		if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
-			is_glamor_pixmap = FALSE;
+			is_glamor_pixmap = TRUE;
 			drawable = &pixmap->drawable;
 			pixmap = NULL;
 		}
commit 1675f441049d559d891fe378e437c12db21df9e7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 12:55:13 2012 +0100

    uxa/dri: Transfer reference of the new_back buffer to the flip_chain
    
    In order to prevent a leak of the bo when the chain is eventually
    torn-down when the client exits.
    
    Reported-by: Andreas Lampersperger <lampersperger.andreas at heidenhain.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50670
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index d2b828e..a30c62d 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1005,6 +1005,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 		intel->back_buffer = intel->front_buffer;
 		drm_intel_bo_reference(intel->back_buffer);
 		intel_set_pixmap_bo(priv->pixmap, new_back);
+		drm_intel_bo_unreference(new_back);
 	}
 	else
 		intel_exchange_pixmap_buffers(intel, priv->pixmap,
commit 18726a4975ab2ddf85eaa6eb1602dcbe599217f7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 10:58:49 2012 +0100

    sna: Perform CopyArea directly onto a CPU bo if available
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e2a2c12..2ead80a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3887,6 +3887,33 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		}
 
 		goto out;
+	} else if (dst_priv->cpu_bo &&
+		   src_priv && DAMAGE_IS_ALL(src_priv->gpu_damage) && !src_priv->clear) {
+		if (!sna->render.copy_boxes(sna, alu,
+					    src_pixmap, src_priv->gpu_bo, src_dx, src_dy,
+					    dst_pixmap, dst_priv->cpu_bo, dst_dx, dst_dy,
+					    box, n)) {
+			DBG(("%s: fallback - accelerated copy boxes failed\n",
+			     __FUNCTION__));
+			goto fallback;
+		}
+
+		if (replaces) {
+			sna_damage_all(&dst_priv->cpu_damage,
+				       dst_pixmap->drawable.width,
+				       dst_pixmap->drawable.height);
+			dst_priv->undamaged = false;
+		} else {
+			RegionTranslate(&region, dst_dx, dst_dy);
+			assert_pixmap_contains_box(dst_pixmap,
+						   RegionExtents(&region));
+			sna_damage_add(&dst_priv->cpu_damage, &region);
+			RegionTranslate(&region, -dst_dx, -dst_dy);
+		}
+		if (dst_priv->flush)
+			list_move(&dst_priv->list, &sna->dirty_pixmaps);
+
+		goto out;
 	}
 
 fallback:
commit 57d7d5de78bcf01d75d7a7de03fe50a2a9bd1b7e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 00:08:17 2012 +0100

    sna: Use GPU for readback onto CPU bo
    
    Time to blt from GTT to LLC 16384 bytes:	 125.000µs (snb)
    Time to blt from GTT to LLC 16384 bytes:	  71.000µs (ivb)
    Time to blt from GTT to LLC 1048576 bytes:	1400.000µs (snb)
    Time to blt from GTT to LLC 1048576 bytes:	 938.000µs (ivb)
    
    Time to copy from GTT to LLC 16384 bytes:	 118.000µs (snb)
    Time to copy from GTT to LLC 16384 bytes:	 134.000µs (ivb)
    Time to copy from GTT to LLC 1048576 bytes:	6723.000µs (snb)
    Time to copy from GTT to LLC 1048576 bytes:	7424.000µs (ivb)
    
    And conversely,
    
    Time to blt from LLC to GTT 16384 bytes:	 10.000µs (snb)
    Time to blt from LLC to GTT 16384 bytes:	  8.000µs (ivb)
    Time to blt from LLC to GTT 1048576 bytes:	217.000µs (snb)
    Time to blt from LLC to GTT 1048576 bytes:	135.000µs (ivb)
    
    Time to copy from LLC to GTT 16384 bytes:	  4.000µs (snb)
    Time to copy from LLC to GTT 16384 bytes:	  4.000µs (ivb)
    Time to copy from LLC to GTT 1048576 bytes:	270.000µs (snb)
    Time to copy from LLC to GTT 1048576 bytes:	179.500µs (ivb)
    
    It seems clear then that even with the extra synchronisation cost
    copying from the GTT is much preferable with the GPU than using the
    uncached reads by the CPU. Streaming write-combines from the CPU into
    the GTT seem about as efficient as we can manage, so continue to use the
    mapping unless busy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index afd9ed7..e2a2c12 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -930,8 +930,19 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
 }
 
-static bool use_cpu_bo_for_xfer(struct sna_pixmap *priv)
+static inline bool use_cpu_bo_for_write(struct sna *sna,
+					struct sna_pixmap *priv)
 {
+	return priv->cpu_bo != NULL && sna->kgem.gen >= 30;
+}
+
+static inline bool use_cpu_bo_for_read(struct sna_pixmap *priv)
+{
+#if 0
+	if (pixmap->devPrivate.ptr == NULL)
+		return TRUE;
+#endif
+
 	if (priv->cpu_bo == NULL)
 		return FALSE;
 
@@ -1112,7 +1123,7 @@ skip_inplace_map:
 		if (n) {
 			Bool ok = FALSE;
 
-			if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
+			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -1503,7 +1514,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = FALSE;
-			if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
+			if (use_cpu_bo_for_write(sna, priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -1604,7 +1615,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (n) {
 					Bool ok = FALSE;
 
-					if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
+					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
@@ -1626,7 +1637,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				int n = REGION_NUM_RECTS(r);
 				Bool ok = FALSE;
 
-				if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
+				if (use_cpu_bo_for_write(sna, priv))
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
 								    pixmap, priv->cpu_bo, 0, 0,
@@ -1648,7 +1659,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					int n = REGION_NUM_RECTS(&need);
 					Bool ok = FALSE;
 
-					if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
+					if (use_cpu_bo_for_write(sna, priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
@@ -1878,7 +1889,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 		if (n) {
 			Bool ok = FALSE;
 
-			if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
+			if (use_cpu_bo_for_read(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1916,7 +1927,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		Bool ok = FALSE;
-		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
+		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -1945,7 +1956,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 		box = REGION_RECTS(&i);
 		ok = FALSE;
-		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
+		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -2441,7 +2452,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
 		ok = FALSE;
-		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
+		if (use_cpu_bo_for_read(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
commit f2513cb0fdb0d1214854fd4e4dcd477ba8583862
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jun 6 10:41:35 2012 +0100

    uxa/dri: Do not use undeclared stdbool features
    
    The header isn't pulled in, so stop using the undefined values of
    true/false.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 4890fe4..d2b828e 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -263,7 +263,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 	DRI2BufferPtr buffers;
 	I830DRI2BufferPrivatePtr privates;
 	PixmapPtr pixmap, pDepthPixmap;
-	bool is_glamor_pixmap = false;
+	Bool is_glamor_pixmap = FALSE;
 	int i;
 
 	buffers = calloc(count, sizeof *buffers);
@@ -282,7 +282,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 			pixmap = get_front_buffer(drawable);
 
 			if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
-				is_glamor_pixmap = true;
+				is_glamor_pixmap = TRUE;
 				drawable = &pixmap->drawable;
 				pixmap = NULL;
 			}
@@ -390,7 +390,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 	DRI2Buffer2Ptr buffer;
 	I830DRI2BufferPrivatePtr privates;
 	PixmapPtr pixmap;
-	bool is_glamor_pixmap = false;
+	Bool is_glamor_pixmap = FALSE;
 
 	buffer = calloc(1, sizeof *buffer);
 	if (buffer == NULL)
@@ -406,7 +406,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 		pixmap = get_front_buffer(drawable);
 
 		if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
-			is_glamor_pixmap = true;
+			is_glamor_pixmap = FALSE;
 			drawable = &pixmap->drawable;
 			pixmap = NULL;
 		}
commit 1dafb4777f8378c87f34feae667582498220204c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 21:25:40 2012 +0100

    sna/damage: Add some assertions to validate that each damage box is non-empty
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50744
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index b97edbe..dd67364 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -671,8 +671,10 @@ __sna_damage_add_boxes(struct sna_damage *damage,
 		break;
 	}
 
+	assert(box[0].x2 > box[0].x1 && box[0].y2 > box[0].y1);
 	extents = box[0];
 	for (i = 1; i < n; i++) {
+		assert(box[i].x2 > box[i].x1 && box[i].y2 > box[i].y1);
 		if (extents.x1 > box[i].x1)
 			extents.x1 = box[i].x1;
 		if (extents.x2 < box[i].x2)
@@ -738,11 +740,13 @@ __sna_damage_add_rectangles(struct sna_damage *damage,
 
 	assert(n);
 
+	assert(r[0].width && r[0].height);
 	extents.x1 = r[0].x;
 	extents.x2 = r[0].x + r[0].width;
 	extents.y1 = r[0].y;
 	extents.y2 = r[0].y + r[0].height;
 	for (i = 1; i < n; i++) {
+		assert(r[i].width && r[i].height);
 		if (extents.x1 > r[i].x)
 			extents.x1 = r[i].x;
 		if (extents.x2 < r[i].x + r[i].width)
@@ -1074,6 +1078,8 @@ fastcall struct sna_damage *_sna_damage_subtract(struct sna_damage *damage,
 inline static struct sna_damage *__sna_damage_subtract_box(struct sna_damage *damage,
 							   const BoxRec *box)
 {
+	assert(box->x2 > box->x1 && box->y2 > box->y1);
+
 	if (damage == NULL)
 		return NULL;
 
@@ -1156,8 +1162,10 @@ static struct sna_damage *__sna_damage_subtract_boxes(struct sna_damage *damage,
 
 	assert(n);
 
+	assert(box[0].x2 > box[0].x1 && box[0].y2 > box[0].y1);
 	extents = box[0];
 	for (i = 1; i < n; i++) {
+		assert(box[i].x2 > box[i].x1 && box[i].y2 > box[i].y1);
 		if (extents.x1 > box[i].x1)
 			extents.x1 = box[i].x1;
 		if (extents.x2 < box[i].x2)
commit c4eb5528a456b65c673f7c984d14a622ac67cdca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 16:04:16 2012 +0100

    uxa: Check for DPMS off before scheduling a WAIT_ON_EVENT
    
    Regression from commit 3f3bde4f0c72f6f31aae322bcdc20b95eade6631
    Author: Chris Wilson <chris at chris-wilson.co.uk>
    Date:   Thu May 24 11:58:46 2012 +0100
    
        uxa: Only consider an output valid if the kernel reports it attached
    
    When backporting from SNA, a key difference that UXA does not track DPMS
    state in its enabled flag and that a DPMS off CRTC is still bound to the
    fb. So we do need to rescan the outputs and check that we have a
    connector enabled *and* the pipe is running prior to emitting a scanline
    wait.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50668
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 7d75abb..6f3f7e6 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1716,13 +1716,28 @@ int intel_crtc_to_pipe(xf86CrtcPtr crtc)
 Bool intel_crtc_on(xf86CrtcPtr crtc)
 {
 	struct intel_crtc *intel_crtc = crtc->driver_private;
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
 	drmModeCrtcPtr drm_crtc;
 	Bool ret;
+	int i;
 
 	if (!crtc->enabled)
 		return FALSE;
 
 	/* Kernel manages CRTC status based on output config */
+	ret = FALSE;
+	for (i = 0; i < xf86_config->num_output; i++) {
+		xf86OutputPtr output = xf86_config->output[i];
+		if (output->crtc == crtc &&
+		    intel_output_dpms_status(output) == DPMSModeOn) {
+			ret = TRUE;
+			break;
+		}
+	}
+	if (!ret)
+		return FALSE;
+
+	/* And finally check with the kernel that the fb is bound */
 	drm_crtc = drmModeGetCrtc(intel_crtc->mode->fd, crtc_id(intel_crtc));
 	if (drm_crtc == NULL)
 		return FALSE;
commit 7c51cabaecac52348766c622e80ed14b9854e54d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 12:59:46 2012 +0100

    sna: Try to create Y-tiled pixmaps for initial source bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index e06e3c2..e28823f 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -338,7 +338,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		}
 
 		if (priv->source_count++*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
-		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_X,
+		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_Y,
 							    pixmap->drawable.width,
 							    pixmap->drawable.height,
 							    pixmap->drawable.bitsPerPixel)) {
@@ -396,7 +396,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		migrate = true;
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-				       I915_TILING_X,
+				       I915_TILING_Y,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height,
 				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
@@ -413,7 +413,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 			count = priv->source_count++;
 			if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 			    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-					       I915_TILING_X,
+					       I915_TILING_Y,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
commit a26c05cc8936cab28d83c6beeff906a910353338
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 12:48:33 2012 +0100

    sna/gen2+: Tweak placement of operations for CPU-bound large pixmaps
    
    Try to avoid unnecessary migration to the GPU of large pixmaps that are
    wholly bound to the CPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 7b3e1ec..e67dd95 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1633,6 +1633,13 @@ gen2_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index d8d1e09..0037ba0 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2658,7 +2658,8 @@ gen3_composite_fallback(struct sna *sna,
 
 	if (mask &&
 	    mask->componentAlpha && PICT_FORMAT_RGB(mask->format) &&
-	    op != PictOpOver)
+	    op != PictOpOver &&
+	    gen3_blend_op[op].src_blend != BLENDFACT_ZERO)
 	{
 		DBG(("%s: component-alpha mask with op=%d, should fallback\n",
 		     __FUNCTION__, op));
@@ -2700,6 +2701,13 @@ gen3_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 80f6a95..aafe6d9 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2274,6 +2274,13 @@ gen4_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6746a58..d51a83b 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2314,6 +2314,13 @@ gen5_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 87a2c9c..d3b3e2a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2518,6 +2518,13 @@ gen6_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e30d941..0029895 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2611,6 +2611,13 @@ gen7_composite_fallback(struct sna *sna,
 		return TRUE;
 	}
 
+	if (too_large(dst_pixmap->drawable.width,
+		      dst_pixmap->drawable.height) &&
+	    (priv == NULL || DAMAGE_IS_ALL(priv->cpu_damage))) {
+		DBG(("%s: dst is on the CPU and too large\n", __FUNCTION__));
+		return TRUE;
+	}
+
 	DBG(("%s: dst is not on the GPU and the operation should not fallback\n",
 	     __FUNCTION__));
 	return FALSE;
commit a21bdbe3e312b40b936c5c68c84f5c1bc0f9fb88
Author: Dave Airlie <airlied at redhat.com>
Date:   Tue May 22 15:44:28 2012 +0100

    sna: port to compat api
    
    This ports SNA to the new compat-api.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a389acf..8da4f52 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -83,7 +83,7 @@ DevPrivateKeyRec sna_gc_index;
 DevPrivateKeyRec sna_glyph_key;
 DevPrivateKeyRec sna_glyph_image_key;
 
-static Bool sna_enter_vt(int scrnIndex, int flags);
+static Bool sna_enter_vt(VT_FUNC_ARGS_DECL);
 
 /* temporary */
 extern void xf86SetCursor(ScreenPtr screen, CursorPtr pCurs, int x, int y);
@@ -162,6 +162,7 @@ sna_load_palette(ScrnInfoPtr scrn, int numColors, int *indices,
  */
 static Bool sna_create_screen_resources(ScreenPtr screen)
 {
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna_from_screen(screen);
 
 	DBG(("%s(%dx%d@%d)\n", __FUNCTION__,
@@ -204,7 +205,7 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 
 	sna_copy_fbcon(sna);
 
-	if (!sna_enter_vt(screen->myNum, 0)) {
+	if (!sna_enter_vt(VT_FUNC_ARGS(0))) {
 		xf86DrvMsg(screen->myNum, X_ERROR,
 			   "[intel] Failed to become DRM master\n");
 		goto cleanup_front;
@@ -575,25 +576,27 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 }
 
 static void
-sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
+sna_block_handler(BLOCKHANDLER_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86ScreenToScrn(screenInfo.screens[i]);
+	SCREEN_PTR(arg);
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
 	struct timeval **tv = timeout;
 
 	DBG(("%s (tv=%ld.%06ld)\n", __FUNCTION__,
 	     *tv ? (*tv)->tv_sec : -1, *tv ? (*tv)->tv_usec : 0));
 
-	sna->BlockHandler(i, data, timeout, read_mask);
+	sna->BlockHandler(BLOCKHANDLER_ARGS);
 
 	if (*tv == NULL || ((*tv)->tv_usec | (*tv)->tv_sec))
 		sna_accel_block_handler(sna, tv);
 }
 
 static void
-sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
+sna_wakeup_handler(WAKEUPHANDLER_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86ScreenToScrn(screenInfo.screens[i]);
+	SCREEN_PTR(arg);
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
@@ -602,7 +605,7 @@ sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
 	if ((int)result < 0)
 		return;
 
-	sna->WakeupHandler(i, data, result, read_mask);
+	sna->WakeupHandler(WAKEUPHANDLER_ARGS);
 
 	sna_accel_wakeup_handler(sna, read_mask);
 
@@ -720,9 +723,9 @@ sna_uevent_fini(ScrnInfoPtr scrn)
 }
 #endif /* HAVE_UDEV */
 
-static void sna_leave_vt(int scrnIndex, int flags)
+static void sna_leave_vt(VT_FUNC_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	struct sna *sna = to_sna(scrn);
 	int ret;
 
@@ -750,9 +753,9 @@ static Bool sna_dri_has_pending_events(struct sna *sna)
 	return poll(&pfd, 1, 0) == 1;
 }
 
-static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
+static Bool sna_close_screen(CLOSE_SCREEN_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
@@ -766,7 +769,7 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 		sna_dri_wakeup(sna);
 
 	if (scrn->vtSema == TRUE)
-		sna_leave_vt(scrnIndex, 0);
+		sna_leave_vt(VT_FUNC_ARGS(0));
 
 	sna_accel_close(sna);
 
@@ -776,7 +779,7 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 	screen->devPrivate = NULL;
 
 	screen->CloseScreen = sna->CloseScreen;
-	(*screen->CloseScreen) (scrnIndex, screen);
+	(*screen->CloseScreen) (CLOSE_SCREEN_ARGS);
 
 	if (sna->directRenderingOpen) {
 		sna_dri_close(sna, screen);
@@ -788,7 +791,7 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 		screen->DestroyPixmap(sna->front);
 		sna->front = NULL;
 	}
-	xf86GARTCloseScreen(scrnIndex);
+	xf86GARTCloseScreen(scrn->scrnIndex);
 
 	scrn->vtSema = FALSE;
 	return TRUE;
@@ -825,7 +828,7 @@ agp_aperture_size(struct pci_device *dev, int gen)
 }
 
 static Bool
-sna_screen_init(int scrnIndex, ScreenPtr screen, int argc, char **argv)
+sna_screen_init(SCREEN_INIT_ARGS_DECL)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
@@ -946,13 +949,13 @@ sna_screen_init(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	return TRUE;
 }
 
-static void sna_adjust_frame(int scrnIndex, int x, int y, int flags)
+static void sna_adjust_frame(ADJUST_FRAME_ARGS_DECL)
 {
 }
 
-static void sna_free_screen(int scrnIndex, int flags)
+static void sna_free_screen(FREE_SCREEN_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
@@ -970,9 +973,9 @@ static void sna_free_screen(int scrnIndex, int flags)
 /*
  * This gets called when gaining control of the VT, and from ScreenInit().
  */
-static Bool sna_enter_vt(int scrnIndex, int flags)
+static Bool sna_enter_vt(VT_FUNC_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
@@ -991,14 +994,15 @@ static Bool sna_enter_vt(int scrnIndex, int flags)
 	return TRUE;
 }
 
-static Bool sna_switch_mode(int scrnIndex, DisplayModePtr mode, int flags)
+static Bool sna_switch_mode(SWITCH_MODE_ARGS_DECL)
 {
+	SCRN_INFO_PTR(arg);
 	DBG(("%s\n", __FUNCTION__));
-	return xf86SetSingleMode(xf86Screens[scrnIndex], mode, RR_Rotate_0);
+	return xf86SetSingleMode(scrn, mode, RR_Rotate_0);
 }
 
 static ModeStatus
-sna_valid_mode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
+sna_valid_mode(SCRN_ARG_TYPE arg, DisplayModePtr mode, Bool verbose, int flags)
 {
 	return MODE_OK;
 }
@@ -1015,9 +1019,9 @@ sna_valid_mode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
  * DoApmEvent() in common/xf86PM.c, including if we want to see events other
  * than suspend/resume.
  */
-static Bool sna_pm_event(int scrnIndex, pmEvent event, Bool undo)
+static Bool sna_pm_event(SCRN_ARG_TYPE arg, pmEvent event, Bool undo)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
@@ -1029,12 +1033,12 @@ static Bool sna_pm_event(int scrnIndex, pmEvent event, Bool undo)
 	case XF86_APM_SYS_STANDBY:
 	case XF86_APM_USER_STANDBY:
 		if (!undo && !sna->suspended) {
-			scrn->LeaveVT(scrnIndex, 0);
+			scrn->LeaveVT(VT_FUNC_ARGS(0));
 			sna->suspended = TRUE;
 			sleep(SUSPEND_SLEEP);
 		} else if (undo && sna->suspended) {
 			sleep(RESUME_SLEEP);
-			scrn->EnterVT(scrnIndex, 0);
+			scrn->EnterVT(VT_FUNC_ARGS(0));
 			sna->suspended = FALSE;
 		}
 		break;
@@ -1043,7 +1047,7 @@ static Bool sna_pm_event(int scrnIndex, pmEvent event, Bool undo)
 	case XF86_APM_CRITICAL_RESUME:
 		if (sna->suspended) {
 			sleep(RESUME_SLEEP);
-			scrn->EnterVT(scrnIndex, 0);
+			scrn->EnterVT(VT_FUNC_ARGS(0));
 			sna->suspended = FALSE;
 			/*
 			 * Turn the screen saver off when resuming.  This seems to be
commit 8f5001493e18b6b3b97ca5f0c923678a51975bd0
Author: Dave Airlie <airlied at redhat.com>
Date:   Tue May 22 15:35:01 2012 +0100

    intel: port legacy and uxa to new API.
    
    This ports the legacy and uxa driver to the new server API.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/compat-api.h b/src/compat-api.h
index 1bb7724..2daf910 100644
--- a/src/compat-api.h
+++ b/src/compat-api.h
@@ -38,4 +38,68 @@
 #define xf86ScrnToScreen(s) screenInfo.screens[(s)->scrnIndex]
 #endif
 
+#ifndef XF86_SCRN_INTERFACE
+
+#define SCRN_ARG_TYPE int
+#define SCRN_INFO_PTR(arg1) ScrnInfoPtr scrn = xf86Screens[(arg1)]
+
+#define SCREEN_ARG_TYPE int
+#define SCREEN_PTR(arg1) ScreenPtr screen = screenInfo.screens[(arg1)]
+
+#define SCREEN_INIT_ARGS_DECL int scrnIndex, ScreenPtr screen, int argc, char **argv
+
+#define BLOCKHANDLER_ARGS_DECL int arg, pointer blockData, pointer timeout, pointer read_mask
+#define BLOCKHANDLER_ARGS arg, blockData, timeout, read_mask
+
+#define WAKEUPHANDLER_ARGS_DECL int arg, pointer wakeupData, unsigned long result, pointer read_mask
+#define WAKEUPHANDLER_ARGS arg, wakeupData, result, read_mask
+
+#define CLOSE_SCREEN_ARGS_DECL int scrnIndex, ScreenPtr screen
+#define CLOSE_SCREEN_ARGS scrnIndex, screen
+
+#define ADJUST_FRAME_ARGS_DECL int arg, int x, int y, int flags
+#define ADJUST_FRAME_ARGS(arg, x, y) (arg)->scrnIndex, x, y, 0
+
+#define SWITCH_MODE_ARGS_DECL int arg, DisplayModePtr mode, int flags
+#define SWITCH_MODE_ARGS(arg, m) (arg)->scrnIndex, m, 0
+
+#define FREE_SCREEN_ARGS_DECL int arg, int flags
+
+#define VT_FUNC_ARGS_DECL int arg, int flags
+#define VT_FUNC_ARGS(flags) scrn->scrnIndex, (flags)
+
+#define XF86_ENABLEDISABLEFB_ARG(x) ((x)->scrnIndex)
+
+#else
+#define SCRN_ARG_TYPE ScrnInfoPtr
+#define SCRN_INFO_PTR(arg1) ScrnInfoPtr scrn = (arg1)
+
+#define SCREEN_ARG_TYPE ScreenPtr
+#define SCREEN_PTR(arg1) ScreenPtr screen = (arg1)
+
+#define SCREEN_INIT_ARGS_DECL ScreenPtr screen, int argc, char **argv
+
+#define BLOCKHANDLER_ARGS_DECL ScreenPtr arg, pointer timeout, pointer read_mask
+#define BLOCKHANDLER_ARGS arg, timeout, read_mask
+
+#define WAKEUPHANDLER_ARGS_DECL ScreenPtr arg, unsigned long result, pointer read_mask
+#define WAKEUPHANDLER_ARGS arg, result, read_mask
+
+#define CLOSE_SCREEN_ARGS_DECL ScreenPtr screen
+#define CLOSE_SCREEN_ARGS screen
+
+#define ADJUST_FRAME_ARGS_DECL ScrnInfoPtr arg, int x, int y
+#define ADJUST_FRAME_ARGS(arg, x, y) arg, x, y
+
+#define SWITCH_MODE_ARGS_DECL ScrnInfoPtr arg, DisplayModePtr mode
+#define SWITCH_MODE_ARGS(arg, m) arg, m
+
+#define FREE_SCREEN_ARGS_DECL ScrnInfoPtr arg
+
+#define VT_FUNC_ARGS_DECL ScrnInfoPtr arg
+#define VT_FUNC_ARGS(flags) scrn
+
+#define XF86_ENABLEDISABLEFB_ARG(x) (x)
+
+#endif
 #endif
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 190517f..67cec48 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -80,9 +80,9 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "intel_glamor.h"
 #include "intel_options.h"
 
-static void i830AdjustFrame(int scrnIndex, int x, int y, int flags);
-static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen);
-static Bool I830EnterVT(int scrnIndex, int flags);
+static void i830AdjustFrame(ADJUST_FRAME_ARGS_DECL);
+static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL);
+static Bool I830EnterVT(VT_FUNC_ARGS_DECL);
 
 /* temporary */
 extern void xf86SetCursor(ScreenPtr screen, CursorPtr pCurs, int x, int y);
@@ -677,15 +677,15 @@ void IntelEmitInvarientState(ScrnInfoPtr scrn)
 }
 
 static void
-I830BlockHandler(int i, pointer blockData, pointer pTimeout, pointer pReadmask)
+I830BlockHandler(BLOCKHANDLER_ARGS_DECL)
 {
-	ScreenPtr screen = screenInfo.screens[i];
-	ScrnInfoPtr scrn = xf86Screens[i];
+	SCREEN_PTR(arg);
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	screen->BlockHandler = intel->BlockHandler;
 
-	(*screen->BlockHandler) (i, blockData, pTimeout, pReadmask);
+	(*screen->BlockHandler) (BLOCKHANDLER_ARGS);
 
 	intel->BlockHandler = screen->BlockHandler;
 	screen->BlockHandler = I830BlockHandler;
@@ -837,7 +837,7 @@ I830UeventFini(ScrnInfoPtr scrn)
 #endif /* HAVE_UDEV */
 
 static Bool
-I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
+I830ScreenInit(SCREEN_INIT_ARGS_DECL)
 {
 	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
@@ -1007,16 +1007,16 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	 * later memory should be bound when allocating, e.g rotate_mem */
 	scrn->vtSema = TRUE;
 
-	return I830EnterVT(scrnIndex, 0);
+	return I830EnterVT(VT_FUNC_ARGS(0));
 }
 
-static void i830AdjustFrame(int scrnIndex, int x, int y, int flags)
+static void i830AdjustFrame(ADJUST_FRAME_ARGS_DECL)
 {
 }
 
-static void I830FreeScreen(int scrnIndex, int flags)
+static void I830FreeScreen(FREE_SCREEN_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (intel) {
@@ -1029,9 +1029,9 @@ static void I830FreeScreen(int scrnIndex, int flags)
 	}
 }
 
-static void I830LeaveVT(int scrnIndex, int flags)
+static void I830LeaveVT(VT_FUNC_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int ret;
 
@@ -1048,9 +1048,9 @@ static void I830LeaveVT(int scrnIndex, int flags)
 /*
  * This gets called when gaining control of the VT, and from ScreenInit().
  */
-static Bool I830EnterVT(int scrnIndex, int flags)
+static Bool I830EnterVT(VT_FUNC_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int ret;
 
@@ -1067,16 +1067,16 @@ static Bool I830EnterVT(int scrnIndex, int flags)
 	return TRUE;
 }
 
-static Bool I830SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
+static Bool I830SwitchMode(SWITCH_MODE_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 
 	return xf86SetSingleMode(scrn, mode, RR_Rotate_0);
 }
 
-static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen)
+static Bool I830CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 #if HAVE_UDEV
@@ -1084,7 +1084,7 @@ static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen)
 #endif
 
 	if (scrn->vtSema == TRUE) {
-		I830LeaveVT(scrnIndex, 0);
+		I830LeaveVT(VT_FUNC_ARGS(0));
 	}
 
 	DeleteCallback(&FlushCallback, intel_flush_callback, scrn);
@@ -1141,7 +1141,7 @@ static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen)
 	i965_free_video(scrn);
 
 	screen->CloseScreen = intel->CloseScreen;
-	(*screen->CloseScreen) (scrnIndex, screen);
+	(*screen->CloseScreen) (CLOSE_SCREEN_ARGS);
 
 	if (intel->directRenderingOpen
 	    && intel->directRenderingType == DRI_DRI2) {
@@ -1149,18 +1149,19 @@ static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen)
 		I830DRI2CloseScreen(screen);
 	}
 
-	xf86GARTCloseScreen(scrnIndex);
+	xf86GARTCloseScreen(scrn->scrnIndex);
 
 	scrn->vtSema = FALSE;
 	return TRUE;
 }
 
 static ModeStatus
-I830ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
+I830ValidMode(SCRN_ARG_TYPE arg, DisplayModePtr mode, Bool verbose, int flags)
 {
+	SCRN_INFO_PTR(arg);
 	if (mode->Flags & V_INTERLACE) {
 		if (verbose) {
-			xf86DrvMsg(scrnIndex, X_PROBED,
+			xf86DrvMsg(scrn->scrnIndex, X_PROBED,
 				   "Removing interlaced mode \"%s\"\n",
 				   mode->name);
 		}
@@ -1181,9 +1182,9 @@ I830ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
  * DoApmEvent() in common/xf86PM.c, including if we want to see events other
  * than suspend/resume.
  */
-static Bool I830PMEvent(int scrnIndex, pmEvent event, Bool undo)
+static Bool I830PMEvent(SCRN_ARG_TYPE arg, pmEvent event, Bool undo)
 {
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+	SCRN_INFO_PTR(arg);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	switch (event) {
@@ -1193,12 +1194,12 @@ static Bool I830PMEvent(int scrnIndex, pmEvent event, Bool undo)
 	case XF86_APM_SYS_STANDBY:
 	case XF86_APM_USER_STANDBY:
 		if (!undo && !intel->suspended) {
-			scrn->LeaveVT(scrnIndex, 0);
+			scrn->LeaveVT(VT_FUNC_ARGS(0));
 			intel->suspended = TRUE;
 			sleep(SUSPEND_SLEEP);
 		} else if (undo && intel->suspended) {
 			sleep(RESUME_SLEEP);
-			scrn->EnterVT(scrnIndex, 0);
+			scrn->EnterVT(VT_FUNC_ARGS(0));
 			intel->suspended = FALSE;
 		}
 		break;
@@ -1207,7 +1208,7 @@ static Bool I830PMEvent(int scrnIndex, pmEvent event, Bool undo)
 	case XF86_APM_CRITICAL_RESUME:
 		if (intel->suspended) {
 			sleep(RESUME_SLEEP);
-			scrn->EnterVT(scrnIndex, 0);
+			scrn->EnterVT(VT_FUNC_ARGS(0));
 			intel->suspended = FALSE;
 			/*
 			 * Turn the screen saver off when resuming.  This seems to be
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index f4caf56..874551b 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -287,8 +287,8 @@ extern Bool I810UnbindGARTMemory(ScrnInfoPtr pScrn);
 
 extern int I810CheckAvailableMemory(ScrnInfoPtr pScrn);
 
-extern Bool I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags);
-extern void I810AdjustFrame(int scrnIndex, int x, int y, int flags);
+extern Bool I810SwitchMode(SWITCH_MODE_ARGS_DECL);
+extern void I810AdjustFrame(ADJUST_FRAME_ARGS_DECL);
 
 extern void I810SetupForScreenToScreenCopy(ScrnInfoPtr pScrn, int xdir,
 					   int ydir, int rop,
diff --git a/src/legacy/i810/i810_dga.c b/src/legacy/i810/i810_dga.c
index baf0011..336588c 100644
--- a/src/legacy/i810/i810_dga.c
+++ b/src/legacy/i810/i810_dga.c
@@ -148,8 +148,8 @@ I810_SetMode(ScrnInfoPtr pScrn, DGAModePtr pMode)
    if (!pMode) {			/* restore the original mode */
       if (pI810->DGAactive) {
 	 pScrn->currentMode = I810SavedDGAModes[index];
-	 pScrn->SwitchMode(index, pScrn->currentMode, 0);
-	 pScrn->AdjustFrame(index, 0, 0, 0);
+	 pScrn->SwitchMode(SWITCH_MODE_ARGS(pScrn, pScrn->currentMode));
+	 pScrn->AdjustFrame(ADJUST_FRAME_ARGS(pScrn, 0, 0));
 	 pI810->DGAactive = FALSE;
       }
    } else {
@@ -157,8 +157,7 @@ I810_SetMode(ScrnInfoPtr pScrn, DGAModePtr pMode)
 	 I810SavedDGAModes[index] = pScrn->currentMode;
 	 pI810->DGAactive = TRUE;
       }
-
-      pScrn->SwitchMode(index, pMode->mode, 0);
+      pScrn->SwitchMode(SWITCH_MODE_ARGS(pScrn, pMode->mode));
    }
 
    return TRUE;
@@ -178,7 +177,7 @@ I810_SetViewport(ScrnInfoPtr pScrn, int x, int y, int flags)
    I810Ptr pI810 = I810PTR(pScrn);
    vgaHWPtr hwp = VGAHWPTR(pScrn);
 
-   pScrn->AdjustFrame(pScrn->pScreen->myNum, x, y, flags);
+   pScrn->AdjustFrame(ADJUST_FRAME_ARGS(pScrn, x, y));
 
    /* wait for retrace */
    while ((hwp->readST01(hwp) & 0x08)) ;
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 4a73e9b..519a4f0 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -77,18 +77,17 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "../legacy.h"
 
-static Bool I810PreInit(ScrnInfoPtr scrn, int flags);
-static Bool I810ScreenInit(int Index, ScreenPtr screen, int argc,
-			   char **argv);
-static Bool I810EnterVT(int scrnIndex, int flags);
-static void I810LeaveVT(int scrnIndex, int flags);
-static Bool I810CloseScreen(int scrnIndex, ScreenPtr screen);
-static Bool I810SaveScreen(ScreenPtr screen, Bool unblank);
-static void I810FreeScreen(int scrnIndex, int flags);
-static void I810DisplayPowerManagementSet(ScrnInfoPtr scrn,
+static Bool I810PreInit(ScrnInfoPtr pScrn, int flags);
+static Bool I810ScreenInit(SCREEN_INIT_ARGS);
+static Bool I810EnterVT(VT_FUNC_ARGS_DECL);
+static void I810LeaveVT(VT_FUNC_ARGS_DECL);
+static Bool I810CloseScreen(CLOSE_SCREEN_ARGS_DECL);
+static Bool I810SaveScreen(ScreenPtr pScreen, Bool unblank);
+static void I810FreeScreen(FREE_SCREEN_ARGS_DECL);
+static void I810DisplayPowerManagementSet(ScrnInfoPtr pScrn,
 					  int PowerManagermentMode,
 					  int flags);
-static ModeStatus I810ValidMode(int scrnIndex, DisplayModePtr mode,
+static ModeStatus I810ValidMode(SCRN_ARG_TYPE arg, DisplayModePtr mode,
 				Bool verbose, int flags);
 
 typedef enum {
@@ -1570,7 +1569,7 @@ I810AllocateFront(ScrnInfoPtr scrn)
 }
 
 static Bool
-I810ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
+I810ScreenInit(SCREEN_INIT_ARGS_DECL)
 {
    ScrnInfoPtr scrn;
    vgaHWPtr hwp;
@@ -1649,7 +1648,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
       return FALSE;
 
    I810SaveScreen(screen, FALSE);
-   I810AdjustFrame(scrnIndex, scrn->frameX0, scrn->frameY0, 0);
+   I810AdjustFrame(ADJUST_FRAME_ARGS(scrn, scrn->frameX0, scrn->frameY0));
 
    if (!fbScreenInit(screen, pI810->FbBase + scrn->fbOffset,
 		     scrn->virtualX, scrn->virtualY,
@@ -1785,14 +1784,14 @@ I810ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 }
 
 Bool
-I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
+I810SwitchMode(SWITCH_MODE_ARGS_DECL)
 {
-   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   SCRN_INFO_PTR(arg);
 #if 0
    I810Ptr pI810 = I810PTR(scrn);
 #endif
    if (I810_DEBUG & DEBUG_VERBOSE_CURSOR)
-      ErrorF("I810SwitchMode %p %x\n", (void *)mode, flags);
+      ErrorF("I810SwitchMode %p\n", (void *)mode);
 
 #if 0
 /* 
@@ -1836,9 +1835,9 @@ I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
 }
 
 void
-I810AdjustFrame(int scrnIndex, int x, int y, int flags)
+I810AdjustFrame(ADJUST_FRAME_ARGS_DECL)
 {
-   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   SCRN_INFO_PTR(arg);
    I810Ptr pI810 = I810PTR(scrn);
    vgaHWPtr hwp = VGAHWPTR(scrn);
    int Base;
@@ -1856,7 +1855,7 @@ I810AdjustFrame(int scrnIndex, int x, int y, int flags)
    Base = (y * scrn->displayWidth + x) >> 2;
 
    if (I810_DEBUG & DEBUG_VERBOSE_CURSOR)
-      ErrorF("I810AdjustFrame %d,%d %x\n", x, y, flags);
+      ErrorF("I810AdjustFrame %d,%d\n", x, y);
 
    switch (scrn->bitsPerPixel) {
    case 8:
@@ -1888,9 +1887,9 @@ I810AdjustFrame(int scrnIndex, int x, int y, int flags)
 /* These functions are usually called with the lock **not held**.
  */
 static Bool
-I810EnterVT(int scrnIndex, int flags)
+I810EnterVT(VT_FUNC_ARGS_DECL)
 {
-   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   SCRN_INFO_PTR(arg);
 
 #ifdef HAVE_DRI1
    I810Ptr pI810 = I810PTR(scrn);
@@ -1909,21 +1908,21 @@ I810EnterVT(int scrnIndex, int flags)
    if (pI810->directRenderingEnabled) {
       if (I810_DEBUG & DEBUG_VERBOSE_DRI)
 	 ErrorF("calling dri unlock\n");
-      DRIUnlock(screenInfo.screens[scrnIndex]);
+      DRIUnlock(xf86ScrnToScreen(scrn));
       pI810->LockHeld = 0;
    }
 #endif
 
    if (!I810ModeInit(scrn, scrn->currentMode))
       return FALSE;
-   I810AdjustFrame(scrnIndex, scrn->frameX0, scrn->frameY0, 0);
+   I810AdjustFrame(ADJUST_FRAME_ARGS(scrn, scrn->frameX0, scrn->frameY0));
    return TRUE;
 }
 
 static void
-I810LeaveVT(int scrnIndex, int flags)
+I810LeaveVT(VT_FUNC_ARGS_DECL)
 {
-   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   SCRN_INFO_PTR(arg);
    vgaHWPtr hwp = VGAHWPTR(scrn);
    I810Ptr pI810 = I810PTR(scrn);
 
@@ -1934,7 +1933,7 @@ I810LeaveVT(int scrnIndex, int flags)
    if (pI810->directRenderingEnabled) {
       if (I810_DEBUG & DEBUG_VERBOSE_DRI)
 	 ErrorF("calling dri lock\n");
-      DRILock(screenInfo.screens[scrnIndex], 0);
+      DRILock(xf86ScrnToScreen(scrn), 0);
       pI810->LockHeld = 1;
    }
 #endif
@@ -1957,9 +1956,9 @@ I810LeaveVT(int scrnIndex, int flags)
 }
 
 static Bool
-I810CloseScreen(int scrnIndex, ScreenPtr screen)
+I810CloseScreen(CLOSE_SCREEN_ARGS_DECL)
 {
-   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
    vgaHWPtr hwp = VGAHWPTR(scrn);
    I810Ptr pI810 = I810PTR(scrn);
    XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
@@ -2015,30 +2014,32 @@ I810CloseScreen(int scrnIndex, ScreenPtr screen)
    /* Need to actually close the gart fd, or the unbound memory will just sit
     * around.  Will prevent the Xserver from recycling.
     */
-   xf86GARTCloseScreen(scrnIndex);
+   xf86GARTCloseScreen(scrn->scrnIndex);
 
    free(pI810->LpRing);
    pI810->LpRing = NULL;
 
    scrn->vtSema = FALSE;
    screen->CloseScreen = pI810->CloseScreen;
-   return (*screen->CloseScreen) (scrnIndex, screen);
+   return (*screen->CloseScreen) (CLOSE_SCREEN_ARGS);
 }
 
 static void
-I810FreeScreen(int scrnIndex, int flags)
+I810FreeScreen(FREE_SCREEN_ARGS_DECL)
 {
-   I810FreeRec(xf86Screens[scrnIndex]);
+   SCRN_INFO_PTR(arg);
+   I810FreeRec(scrn);
    if (xf86LoaderCheckSymbol("vgaHWFreeHWRec"))
-      vgaHWFreeHWRec(xf86Screens[scrnIndex]);
+     vgaHWFreeHWRec(scrn);
 }
 
 static ModeStatus
-I810ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
+I810ValidMode(SCRN_ARG_TYPE arg, DisplayModePtr mode, Bool verbose, int flags)
 {
+   SCRN_INFO_PTR(arg);
    if (mode->Flags & V_INTERLACE) {
       if (verbose) {
-	 xf86DrvMsg(scrnIndex, X_PROBED,
+	 xf86DrvMsg(scrn->scrnIndex, X_PROBED,
 		    "Removing interlaced mode \"%s\"\n", mode->name);
       }
       return MODE_BAD;
diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 613cbf2..440f9f7 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -78,7 +78,7 @@ static int I810PutImage( ScrnInfoPtr,
 static int I810QueryImageAttributes(ScrnInfoPtr, 
 	int, unsigned short *, unsigned short *,  int *, int *);
 
-static void I810BlockHandler(int, pointer, pointer, pointer);
+static void I810BlockHandler(BLOCKHANDLER_ARGS_DECL);
 
 #define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
 
@@ -1139,21 +1139,17 @@ I810QueryImageAttributes(
 }
 
 static void
-I810BlockHandler (
-    int i,
-    pointer     blockData,
-    pointer     pTimeout,
-    pointer     pReadmask
-){
-    ScreenPtr   screen = screenInfo.screens[i];
-    ScrnInfoPtr pScrn = xf86Screens[i];
+I810BlockHandler (BLOCKHANDLER_ARGS_DECL)
+{
+    SCREEN_PTR(arg);
+    ScrnInfoPtr pScrn = xf86ScreenToScrn(screen);
     I810Ptr      pI810 = I810PTR(pScrn);
     I810PortPrivPtr pPriv = GET_PORT_PRIVATE(pScrn);
     I810OverlayRegPtr overlay = (I810OverlayRegPtr) (pI810->FbBase + pI810->OverlayStart); 
 
     screen->BlockHandler = pI810->BlockHandler;
     
-    (*screen->BlockHandler) (i, blockData, pTimeout, pReadmask);
+    (*screen->BlockHandler) (BLOCKHANDLER_ARGS);
 
     screen->BlockHandler = I810BlockHandler;
 
diff --git a/uxa/uxa.c b/uxa/uxa.c
index 1cc82ab..2635b50 100644
--- a/uxa/uxa.c
+++ b/uxa/uxa.c
@@ -363,11 +363,11 @@ void uxa_set_force_fallback(ScreenPtr screen, Bool value)
  * uxa_close_screen() unwraps its wrapped screen functions and tears down UXA's
  * screen private, before calling down to the next CloseScreen.
  */
-static Bool uxa_close_screen(int i, ScreenPtr pScreen)
+static Bool uxa_close_screen(CLOSE_SCREEN_ARGS_DECL)
 {
-	uxa_screen_t *uxa_screen = uxa_get_screen(pScreen);
+	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 #ifdef RENDER
-	PictureScreenPtr ps = GetPictureScreenIfSet(pScreen);
+	PictureScreenPtr ps = GetPictureScreenIfSet(screen);
 #endif
 	int n;
 
@@ -380,28 +380,28 @@ static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 	for (n = 0; n < uxa_screen->solid_cache_size; n++)
 		FreePicture(uxa_screen->solid_cache[n].picture, 0);
 
-	uxa_glyphs_fini(pScreen);
+	uxa_glyphs_fini(screen);
 
-	if (pScreen->devPrivate) {
+	if (screen->devPrivate) {
 		/* Destroy the pixmap created by miScreenInit() *before*
 		 * chaining up as we finalize ourselves here and so this
 		 * is the last chance we have of releasing our resources
 		 * associated with the Pixmap. So do it first.
 		 */
-		(void) (*pScreen->DestroyPixmap) (pScreen->devPrivate);
-		pScreen->devPrivate = NULL;
+		(void) (*screen->DestroyPixmap) (screen->devPrivate);
+		screen->devPrivate = NULL;
 	}
 
-	pScreen->CreateGC = uxa_screen->SavedCreateGC;
-	pScreen->CloseScreen = uxa_screen->SavedCloseScreen;
-	pScreen->GetImage = uxa_screen->SavedGetImage;
-	pScreen->GetSpans = uxa_screen->SavedGetSpans;
-	pScreen->CreatePixmap = uxa_screen->SavedCreatePixmap;
-	pScreen->DestroyPixmap = uxa_screen->SavedDestroyPixmap;
-	pScreen->CopyWindow = uxa_screen->SavedCopyWindow;
-	pScreen->ChangeWindowAttributes =
+	screen->CreateGC = uxa_screen->SavedCreateGC;
+	screen->CloseScreen = uxa_screen->SavedCloseScreen;
+	screen->GetImage = uxa_screen->SavedGetImage;
+	screen->GetSpans = uxa_screen->SavedGetSpans;
+	screen->CreatePixmap = uxa_screen->SavedCreatePixmap;
+	screen->DestroyPixmap = uxa_screen->SavedDestroyPixmap;
+	screen->CopyWindow = uxa_screen->SavedCopyWindow;
+	screen->ChangeWindowAttributes =
 	    uxa_screen->SavedChangeWindowAttributes;
-	pScreen->BitmapToRegion = uxa_screen->SavedBitmapToRegion;
+	screen->BitmapToRegion = uxa_screen->SavedBitmapToRegion;
 #ifdef RENDER
 	if (ps) {
 		ps->Composite = uxa_screen->SavedComposite;
@@ -416,7 +416,7 @@ static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 
 	free(uxa_screen);
 
-	return (*pScreen->CloseScreen) (i, pScreen);
+	return (*screen->CloseScreen) (CLOSE_SCREEN_ARGS);
 }
 
 /**
@@ -435,7 +435,7 @@ uxa_driver_t *uxa_driver_alloc(void)
 }
 
 /**
- * @param pScreen screen being initialized
+ * @param screen screen being initialized
  * @param pScreenInfo UXA driver record
  *
  * uxa_driver_init sets up UXA given a driver record filled in by the driver.
commit c9824827422f8ec4f46c9cba42d871c98dc54761
Author: Dave Airlie <airlied at redhat.com>
Date:   Tue Jun 5 10:38:21 2012 +0100

    sna: drop using block/wakeup data.
    
    These went away in the new server API, and really if this
    made any measurable difference, I'd be impressed.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index df7f42f..f8ec796 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -252,9 +252,7 @@ struct sna {
 	struct intel_chipset chipset;
 
 	ScreenBlockHandlerProcPtr BlockHandler;
-	void *BlockData;
 	ScreenWakeupHandlerProcPtr WakeupHandler;
-	void *WakeupData;
 	CloseScreenProcPtr CloseScreen;
 
 	PicturePtr clear;
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 59ee57f..a389acf 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -577,13 +577,14 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 static void
 sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
 {
-	struct sna *sna = data;
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screenInfo.screens[i]);
+	struct sna *sna = to_sna(scrn);
 	struct timeval **tv = timeout;
 
 	DBG(("%s (tv=%ld.%06ld)\n", __FUNCTION__,
 	     *tv ? (*tv)->tv_sec : -1, *tv ? (*tv)->tv_usec : 0));
 
-	sna->BlockHandler(i, sna->BlockData, timeout, read_mask);
+	sna->BlockHandler(i, data, timeout, read_mask);
 
 	if (*tv == NULL || ((*tv)->tv_usec | (*tv)->tv_sec))
 		sna_accel_block_handler(sna, tv);
@@ -592,7 +593,8 @@ sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
 static void
 sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
 {
-	struct sna *sna = data;
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screenInfo.screens[i]);
+	struct sna *sna = to_sna(scrn);
 
 	DBG(("%s\n", __FUNCTION__));
 
@@ -600,7 +602,7 @@ sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
 	if ((int)result < 0)
 		return;
 
-	sna->WakeupHandler(i, sna->WakeupData, result, read_mask);
+	sna->WakeupHandler(i, data, result, read_mask);
 
 	sna_accel_wakeup_handler(sna, read_mask);
 
@@ -900,14 +902,10 @@ sna_screen_init(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	scrn->vtSema = TRUE;
 
 	sna->BlockHandler = screen->BlockHandler;
-	sna->BlockData = screen->blockData;
 	screen->BlockHandler = sna_block_handler;
-	screen->blockData = sna;
 
 	sna->WakeupHandler = screen->WakeupHandler;
-	sna->WakeupData = screen->wakeupData;
 	screen->WakeupHandler = sna_wakeup_handler;
-	screen->wakeupData = sna;
 
 	screen->SaveScreen = xf86SaveScreen;
 	sna->CloseScreen = screen->CloseScreen;
commit d9850a05003008d86d0b226bcd775aebdd43bccd
Author: Dave Airlie <airlied at redhat.com>
Date:   Tue Jun 5 10:04:48 2012 +0100

    uxa: drop enable disable access hook
    
    This looks to be unused.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/uxa/uxa-priv.h b/uxa/uxa-priv.h
index b74a625..91ef4e0 100644
--- a/uxa/uxa-priv.h
+++ b/uxa/uxa-priv.h
@@ -131,7 +131,6 @@ typedef struct {
 	AddTrapsProcPtr SavedAddTraps;
 	UnrealizeGlyphProcPtr SavedUnrealizeGlyph;
 #endif
-	EnableDisableFBAccessProcPtr SavedEnableDisableFBAccess;
 
 	Bool force_fallback;
 	Bool fallback_debug;
diff --git a/uxa/uxa.c b/uxa/uxa.c
index 0ba6869..1cc82ab 100644
--- a/uxa/uxa.c
+++ b/uxa/uxa.c
@@ -366,7 +366,6 @@ void uxa_set_force_fallback(ScreenPtr screen, Bool value)
 static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 {
 	uxa_screen_t *uxa_screen = uxa_get_screen(pScreen);
-	ScrnInfoPtr scrn = xf86ScreenToScrn(pScreen);
 #ifdef RENDER
 	PictureScreenPtr ps = GetPictureScreenIfSet(pScreen);
 #endif
@@ -403,7 +402,6 @@ static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 	pScreen->ChangeWindowAttributes =
 	    uxa_screen->SavedChangeWindowAttributes;
 	pScreen->BitmapToRegion = uxa_screen->SavedBitmapToRegion;
-	scrn->EnableDisableFBAccess = uxa_screen->SavedEnableDisableFBAccess;
 #ifdef RENDER
 	if (ps) {
 		ps->Composite = uxa_screen->SavedComposite;
commit 99df720216787c52d16a350dd25469e2dcea2c47
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 10:24:34 2012 +0100

    sna: Add inline keyword in conjunction with attribute(always_inline)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/compiler.h b/src/sna/compiler.h
index 653435f..ff80365 100644
--- a/src/sna/compiler.h
+++ b/src/sna/compiler.h
@@ -32,7 +32,7 @@
 #define likely(expr) (__builtin_expect (!!(expr), 1))
 #define unlikely(expr) (__builtin_expect (!!(expr), 0))
 #define noinline __attribute__((noinline))
-#define force_inline __attribute__((always_inline))
+#define force_inline inline __attribute__((always_inline))
 #define fastcall __attribute__((regparm(3)))
 #define must_check __attribute__((warn_unused_result))
 #define constant __attribute__((const))
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e2212b9..bfb4e0c 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3672,14 +3672,14 @@ struct inplace {
 	};
 };
 
-static inline uint8_t
+static force_inline uint8_t
 mul_8_8(uint8_t a, uint8_t b)
 {
     uint16_t t = a * (uint16_t)b + 0x7f;
     return ((t >> 8) + t) >> 8;
 }
 
-static uint8_t coverage_opacity(int coverage, uint8_t opacity)
+static force_inline uint8_t coverage_opacity(int coverage, uint8_t opacity)
 {
 	coverage = coverage * 256 / FAST_SAMPLES_XY;
 	return mul_8_8(coverage - (coverage >> 8), opacity);
commit 47d7da5a986a9f133cd3dc8314ff243001586a36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jun 5 10:18:40 2012 +0100

    sna: Maintain the operation on the CPU if not completely replacing all-damaged
    
    We may as well continue the operation on the CPU if already bound to try to
    reduce needless migrations (where the cost of the upload will outweigh
    further use).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 973a657..8528217 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -790,6 +790,13 @@ sna_composite_rectangles(CARD8		 op,
 		goto fallback;
 	}
 
+	if (DAMAGE_IS_ALL(priv->cpu_damage) &&
+	    (region.extents.x2 - region.extents.x1 < pixmap->drawable.width ||
+	     region.extents.y2 - region.extents.y1 < pixmap->drawable.height)) {
+		DBG(("%s: fallback due to completely damaged CPU\n", __FUNCTION__));
+		goto fallback;
+	}
+
 	/* If we going to be overwriting any CPU damage with a subsequent
 	 * operation, then we may as well delete it without moving it
 	 * first to the GPU.
commit 81f09347f2ab59cf0a3eaca7be83ded555655e93
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 16:45:05 2012 +0100

    uxa/i965: Silence static analyser by asserting the bo exists for the video
    
    This is already checked at the beginning of PutImageTextured, so this
    check upon the return value of intel_get_pixmap_bo() should only be
    required to keep static analysers happy.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_video.c b/src/i965_video.c
index c94f530..d9350ce 100644
--- a/src/i965_video.c
+++ b/src/i965_video.c
@@ -388,6 +388,7 @@ static void i965_create_dst_surface_state(ScrnInfoPtr scrn,
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct brw_surface_state dest_surf_state;
 	drm_intel_bo *pixmap_bo = intel_get_pixmap_bo(pixmap);
+	assert(pixmap_bo != NULL);
 
 	memset(&dest_surf_state, 0, sizeof(dest_surf_state));
 
@@ -484,6 +485,7 @@ static void gen7_create_dst_surface_state(ScrnInfoPtr scrn,
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct gen7_surface_state dest_surf_state;
 	drm_intel_bo *pixmap_bo = intel_get_pixmap_bo(pixmap);
+	assert(pixmap_bo != NULL);
 
 	memset(&dest_surf_state, 0, sizeof(dest_surf_state));
 
commit 1f43de322b400dcd64eb4545a978ad9b1c7de185
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 15:39:39 2012 +0100

    sna: Exclude consideration of tiling flags from overwriting BLT commands
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 82c61df..e84c87f 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -431,7 +431,7 @@ static void sna_blt_copy_one(struct sna *sna,
 	/* Compare against a previous fill */
 	if (kgem->nbatch >= 6 &&
 	    blt->overwrites &&
-	    kgem->batch[kgem->nbatch-6] == ((blt->cmd & ~XY_SRC_COPY_BLT_CMD) | XY_COLOR_BLT) &&
+	    kgem->batch[kgem->nbatch-6] == (XY_COLOR_BLT | (blt->cmd & BLT_WRITE_ALPHA | BLT_WRITE_RGB)) &&
 	    kgem->batch[kgem->nbatch-4] == ((uint32_t)dst_y << 16 | (uint16_t)dst_x) &&
 	    kgem->batch[kgem->nbatch-3] == ((uint32_t)(dst_y+height) << 16 | (uint16_t)(dst_x+width)) &&
 	    kgem->reloc[kgem->nreloc-1].target_handle == blt->bo[1]->handle) {
commit ebb1c9d5f82e8822f7400ff11a887ab047a0d78e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 15:28:31 2012 +0100

    sna: Make the bo-is-busy DBG more useful by saying which bo it is
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index deff5df..e4a5b4c 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -474,8 +474,8 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 
 static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
-		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	DBG_HDR(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
+		 bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
 	return bo->rq;
 }
 
commit 6dc5ddfe807b307ca10af971c4f84498b2fb82a2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 13:49:35 2012 +0100

    sna: Remove bogus check for color == 0
    
    This was written when the RGBA color value was being passed around and
    not the pointer to the xRenderColor. As such, the NULL deref check
    doesn't gain much and the check for rgba==0 irrelevant in this scenario.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index c3c6666..e06e3c2 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -154,10 +154,8 @@ no_render_fill_boxes(struct sna *sna,
 	     __FUNCTION__, op,
 	     color->red, color->green, color->blue, color->alpha));
 
-	if (color == 0)
-		op = PictOpClear;
-
 	if (op == PictOpClear) {
+		pixel = 0;
 		alu = GXclear;
 		op = PictOpSrc;
 	}
@@ -170,7 +168,8 @@ no_render_fill_boxes(struct sna *sna,
 	if (op != PictOpSrc)
 		return FALSE;
 
-	if (!sna_get_pixel_from_rgba(&pixel,
+	if (alu == GXcopy &&
+	    !sna_get_pixel_from_rgba(&pixel,
 				     color->red,
 				     color->green,
 				     color->blue,
commit 9b3937228c692598899fb39bfe448a9b457315db
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 13:47:58 2012 +0100

    sna: Simplify selecting default tiling for framebuffers
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index eb85cb4..afd9ed7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -477,21 +477,19 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap)
 {
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	uint32_t tiling = default_tiling(pixmap);
-	uint32_t bit;
+	uint32_t tiling, bit;
 
 	/* Use tiling by default, but disable per user request */
 	if (pixmap->usage_hint == SNA_CREATE_FB) {
-		tiling = I915_TILING_X;
+		tiling = -I915_TILING_X;
 		bit = SNA_TILING_FB;
-	} else
+	} else {
+		tiling = default_tiling(pixmap);
 		bit = SNA_TILING_2D;
+	}
 	if ((sna->tiling && (1 << bit)) == 0)
 		tiling = I915_TILING_NONE;
 
-	if (pixmap->usage_hint == SNA_CREATE_FB)
-		tiling = -tiling;
-
 	/* Also adjust tiling if it is not supported or likely to
 	 * slow us down,
 	 */
commit ff91fd4a2749f1ea8722189aaec938e51bce4222
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:41:05 2012 +0100

    uxa: Check for failure from drmModeGetConnector()
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index fcb3239..7d75abb 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -717,6 +717,12 @@ intel_output_detect(xf86OutputPtr output)
 	drmModeFreeConnector(intel_output->mode_output);
 	intel_output->mode_output =
 		drmModeGetConnector(mode->fd, intel_output->output_id);
+	if (intel_output->mode_output == NULL) {
+		/* and hope we are safe everywhere else */
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "drmModeGetConnector failed, reporting output disconnected\n");
+		return XF86OutputStatusDisconnected;
+	}
 
 	switch (intel_output->mode_output->connection) {
 	case DRM_MODE_CONNECTED:
commit 4712a4008aaff7b3c57a8dd0a5e639992c9b6d30
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:40:56 2012 +0100

    shadow

diff --git a/src/intel_shadow.c b/src/intel_shadow.c
index 1342b79..6892567 100644
--- a/src/intel_shadow.c
+++ b/src/intel_shadow.c
@@ -182,7 +182,7 @@ void intel_shadow_create(struct intel_screen_private *intel)
 		free(intel->shadow_buffer);
 		intel->shadow_buffer = buffer;
 	} else {
-		free(bufer);
+		free(buffer);
 		stride = intel->shadow_stride;
 	}
 
commit 63a55e9db97b647cbf7385403a0f30ee4d24dc3b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:36:11 2012 +0100

    uxa/shadow: Free the buffer along the unexpected failure to attach to the pixmap
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_shadow.c b/src/intel_shadow.c
index 393a038..1342b79 100644
--- a/src/intel_shadow.c
+++ b/src/intel_shadow.c
@@ -181,8 +181,10 @@ void intel_shadow_create(struct intel_screen_private *intel)
 				       stride, buffer)) {
 		free(intel->shadow_buffer);
 		intel->shadow_buffer = buffer;
-	} else
+	} else {
+		free(bufer);
 		stride = intel->shadow_stride;
+	}
 
 	if (!intel->shadow_damage) {
 		intel->shadow_damage =
commit c9bd4ed7d7d5c9ba6924e911652e03abc9267865
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:29:58 2012 +0100

    legacy/i810: Silence a compiler warning for missing 'const'
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_common.h b/src/legacy/i810/i810_common.h
index 14b2993..efa2a7d 100644
--- a/src/legacy/i810/i810_common.h
+++ b/src/legacy/i810/i810_common.h
@@ -77,7 +77,7 @@ static inline void memcpy_volatile(volatile void *dst, const void *src,
     int i;
     
     for (i = 0; i < len; i++)
-	((volatile char *)dst)[i] = ((volatile char *)src)[i];
+	((volatile char *)dst)[i] = ((const volatile char *)src)[i];
 }
 
 /* Memory mapped register access macros */
commit 6f8b411b8a833cca6fce8888ce515db6413d57ca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:21:27 2012 +0100

    uxa: Preferred order for calloc is (count, size)
    
    And assert that the allocation succeeds for good measure. A great big
    dollop of graceful failure is missing...
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index 7bfbcbd..75c99e2 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2342,8 +2342,10 @@ void gen4_render_state_init(ScrnInfoPtr scrn)
 
 	intel->surface_used = 0;
 
-	if (intel->gen4_render_state == NULL)
-		intel->gen4_render_state = calloc(sizeof(*render), 1);
+	if (intel->gen4_render_state == NULL) {
+		intel->gen4_render_state = calloc(1, sizeof(*render));
+		assert(intel->gen4_render_state != NULL);
+	}
 
 	if (INTEL_INFO(intel)->gen >= 60)
 		return gen6_render_state_init(scrn);
commit 72a29aa74a1488269c2184300e573d060c49e232
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:21:27 2012 +0100

    sna: Check that the buffer was allocated prior to caching it
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 7feaa24..c3c6666 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1139,7 +1139,8 @@ sna_render_picture_extract(struct sna *sna,
 						      &box,
 						      pixmap->devKind,
 						      pixmap->drawable.bitsPerPixel);
-			if (pixmap->usage_hint == 0 &&
+			if (bo != NULL &&
+			    pixmap->usage_hint == 0 &&
 			    box.x2 - box.x1 == pixmap->drawable.width &&
 			    box.y2 - box.y1 == pixmap->drawable.height) {
 				struct sna_pixmap *priv = sna_pixmap(pixmap);
commit 9bff89b1892cee90a80909255876240126b700b2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:21:27 2012 +0100

    legacy/i810: Be paranoid and guard against xf86GetPciInfoForEntity
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 6ead393..4a73e9b 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -254,7 +254,7 @@ I810PreInit(ScrnInfoPtr scrn, int flags)
    pI810 = I810PTR(scrn);
 
    pI810->pEnt = xf86GetEntityInfo(scrn->entityList[0]);
-   if (pI810->pEnt->location.type != BUS_PCI)
+   if (pI810->pEnt == NULL || pI810->pEnt->location.type != BUS_PCI)
       return FALSE;
 
    if (flags & PROBE_DETECT) {
commit ed6004f7ad09329bd4097331aaff38edc83107b6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:21:27 2012 +0100

    uxa: Be paranoid and guard against xf86GetPciInfoForEntity
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index fadc0a6..190517f 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -480,6 +480,8 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
+	if (pEnt == NULL || pEnt->location.type != BUS_PCI)
+		return FALSE;
 
 	if (flags & PROBE_DETECT)
 		return TRUE;
@@ -497,9 +499,6 @@ static Bool I830PreInit(ScrnInfoPtr scrn, int flags)
 
 	scrn->displayWidth = 640;	/* default it */
 
-	if (intel->pEnt->location.type != BUS_PCI)
-		return FALSE;
-
 	intel->PciInfo = xf86GetPciInfoForEntity(intel->pEnt->index);
 
 	if (!intel_open_drm_master(scrn))
commit abb8d893742f4ba9567330da9706adda4e636e75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:21:27 2012 +0100

    sna: Be paranoid and guard against xf86GetPciInfoForEntity
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index a1581f3..59ee57f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -426,7 +426,7 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
-	if (pEnt->location.type != BUS_PCI)
+	if (pEnt == NULL || pEnt->location.type != BUS_PCI)
 		return FALSE;
 
 	if (flags & PROBE_DETECT)
commit 782cd6eafa9917a5bdb181d66b7f4c05392950ba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:20:17 2012 +0100

    uxa: check for failure to allocate drmModeCrtc
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 609cbf7..fcb3239 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -680,6 +680,11 @@ intel_crtc_init(ScrnInfoPtr scrn, struct intel_mode *mode, int num)
 
 	intel_crtc->mode_crtc = drmModeGetCrtc(mode->fd,
 					       mode->mode_res->crtcs[num]);
+	if (intel_crtc->mode_crtc == NULL) {
+		free(intel_crtc);
+		return;
+	}
+
 	intel_crtc->mode = mode;
 	crtc->driver_private = intel_crtc;
 
commit a99bf0125f8f166ff2d65449ecd376c99c3024b8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:15:51 2012 +0100

    uxa/i965: Drop superfluous 'state' variable
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index c4c1dd3..7bfbcbd 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2538,20 +2538,21 @@ gen6_composite_create_blend_state(intel_screen_private *intel)
 static drm_intel_bo *
 gen6_composite_create_depth_stencil_state(intel_screen_private *intel)
 {
-	struct gen6_depth_stencil_state *state;
 	drm_intel_bo *depth_stencil_bo;
 	int ret;
 
-	depth_stencil_bo = drm_intel_bo_alloc(intel->bufmgr,
-					"gen6 DEPTH_STENCIL state",
-					sizeof(*state),
-					4096);
+	depth_stencil_bo =
+		drm_intel_bo_alloc(intel->bufmgr,
+				   "gen6 DEPTH_STENCIL state",
+				   sizeof(struct gen6_depth_stencil_state),
+				   4096);
 	assert(depth_stencil_bo);
 
 	ret = drm_intel_bo_map(depth_stencil_bo, TRUE);
 	assert(ret == 0);
 
-	state = memset(depth_stencil_bo->virtual, 0, sizeof(*state));
+	memset(depth_stencil_bo->virtual, 0,
+	       sizeof(struct gen6_depth_stencil_state));
 	drm_intel_bo_unmap(depth_stencil_bo);
 
 	return depth_stencil_bo;
commit 15a00ba047fdb7d388a322f2e33894d20787af2f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:09:52 2012 +0100

    sna/gen2: Remove double 'const'
    
    So constant, we const'ed it twice.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index e1b31d7..7b3e1ec 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -977,7 +977,7 @@ static void gen2_vertex_flush(struct sna *sna,
 }
 
 inline static int gen2_get_rectangles(struct sna *sna,
-				      const const struct sna_composite_op *op,
+				      const struct sna_composite_op *op,
 				      int want)
 {
 	struct gen2_render_state *state = &sna->render_state.gen2;
commit 99129c369c16d9ff7588a0e59f9285cc339b7eb3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:08:11 2012 +0100

    sna/gen4+: Use an explicit invalid value for alu
    
    Since the static analyser also doesn't like comparing a uint8_t against
    ~0.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index f982b41..80f6a95 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2916,7 +2916,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	    (prefer_blt(sna) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen4_check_dst_format(format))) {
-		uint8_t alu = ~0;
+		uint8_t alu = GXinvalid;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -2929,7 +2929,7 @@ gen4_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != ~0 &&
+		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 56db4a0..6746a58 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3250,7 +3250,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen5_check_dst_format(format))) {
-		uint8_t alu = ~0;
+		uint8_t alu = GXinvalid;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3263,7 +3263,7 @@ gen5_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != ~0 &&
+		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index cc6cc50..87a2c9c 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3661,7 +3661,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna, dst_bo) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen6_check_dst_format(format))) {
-		uint8_t alu = ~0;
+		uint8_t alu = GXinvalid;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3674,7 +3674,7 @@ gen6_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != ~0 &&
+		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 50eb07e..e30d941 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3743,7 +3743,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna, dst_bo) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen7_check_dst_format(format))) {
-		uint8_t alu = ~0;
+		uint8_t alu = GXinvalid;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3756,7 +3756,7 @@ gen7_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != ~0 &&
+		if (alu != GXinvalid &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index b9360c3..9db1891 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -7,6 +7,8 @@
 
 #define GRADIENT_CACHE_SIZE 16
 
+#define GXinvalid 0xff
+
 struct sna;
 struct sna_glyph;
 struct sna_video;
commit fae9c054d47203b68a6c09647945d23074ea4df9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:04:30 2012 +0100

    legacy/i810: Remove unused variable
    
    In order to reduce the volumes of output from static analysers.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index a98ed43..24632d4 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -659,9 +659,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
    pI810->cursorARGBHandle = agpHandle;
 
    if (agpHandle != DRM_AGP_NO_HANDLE) {
- 	int r;
-
-      if ((r = drmAgpBind(pI810->drmSubFD, agpHandle, tom)) == 0) {
+      if (drmAgpBind(pI810->drmSubFD, agpHandle, tom) == 0) {
 	 xf86DrvMsg(pScrn->scrnIndex, X_INFO,
 		    "[agp] GART: Allocated 16K for ARGB mouse cursor image\n");
 	 pI810->CursorARGBStart = tom;
commit 880ec2c9a56d3de33c7fbbfa4c043082fbcf7a78
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 12:04:30 2012 +0100

    legacy/i810: Reorder DRI teardown code to avoid potential NULL derefs
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 28a3b91..a98ed43 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -898,30 +898,42 @@ I810DRICloseScreen(ScreenPtr pScreen)
 {
    ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
-   I810DRIPtr pI810DRI = (I810DRIPtr) pI810->pDRIInfo->devPrivate;
 
-   if (pI810DRI->irq) {
-       drmCtlUninstHandler(pI810->drmSubFD);
-       pI810DRI->irq = 0;
-   }
+   if (pI810->pDRIInfo) {
+       I810DRIPtr pI810DRI = (I810DRIPtr) pI810->pDRIInfo->devPrivate;
+
+       if (pI810DRI) {
+	   if (pI810DRI->irq) {
+	       drmCtlUninstHandler(pI810->drmSubFD);
+	       pI810DRI->irq = 0;
+	   }
 
-   I810CleanupDma(pScrn);
+	   free(pI810->pDRIInfo->devPrivate);
+	   pI810->pDRIInfo->devPrivate = NULL;
+       }
+
+       I810CleanupDma(pScrn);
+
+       DRICloseScreen(pScreen);
+       DRIDestroyInfoRec(pI810->pDRIInfo);
+       pI810->pDRIInfo = NULL;
+   }
 
    if (pI810->dcacheHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->dcacheHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->dcacheHandle);
    if (pI810->backHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->backHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->backHandle);
    if (pI810->zHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->zHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->zHandle);
    if (pI810->cursorHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->cursorHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->cursorHandle);
    if (pI810->xvmcHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->xvmcHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->xvmcHandle);
    if (pI810->sysmemHandle!=DRM_AGP_NO_HANDLE)
-      drmAgpFree(pI810->drmSubFD, pI810->sysmemHandle);
+       drmAgpFree(pI810->drmSubFD, pI810->sysmemHandle);
 
    if (pI810->agpAcquired == TRUE)
-      drmAgpRelease(pI810->drmSubFD);
+       drmAgpRelease(pI810->drmSubFD);
 
    pI810->backHandle = DRM_AGP_NO_HANDLE;
    pI810->zHandle = DRM_AGP_NO_HANDLE;
@@ -930,17 +942,6 @@ I810DRICloseScreen(ScreenPtr pScreen)
    pI810->sysmemHandle = DRM_AGP_NO_HANDLE;
    pI810->agpAcquired = FALSE;
    pI810->dcacheHandle = DRM_AGP_NO_HANDLE;
-
-   DRICloseScreen(pScreen);
-
-   if (pI810->pDRIInfo) {
-      if (pI810->pDRIInfo->devPrivate) {
-	 free(pI810->pDRIInfo->devPrivate);
-	 pI810->pDRIInfo->devPrivate = NULL;
-      }
-      DRIDestroyInfoRec(pI810->pDRIInfo);
-      pI810->pDRIInfo = NULL;
-   }
 }
 
 static Bool
commit 7424ea7dd08e304baa4efa09e887b833737ac9f6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:59:24 2012 +0100

    uxa/dri: Free wait_info along DRI2WaitMSC error paths
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index a177815..4890fe4 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1522,7 +1522,7 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 				   strerror(errno));
 			limit--;
 		}
-		goto out_complete;
+		goto out_free;
 	}
 
 	current_msc = vbl.reply.sequence;
@@ -1556,7 +1556,7 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 					   strerror(errno));
 				limit--;
 			}
-			goto out_complete;
+			goto out_free;
 		}
 
 		wait_info->frame = vbl.reply.sequence;
@@ -1595,7 +1595,7 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 				   strerror(errno));
 			limit--;
 		}
-		goto out_complete;
+		goto out_free;
 	}
 
 	wait_info->frame = vbl.reply.sequence;
@@ -1603,6 +1603,8 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 
 	return TRUE;
 
+out_free:
+	i830_dri2_del_frame_event(draw, wait_info);
 out_complete:
 	DRI2WaitMSCComplete(client, draw, target_msc, 0, 0);
 	return TRUE;
commit ebd6dea009479e612c67d78416b8680bb23dba94
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:56:11 2012 +0100

    uxa/dri: Make sure is_glamor_pixmap is always initialised
    
    The code paths are too twisty to be sure otherwise.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 389ecdd..a177815 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -263,7 +263,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 	DRI2BufferPtr buffers;
 	I830DRI2BufferPrivatePtr privates;
 	PixmapPtr pixmap, pDepthPixmap;
-	int is_glamor_pixmap = FALSE;
+	bool is_glamor_pixmap = false;
 	int i;
 
 	buffers = calloc(count, sizeof *buffers);
@@ -282,7 +282,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 			pixmap = get_front_buffer(drawable);
 
 			if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
-				is_glamor_pixmap = TRUE;
+				is_glamor_pixmap = true;
 				drawable = &pixmap->drawable;
 				pixmap = NULL;
 			}
@@ -390,7 +390,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 	DRI2Buffer2Ptr buffer;
 	I830DRI2BufferPrivatePtr privates;
 	PixmapPtr pixmap;
-	int is_glamor_pixmap;
+	bool is_glamor_pixmap = false;
 
 	buffer = calloc(1, sizeof *buffer);
 	if (buffer == NULL)
@@ -406,7 +406,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 		pixmap = get_front_buffer(drawable);
 
 		if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
-			is_glamor_pixmap = TRUE;
+			is_glamor_pixmap = true;
 			drawable = &pixmap->drawable;
 			pixmap = NULL;
 		}
commit 33f0b4b96662d953052232b1c0477ced0c326a62
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:54:28 2012 +0100

    legacy/i810: Free offscreen image info struct on failure
    
    Impossible with the current code, the server aborts on failure. However,
    this looks to be the simple answer to keep static analysers quiet.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 729aa8b..613cbf2 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -1411,6 +1411,7 @@ I810InitOffscreenImages(ScreenPtr screen)
     offscreenImages[0].num_attributes = 1;
     offscreenImages[0].attributes = Attributes;
 
-    xf86XVRegisterOffscreenImages(screen, offscreenImages, 1);
+    if (!xf86XVRegisterOffscreenImages(screen, offscreenImages, 1))
+	    free(offscreenImages);
 }
 
commit d24340747389db971c04349d1ee517f195c2b28e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:53:26 2012 +0100

    sna: Free clip boxes (if allocated) along error path
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 0d6fbbb..3c6044c 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -646,8 +646,11 @@ tile:
 								       tmp.drawable.bitsPerPixel,
 								       KGEM_BUFFER_WRITE_INPLACE,
 								       &ptr);
-					if (!src_bo)
+					if (!src_bo) {
+						if (clipped != stack)
+							free(clipped);
 						goto fallback;
+					}
 
 					c = clipped;
 					for (n = 0; n < nbox; n++) {
@@ -683,8 +686,11 @@ tile:
 
 					kgem_bo_destroy(&sna->kgem, src_bo);
 
-					if (!n)
+					if (!n) {
+						if (clipped != stack)
+							free(clipped);
 						goto fallback;
+					}
 				}
 			}
 
commit 1215abc5c30cc3e183bde59b1523c09b59c484e3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:49:15 2012 +0100

    sna: Make the ignored return value explicit
    
    The return from __kgem_throttle_retire() is just a hint as to whether
    any forward progress was made. In the case of the error path, though it
    is a last ditch effort before aborting, so we do not really care whether
    or not it succeeds, we must try again.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 537a6b0..3d1e5be 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -191,7 +191,7 @@ retry_gtt:
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
 		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
 		       __FUNCTION__, bo->handle, errno);
-		__kgem_throttle_retire(kgem, 0);
+		(void)__kgem_throttle_retire(kgem, 0);
 		if (kgem_expire_cache(kgem))
 			goto retry_gtt;
 
commit e230b460515a043a1b7353d153e864754ca5e064
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:48:23 2012 +0100

    ux/i965: create_sampler_state_bo() expects enums, so feed it the right types
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index 508a846..c4c1dd3 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2325,9 +2325,13 @@ void gen4_render_state_init(ScrnInfoPtr scrn)
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct gen4_render_state *render;
 	const struct wm_kernel_info *wm_kernels;
-	int i, j, k, l, m;
+	sampler_state_filter_t src_filter;
+	sampler_state_extend_t src_extend;
+	sampler_state_filter_t mask_filter;
+	sampler_state_extend_t mask_extend;
 	drm_intel_bo *sf_kernel_bo, *sf_kernel_mask_bo;
 	drm_intel_bo *border_color_bo;
+	int m;
 
 	intel->needs_3d_invariant = TRUE;
 
@@ -2389,20 +2393,20 @@ void gen4_render_state_init(ScrnInfoPtr scrn)
 	 * kernel.
 	 */
 	border_color_bo = sampler_border_color_create(intel);
-	for (i = 0; i < FILTER_COUNT; i++) {
-		for (j = 0; j < EXTEND_COUNT; j++) {
-			for (k = 0; k < FILTER_COUNT; k++) {
-				for (l = 0; l < EXTEND_COUNT; l++) {
+	for (src_filter = 0; src_filter < FILTER_COUNT; src_filter++) {
+		for (src_extend = 0; src_extend < EXTEND_COUNT; src_extend++) {
+			for (mask_filter = 0; mask_filter < FILTER_COUNT; mask_filter++) {
+				for (mask_extend = 0; mask_extend < EXTEND_COUNT; mask_extend++) {
 					drm_intel_bo *sampler_state_bo;
 
 					sampler_state_bo =
 					    i965_create_sampler_state(intel,
-								      i, j,
-								      k, l,
+								      src_filter, src_extend,
+								      mask_filter, mask_extend,
 								      border_color_bo);
 
 					for (m = 0; m < KERNEL_COUNT; m++) {
-						render->wm_state_bo[m][i][j][k][l] =
+						render->wm_state_bo[m][src_filter][src_extend][mask_filter][mask_extend] =
 							gen4_create_wm_state
 							(intel,
 							 wm_kernels[m]. has_mask,
@@ -2875,7 +2879,11 @@ gen6_render_state_init(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct gen4_render_state *render;
-	int i, j, k, l, m;
+	sampler_state_filter_t src_filter;
+	sampler_state_filter_t mask_filter;
+	sampler_state_extend_t src_extend;
+	sampler_state_extend_t mask_extend;
+	int m;
 	drm_intel_bo *border_color_bo;
 	const struct wm_kernel_info *wm_kernels;
 
@@ -2899,14 +2907,14 @@ gen6_render_state_init(ScrnInfoPtr scrn)
 
 	border_color_bo = sampler_border_color_create(intel);
 
-	for (i = 0; i < FILTER_COUNT; i++) {
-		for (j = 0; j < EXTEND_COUNT; j++) {
-			for (k = 0; k < FILTER_COUNT; k++) {
-				for (l = 0; l < EXTEND_COUNT; l++) {
-					render->ps_sampler_state_bo[i][j][k][l] =
+	for (src_filter = 0; src_filter < FILTER_COUNT; src_filter++) {
+		for (src_extend = 0; src_extend < EXTEND_COUNT; src_extend++) {
+			for (mask_filter = 0; mask_filter < FILTER_COUNT; mask_filter++) {
+				for (mask_extend = 0; mask_extend < EXTEND_COUNT; mask_extend++) {
+					render->ps_sampler_state_bo[src_filter][src_extend][mask_filter][mask_extend] =
 						i965_create_sampler_state(intel,
-								i, j,
-								k, l,
+									  src_filter, src_extend,
+									  mask_filter, mask_extend,
 								border_color_bo);
 				}
 			}
commit 072d7a8b42f04600c9d8054f3648642a1aaff57a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:40:01 2012 +0100

    sna/debug: Assert the non-existence of the relocation handle at source
    
    This should help the static analyzer pinpoint the blame and make it
    quieter.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem_debug.c b/src/sna/kgem_debug.c
index e833a6f..e46ffca 100644
--- a/src/sna/kgem_debug.c
+++ b/src/sna/kgem_debug.c
@@ -48,6 +48,7 @@ kgem_debug_get_reloc_entry(struct kgem *kgem, uint32_t offset)
 		if (kgem->reloc[i].offset == offset)
 			return kgem->reloc+i;
 
+	assert(!"valid relocation entry, unknown batch offset");
 	return NULL;
 }
 
@@ -265,7 +266,6 @@ decode_2d(struct kgem *kgem, uint32_t offset)
 		kgem_debug_print(data, offset, 3, "(%d,%d)\n",
 			  data[3] & 0xffff, data[3] >> 16);
 		reloc = kgem_debug_get_reloc_entry(kgem, offset+4);
-		assert(reloc);
 		kgem_debug_print(data, offset, 4, "dst offset 0x%08x [handle=%d, delta=%d, read=%x, write=%x (fenced? %d, tiling? %d)]\n",
 				 data[4],
 				 reloc->target_handle, reloc->delta,
commit 4a3c355e056339aed68b70470556633dea899b1b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jun 4 11:32:42 2012 +0100

    sna: Silence a compiler warning for loss of 'const' qualifier
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index 65d586c..e9d06eb 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -150,7 +150,7 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	   int16_t dst_x, int16_t dst_y,
 	   uint16_t width, uint16_t height)
 {
-	uint8_t *src_bytes;
+	const uint8_t *src_bytes;
 	uint8_t *dst_bytes;
 	int byte_width;
 
commit 791029cc16bca36b8dec82297ff7e07a972c51ab
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 13:28:43 2012 +0100

    sna/trapezoids: Implement trapezoidal opaque fills inplace
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/compiler.h b/src/sna/compiler.h
index 364ea62..653435f 100644
--- a/src/sna/compiler.h
+++ b/src/sna/compiler.h
@@ -32,6 +32,7 @@
 #define likely(expr) (__builtin_expect (!!(expr), 1))
 #define unlikely(expr) (__builtin_expect (!!(expr), 0))
 #define noinline __attribute__((noinline))
+#define force_inline __attribute__((always_inline))
 #define fastcall __attribute__((regparm(3)))
 #define must_check __attribute__((warn_unused_result))
 #define constant __attribute__((const))
@@ -39,6 +40,7 @@
 #define likely(expr) (expr)
 #define unlikely(expr) (expr)
 #define noinline
+#define force_inline
 #define fastcall
 #define must_check
 #define constant
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index d65c249..e2212b9 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3666,7 +3666,10 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 struct inplace {
 	uint32_t stride;
 	uint8_t *ptr;
-	uint8_t opacity;
+	union {
+		uint8_t opacity;
+		uint32_t color;
+	};
 };
 
 static inline uint8_t
@@ -3838,6 +3841,110 @@ tor_blt_add_clipped(struct sna *sna,
 	pixman_region_fini(&region);
 }
 
+#define ONE_HALF 0x7f
+#define RB_MASK 0x00ff00ff
+#define RB_ONE_HALF 0x007f007f
+#define RB_MASK_PLUS_ONE 0x01000100
+#define G_SHIFT 8
+
+static force_inline uint32_t
+mul8x2_8 (uint32_t a, uint8_t b)
+{
+	uint32_t t = (a & RB_MASK) * b + RB_ONE_HALF;
+	return ((t + ((t >> G_SHIFT) & RB_MASK)) >> G_SHIFT) & RB_MASK;
+}
+
+static force_inline uint32_t
+add8x2_8x2(uint32_t a, uint32_t b)
+{
+	uint32_t t = a + b;
+	t |= RB_MASK_PLUS_ONE - ((t >> G_SHIFT) & RB_MASK);
+	return t & RB_MASK;
+}
+
+static force_inline uint32_t
+lerp8x4(uint32_t src, uint8_t a, uint32_t dst)
+{
+	return (add8x2_8x2(mul8x2_8(src, a),
+			   mul8x2_8(dst, ~a)) |
+		add8x2_8x2(mul8x2_8(src >> G_SHIFT, a),
+			   mul8x2_8(dst >> G_SHIFT, ~a)) << G_SHIFT);
+}
+
+static void
+tor_blt_lerp32(struct sna *sna,
+	       struct sna_composite_spans_op *op,
+	       pixman_region16_t *clip,
+	       const BoxRec *box,
+	       int coverage)
+{
+	struct inplace *in = (struct inplace *)op;
+	uint32_t *ptr = (uint32_t *)in->ptr;
+	int stride = in->stride / sizeof(uint32_t);
+	int h, w, i;
+
+	if (coverage == 0)
+		return;
+
+	ptr += box->y1 * stride + box->x1;
+
+	h = box->y2 - box->y1;
+	w = box->x2 - box->x1;
+	if (coverage == FAST_SAMPLES_XY) {
+		if ((w | h) == 1) {
+			*ptr = in->color;
+		} else {
+			if (w < 16) {
+				do {
+					for (i = 0; i < w; i++)
+						ptr[i] = in->color;
+					ptr += stride;
+				} while (--h);
+			} else {
+				pixman_fill(ptr, stride, 32,
+					    0, 0, w, h, in->color);
+			}
+		}
+	} else {
+		coverage = coverage * 256 / FAST_SAMPLES_XY;
+		coverage -= coverage >> 8;
+
+		if ((w | h) == 1) {
+			*ptr = lerp8x4(in->color, coverage, *ptr);
+		} else if (w == 1) {
+			do {
+				*ptr = lerp8x4(in->color, coverage, *ptr);
+				ptr += stride;
+			} while (--h);
+		} else{
+			do {
+				for (i = 0; i < w; i++)
+					ptr[i] = lerp8x4(in->color, coverage, ptr[i]);
+				ptr += stride;
+			} while (--h);
+		}
+	}
+}
+
+static void
+tor_blt_lerp32_clipped(struct sna *sna,
+		       struct sna_composite_spans_op *op,
+		       pixman_region16_t *clip,
+		       const BoxRec *box,
+		       int coverage)
+{
+	pixman_region16_t region;
+	int n;
+
+	pixman_region_init_rects(&region, box, 1);
+	RegionIntersect(&region, &region, clip);
+	n = REGION_NUM_RECTS(&region);
+	box = REGION_RECTS(&region);
+	while (n--)
+		tor_blt_lerp32(sna, op, NULL, box++, coverage);
+	pixman_region_fini(&region);
+}
+
 struct mono_inplace_composite {
 	pixman_image_t *src, *dst;
 	int dx, dy;
@@ -4130,6 +4237,134 @@ unbounded_pass:
 }
 
 static bool
+trapezoid_span_inplace__x8r8g8b8(CARD8 op,
+				 uint32_t color,
+				 PicturePtr src,
+				 PicturePtr dst,
+				 PictFormatPtr maskFormat,
+				 int ntrap, xTrapezoid *traps)
+{
+	struct tor tor;
+	span_func_t span;
+	RegionRec region;
+	int16_t dst_x, dst_y;
+	int dx, dy;
+	int n;
+
+	if (op == PictOpOver && (color >> 24) == 0xff)
+		op = PictOpSrc;
+	if (op == PictOpOver) {
+		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
+		if (priv && priv->clear && priv->clear_color == 0)
+			op = PictOpSrc;
+	}
+
+	switch (op) {
+	case PictOpSrc:
+		break;
+	default:
+		DBG(("%s: fallback -- can not perform op [%d] in place\n",
+		     __FUNCTION__, op));
+		return false;
+	}
+
+	DBG(("%s: format=%x, op=%d, color=%x\n",
+	     __FUNCTION__, dst->format, op, color));
+
+	if (maskFormat == NULL && ntrap > 1) {
+		DBG(("%s: individual rasterisation requested\n",
+		     __FUNCTION__));
+		do {
+			/* XXX unwind errors? */
+			if (!trapezoid_span_inplace__x8r8g8b8(op, color,
+							      src, dst, NULL,
+							      1, traps++))
+				return false;
+		} while (--ntrap);
+		return true;
+	}
+
+	trapezoids_bounds(ntrap, traps, &region.extents);
+	if (region.extents.y1 >= region.extents.y2 ||
+	    region.extents.x1 >= region.extents.x2)
+		return true;
+
+	DBG(("%s: extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	if (!sna_compute_composite_extents(&region.extents,
+					   src, NULL, dst,
+					   0, 0,
+					   0, 0,
+					   region.extents.x1, region.extents.y1,
+					   region.extents.x2 - region.extents.x1,
+					   region.extents.y2 - region.extents.y1))
+		return true;
+
+	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	if (tor_init(&tor, &region.extents, 2*ntrap))
+		return true;
+
+	dx = dst->pDrawable->x * FAST_SAMPLES_X;
+	dy = dst->pDrawable->y * FAST_SAMPLES_Y;
+
+	for (n = 0; n < ntrap; n++) {
+		xTrapezoid t;
+
+		if (!project_trapezoid_onto_grid(&traps[n], dx, dy, &t))
+			continue;
+
+		if (pixman_fixed_to_int(traps[n].top) >= region.extents.y2 - dst->pDrawable->y ||
+		    pixman_fixed_to_int(traps[n].bottom) < region.extents.y1 - dst->pDrawable->y)
+			continue;
+
+		tor_add_edge(&tor, &t, &t.left, 1);
+		tor_add_edge(&tor, &t, &t.right, -1);
+	}
+
+	switch (op) {
+	case PictOpSrc:
+		if (dst->pCompositeClip->data)
+			span = tor_blt_lerp32_clipped;
+		else
+			span = tor_blt_lerp32;
+		break;
+	}
+
+	DBG(("%s: move-to-cpu\n", __FUNCTION__));
+	region.data = NULL;
+	if (sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
+					    MOVE_WRITE | MOVE_READ)) {
+		PixmapPtr pixmap;
+		struct inplace inplace;
+
+		pixmap = get_drawable_pixmap(dst->pDrawable);
+
+		get_drawable_deltas(dst->pDrawable, pixmap, &dst_x, &dst_y);
+
+		inplace.ptr = pixmap->devPrivate.ptr;
+		inplace.ptr += dst_y * pixmap->devKind + dst_x;
+		inplace.stride = pixmap->devKind;
+		inplace.color = color;
+
+		DBG(("%s: render inplace op=%d, color=%08x\n",
+		     __FUNCTION__, op, color));
+		tor_render(NULL, &tor, (void*)&inplace,
+			   dst->pCompositeClip, span, false);
+
+		tor_fini(&tor);
+	}
+
+	return true;
+}
+
+static bool
 trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		       PictFormatPtr maskFormat, INT16 src_x, INT16 src_y,
 		       int ntrap, xTrapezoid *traps,
@@ -4178,6 +4413,11 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 	}
 
+	if (dst->format == PICT_a8r8g8b8 || dst->format == PICT_x8r8g8b8)
+		return trapezoid_span_inplace__x8r8g8b8(op, color,
+							src, dst, maskFormat,
+							ntrap, traps);
+
 	if (dst->format != PICT_a8) {
 		DBG(("%s: fallback -- can not perform operation in place, format=%x\n",
 		     __FUNCTION__, dst->format));
@@ -4185,36 +4425,49 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	pixmap = get_drawable_pixmap(dst->pDrawable);
-	priv = sna_pixmap(pixmap);
-	if (priv == NULL) {
-		DBG(("%s: fallback -- unattached\n", __FUNCTION__));
-		return false;
-	}
 
 	unbounded = false;
-	switch (op) {
-	case PictOpAdd:
-		if (priv->clear && priv->clear_color == 0) {
+	priv = sna_pixmap(pixmap);
+	if (priv) {
+		switch (op) {
+		case PictOpAdd:
+			if (priv->clear && priv->clear_color == 0) {
+				unbounded = true;
+				op = PictOpSrc;
+			}
+			if ((color >> 24) == 0)
+				return true;
+			break;
+		case PictOpIn:
+			if (priv->clear && priv->clear_color == 0)
+				return true;
+			if (priv->clear && priv->clear_color == 0xff)
+				op = PictOpSrc;
 			unbounded = true;
-			op = PictOpSrc;
+			break;
+		case PictOpSrc:
+			unbounded = true;
+			break;
+		default:
+			DBG(("%s: fallback -- can not perform op [%d] in place\n",
+			     __FUNCTION__, op));
+			return false;
+		}
+	} else {
+		switch (op) {
+		case PictOpAdd:
+			if ((color >> 24) == 0)
+				return true;
+			break;
+		case PictOpIn:
+		case PictOpSrc:
+			unbounded = true;
+			break;
+		default:
+			DBG(("%s: fallback -- can not perform op [%d] in place\n",
+			     __FUNCTION__, op));
+			return false;
 		}
-		if ((color >> 24) == 0)
-			return true;
-		break;
-	case PictOpIn:
-		if (priv->clear && priv->clear_color == 0)
-			return true;
-		if (priv->clear && priv->clear_color == 0xff)
-			op = PictOpSrc;
-		unbounded = true;
-		break;
-	case PictOpSrc:
-		unbounded = true;
-		break;
-	default:
-		DBG(("%s: fallback -- can not perform op [%d] in place\n",
-		     __FUNCTION__, op));
-		return false;
 	}
 
 	DBG(("%s: format=%x, op=%d, color=%x\n",
commit 1f78a934a423911e18d340f0585e31941f6e8663
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 13:27:52 2012 +0100

    sna: Clear the counters prior to querying the property
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index c93f472..fbf35cc 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1085,6 +1085,8 @@ sna_output_attach_edid(xf86OutputPtr output)
 
 		VG_CLEAR(prop);
 		prop.prop_id = koutput->props[i];
+		prop.count_values = 0;
+		prop.count_enum_blobs = 0;
 		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
 			continue;
 
@@ -1280,6 +1282,8 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 
 		VG_CLEAR(prop);
 		prop.prop_id = koutput->props[i];
+		prop.count_values = 0;
+		prop.count_enum_blobs = 0;
 		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
 			continue;
 
commit a1953f1d4abc6e158a5e3ca53d3207548842254c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 12:02:55 2012 +0100

    sna: Prefer to use memset() for extremely large clears
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 9dc3808..973a657 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -868,7 +868,12 @@ fallback:
 						  dst->format))
 			goto fallback_composite;
 
-		do {
+		if (pixel == 0 &&
+		    box->x2 - box->x1 == pixmap->drawable.width &&
+		    box->y2 - box->y1 == pixmap->drawable.height) {
+			memset(pixmap->devPrivate.ptr, 0,
+			       pixmap->devKind*pixmap->drawable.height);
+		} else do {
 			DBG(("%s: fallback fill: (%d, %d)x(%d, %d) %08x\n",
 			     __FUNCTION__,
 			     box->x1, box->y1,
commit ce85cd1a36e31795a966ea8983c2d6f803a4eccd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 11:46:06 2012 +0100

    sna: Add some DBG to retrieving EDID
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 2e1be46..c93f472 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1072,13 +1072,16 @@ sna_output_attach_edid(xf86OutputPtr output)
 	struct sna *sna = to_sna(output->scrn);
 	struct sna_output *sna_output = output->driver_private;
 	drmModeConnectorPtr koutput = sna_output->mode_output;
-	drmModePropertyBlobPtr edid_blob = NULL;
+	void *raw = NULL;
+	int raw_length = 0;
 	xf86MonPtr mon = NULL;
 	int i;
 
 	/* look for an EDID property */
 	for (i = 0; i < koutput->count_props; i++) {
 		struct drm_mode_get_property prop;
+		struct drm_mode_get_blob blob;
+		void *tmp;
 
 		VG_CLEAR(prop);
 		prop.prop_id = koutput->props[i];
@@ -1091,23 +1094,41 @@ sna_output_attach_edid(xf86OutputPtr output)
 		if (strcmp(prop.name, "EDID"))
 			continue;
 
-		drmModeFreePropertyBlob(edid_blob);
-		edid_blob = drmModeGetPropertyBlob(sna->kgem.fd,
-						   koutput->prop_values[i]);
-	}
+		VG_CLEAR(blob);
+		blob.length = 0;
+		blob.data =0;
+		blob.blob_id = koutput->prop_values[i];
+
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob))
+			continue;
 
-	if (edid_blob) {
-		mon = xf86InterpretEDID(output->scrn->scrnIndex,
-					edid_blob->data);
+		DBG(("%s: retreiving blob (property %d, id=%d, value=%ld), length=%d\n",
+		     __FUNCTION__, i, koutput->props[i], (long)koutput->prop_values[i],
+		     blob.length));
 
-		if (mon && edid_blob->length > 128)
+		tmp = malloc(blob.length);
+		if (tmp == NULL)
+			continue;
+
+		blob.data = (uintptr_t)tmp;
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPBLOB, &blob)) {
+			free(tmp);
+			continue;
+		}
+
+		free(raw);
+		raw = tmp;
+		raw_length = blob.length;
+	}
+
+	if (raw) {
+		mon = xf86InterpretEDID(output->scrn->scrnIndex, raw);
+		if (mon && raw_length > 128)
 			mon->flags |= MONITOR_EDID_COMPLETE_RAWDATA;
 	}
 
 	xf86OutputSetEDID(output, mon);
-
-	if (0&&edid_blob)
-		drmModeFreePropertyBlob(edid_blob);
+	free(raw);
 }
 
 static DisplayModePtr
commit e8eb273bd6153c232a9ffc558e3b7fd4beaab01b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 10:43:03 2012 +0100

    sna/gen7: Add DBG for ring switching
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 2ee5b6f..50eb07e 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4233,8 +4233,11 @@ gen7_render_context_switch(struct kgem *kgem,
 	if (!new_mode)
 		return;
 
-	if (kgem->mode)
+	if (kgem->mode) {
+		DBG(("%s: switch rings %d -> %d\n",
+		     __FUNCTION__, kgem->mode, new_mode));
 		kgem_submit(kgem);
+	}
 
 	kgem->ring = new_mode;
 }
commit 722afa6bc910a2ccfbb0442872a878d1b6b78315
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 10:04:02 2012 +0100

    sna: Remove some unused members from the KMS state tracking
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9fd59f8..2e1be46 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -54,17 +54,14 @@
 #endif
 
 struct sna_crtc {
-	struct sna *sna;
 	struct drm_mode_modeinfo kmode;
 	PixmapPtr shadow;
 	uint32_t shadow_fb_id;
 	uint32_t cursor;
-	xf86CrtcPtr crtc;
-	int num;
-	int id;
-	int pipe;
-	int plane;
-	int active;
+	uint8_t id;
+	uint8_t pipe;
+	uint8_t plane;
+	uint8_t active;
 	struct list link;
 };
 
@@ -76,12 +73,10 @@ struct sna_property {
 };
 
 struct sna_output {
-	struct sna_mode *mode;
-	int output_id;
+	int id;
 	drmModeConnectorPtr mode_output;
 	int num_props;
 	struct sna_property *props;
-	void *private_data;
 
 	Bool has_panel_limits;
 	int panel_hdisplay;
@@ -91,7 +86,6 @@ struct sna_output {
 	const char *backlight_iface;
 	int backlight_active_level;
 	int backlight_max;
-	xf86OutputPtr output;
 	struct list link;
 };
 
@@ -104,7 +98,7 @@ sna_output_dpms(xf86OutputPtr output, int mode);
  * List of available kernel interfaces in priority order
  */
 static const char *backlight_interfaces[] = {
-	"sna", /* prefer our own native backlight driver */
+	"intel", /* prefer our own native backlight driver */
 	"asus-laptop",
 	"eeepc",
 	"thinkpad_screen",
@@ -493,15 +487,16 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 }
 
 static void
-sna_crtc_restore(struct sna *sna)
+sna_crtc_restore(ScrnInfoPtr scrn)
 {
-	ScrnInfoPtr scrn = sna->scrn;
+	struct sna *sna = to_sna(scrn);
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
 	struct kgem_bo *bo;
 	int i;
 
-	DBG(("%s (fb_pixmap=%d, front=%d)\n", __FUNCTION__,
-	     sna->mode.fb_pixmap, sna->front->drawable.serialNumber));
+	DBG(("%s (fb_pixmap=%ld, front=%ld)\n", __FUNCTION__,
+	     (long)sna->mode.fb_pixmap,
+	     (long)sna->front->drawable.serialNumber));
 
 	if (sna->mode.fb_pixmap == sna->front->drawable.serialNumber)
 		return;
@@ -548,7 +543,7 @@ sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 	     __FUNCTION__, sna_crtc->pipe, mode, mode == DPMSModeOn));
 
 	if (mode != DPMSModeOff)
-		sna_crtc_restore(sna_crtc->sna);
+		sna_crtc_restore(crtc->scrn);
 	else
 		sna_crtc->active = false;
 }
@@ -984,7 +979,6 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (sna_crtc == NULL)
 		return;
 
-	sna_crtc->num = num;
 	sna_crtc->id = mode->mode_res->crtcs[num];
 
 	VG_CLEAR(get_pipe);
@@ -1012,8 +1006,6 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 
 	sna_crtc->cursor = gem_create(sna->kgem.fd, 64*64*4);
 
-	sna_crtc->sna = sna;
-	sna_crtc->crtc = crtc;
 	list_add(&sna_crtc->link, &mode->crtcs);
 
 	DBG(("%s: attached crtc[%d] id=%d, pipe=%d\n",
@@ -1039,7 +1031,7 @@ sna_output_detect(xf86OutputPtr output)
 
 	drmModeFreeConnector(sna_output->mode_output);
 	sna_output->mode_output =
-		drmModeGetConnector(sna->kgem.fd, sna_output->output_id);
+		drmModeGetConnector(sna->kgem.fd, sna_output->id);
 
 	switch (sna_output->mode_output->connection) {
 	case DRM_MODE_CONNECTED:
@@ -1286,7 +1278,7 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 						  dpms);
 
 		drmModeConnectorSetProperty(sna->kgem.fd,
-					    sna_output->output_id,
+					    sna_output->id,
 					    prop.prop_id,
 					    dpms);
 
@@ -1497,7 +1489,7 @@ sna_output_set_property(xf86OutputPtr output, Atom property,
 				return FALSE;
 			val = *(uint32_t *)value->data;
 
-			drmModeConnectorSetProperty(sna->kgem.fd, sna_output->output_id,
+			drmModeConnectorSetProperty(sna->kgem.fd, sna_output->id,
 						    p->mode_prop->prop_id, (uint64_t)val);
 			return TRUE;
 		} else if (p->mode_prop->flags & DRM_MODE_PROP_ENUM) {
@@ -1515,7 +1507,7 @@ sna_output_set_property(xf86OutputPtr output, Atom property,
 			/* search for matching name string, then set its value down */
 			for (j = 0; j < p->mode_prop->count_enums; j++) {
 				if (!strcmp(p->mode_prop->enums[j].name, name)) {
-					drmModeConnectorSetProperty(sna->kgem.fd, sna_output->output_id,
+					drmModeConnectorSetProperty(sna->kgem.fd, sna_output->id,
 								    p->mode_prop->prop_id, p->mode_prop->enums[j].value);
 					return TRUE;
 				}
@@ -1683,9 +1675,8 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (!sna_output)
 		goto cleanup_output;
 
-	sna_output->output_id = mode->mode_res->connectors[num];
+	sna_output->id = mode->mode_res->connectors[num];
 	sna_output->mode_output = koutput;
-	sna_output->mode = mode;
 
 	output->mm_width = koutput->mmWidth;
 	output->mm_height = koutput->mmHeight;
@@ -1700,7 +1691,6 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	output->possible_clones = enc.possible_clones;
 	output->interlaceAllowed = TRUE;
 
-	sna_output->output = output;
 	list_add(&sna_output->link, &mode->outputs);
 
 	return;
@@ -1853,11 +1843,11 @@ static int do_page_flip(struct sna *sna, void *data, int ref_crtc_hw_id)
 		 * completion event. All other crtc's events will be discarded.
 		 */
 		evdata = (uintptr_t)data;
-		evdata |= sna_crtc_to_pipe(crtc->crtc) == ref_crtc_hw_id;
+		evdata |= crtc->pipe == ref_crtc_hw_id;
 
 		DBG(("%s: crtc %d [ref? %d] --> fb %d\n",
 		     __FUNCTION__, crtc_id(crtc),
-		     sna_crtc_to_pipe(crtc->crtc) == ref_crtc_hw_id,
+		     crtc->pipe == ref_crtc_hw_id,
 		     sna->mode.fb_id));
 		if (drmModePageFlip(sna->kgem.fd,
 				    crtc_id(crtc),
commit 26e7bb3f25f0c83d39ff505fa7e05bfcb976e39c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 09:16:51 2012 +0100

    sna: Add a DBG message to indicate flushing for GPU idle
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7b7190d..eb85cb4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12459,9 +12459,11 @@ set_tv:
 
 void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 {
+	DBG(("%s\n", __FUNCTION__));
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
 	if (!sna->kgem.need_retire) {
+		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
 		kgem_submit(&sna->kgem);
 		sna->kgem.flush_now = 0;
 	}
commit 58fc03b8c36688e9fa7925aa82b83d36c9decb7e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 08:53:24 2012 +0100

    sna: Prevent NULL deref with early termination and DBG enabled
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index f8e386b..a1581f3 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -213,6 +213,7 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 	return TRUE;
 
 cleanup_front:
+	screen->SetScreenPixmap(NULL);
 	screen->DestroyPixmap(sna->front);
 	sna->front = NULL;
 	return FALSE;
@@ -326,7 +327,8 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	err = drmSetInterfaceVersion(fd, &sv);
 	if (err != 0) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "[drm] failed to set drm interface version.\n");
+			   "[drm] failed to set drm interface version: %s [%d].\n",
+			   strerror(-err), -err);
 		drmClose(fd);
 		return -1;
 	}
@@ -362,8 +364,10 @@ static void sna_close_drm_master(ScrnInfoPtr scrn)
 {
 	struct sna_device *dev = sna_device(scrn);
 
-	DBG(("%s(open_count=%d)\n", __FUNCTION__, dev->open_count));
+	if (dev == NULL)
+		return;
 
+	DBG(("%s(open_count=%d)\n", __FUNCTION__, dev->open_count));
 	if (--dev->open_count)
 		return;
 
commit 1be2afb81df887ed664ac31d0641da808cfb8888
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 08:04:02 2012 +0100

    legacy/i810/video: Release memory after creating adaptors
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 2999ee0..729aa8b 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -155,40 +155,34 @@ static Atom xvBrightness, xvContrast, xvColorKey;
 void I810InitVideo(ScreenPtr screen)
 {
     ScrnInfoPtr pScrn = xf86ScreenToScrn(screen);
-    XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
-    XF86VideoAdaptorPtr newAdaptor = NULL;
+    XF86VideoAdaptorPtr *adaptors;
     int num_adaptors;
-	
-    if (pScrn->bitsPerPixel != 8) 
-    {
+
+    num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
+
+    if (pScrn->bitsPerPixel != 8) {
+	XF86VideoAdaptorPtr newAdaptor;
+
 	newAdaptor = I810SetupImageVideo(screen);
 	I810InitOffscreenImages(screen);
-    }
 
-    num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
+	if (newAdaptor) {
+	    XF86VideoAdaptorPtr *newAdaptors;
 
-    if(newAdaptor) {
-	if(!num_adaptors) {
-	    num_adaptors = 1;
-	    adaptors = &newAdaptor;
-	} else {
-	    newAdaptors =  /* need to free this someplace */
-		malloc((num_adaptors + 1) * sizeof(XF86VideoAdaptorPtr*));
-	    if(newAdaptors) {
-		memcpy(newAdaptors, adaptors, num_adaptors * 
-					sizeof(XF86VideoAdaptorPtr));
-		newAdaptors[num_adaptors] = newAdaptor;
+	    newAdaptors =
+		realloc(adaptors,
+			(num_adaptors + 1) * sizeof(XF86VideoAdaptorPtr));
+	    if (newAdaptors != NULL) {
+		newAdaptors[num_adaptors++] = newAdaptor;
 		adaptors = newAdaptors;
-		num_adaptors++;
 	    }
 	}
     }
 
-    if(num_adaptors)
-        xf86XVScreenInit(screen, adaptors, num_adaptors);
+    if (num_adaptors)
+	xf86XVScreenInit(screen, adaptors, num_adaptors);
 
-    if(newAdaptors)
-	free(newAdaptors);
+    free(adaptors);
 }
 
 /* *INDENT-OFF* */
commit 261d086265fff6c9b28a67d2fcceed8f107d5cb0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    legacy/i810/dri: Propagate failure from allocating texture memory
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 2b3f1e1..28a3b91 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -809,7 +809,12 @@ I810DRIScreenInit(ScreenPtr pScreen)
       return FALSE;
    }
 
-   I810AllocLow(&(pI810->TexMem), &(pI810->SysMem), pI810DRI->textureSize);
+   if (!I810AllocLow(&(pI810->TexMem), &(pI810->SysMem), pI810DRI->textureSize)) {
+      xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+		 "[agp] Texure memory allocation failed\n");
+      DRICloseScreen(pScreen);
+      return FALSE;
+   }
 
    if (drmAddMap(pI810->drmSubFD, (drm_handle_t) pI810->TexMem.Start,
 		 pI810->TexMem.Size, DRM_AGP, 0, 
commit 2ea12b56d42686f75deb33fc126d09acee7e2899
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    legacy/i810: Fix memset(sizeof(*ptr)) rather than memset(sizeof(ptr))
    
    Clear the entire structure and not the first 4 bytes...
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 2f02dd9..2b3f1e1 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -959,7 +959,7 @@ I810DRIFinishScreenInit(ScreenPtr pScreen)
    ScrnInfoPtr        pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr info  = I810PTR(pScrn);
 
-   memset(sPriv, 0, sizeof(sPriv));
+   memset(sPriv, 0, sizeof(*sPriv));
 
    /* Have shadow run only while there is 3d active.
     */
commit d3e15d1460faad192781d74fdc2b5092e9c148ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 08:14:23 2012 +0100

    uxa/i965: Make the unhandled allocation failures explicit
    
    Add assertions to the point of allocation and write failures.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index f9d3158..508a846 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -778,13 +778,16 @@ static drm_intel_bo *gen4_create_sf_state(intel_screen_private *intel,
 {
 	struct brw_sf_unit_state *sf_state;
 	drm_intel_bo *sf_state_bo;
+	int ret;
 
 	sf_state_bo = drm_intel_bo_alloc(intel->bufmgr, "gen4 SF state",
 					 sizeof(*sf_state), 4096);
-	drm_intel_bo_map(sf_state_bo, TRUE);
-	sf_state = sf_state_bo->virtual;
+	assert(sf_state_bo);
 
-	memset(sf_state, 0, sizeof(*sf_state));
+	ret = drm_intel_bo_map(sf_state_bo, TRUE);
+	assert(ret == 0);
+
+	sf_state = memset(sf_state_bo->virtual, 0, sizeof(*sf_state));
 	sf_state->thread0.grf_reg_count = BRW_GRF_BLOCKS(SF_KERNEL_NUM_GRF);
 	sf_state->thread0.kernel_start_pointer =
 	    intel_emit_reloc(sf_state_bo,
@@ -820,6 +823,7 @@ static drm_intel_bo *gen4_create_sf_state(intel_screen_private *intel,
 	drm_intel_bo_unmap(sf_state_bo);
 
 	return sf_state_bo;
+	(void)ret;
 }
 
 static drm_intel_bo *sampler_border_color_create(intel_screen_private *intel)
@@ -982,11 +986,16 @@ static drm_intel_bo *gen4_create_sampler_state(intel_screen_private *intel,
 {
 	drm_intel_bo *sampler_state_bo;
 	struct brw_sampler_state *sampler_state;
+	int ret;
 
 	sampler_state_bo =
 	    drm_intel_bo_alloc(intel->bufmgr, "gen4 sampler state",
 			       sizeof(struct brw_sampler_state) * 2, 4096);
-	drm_intel_bo_map(sampler_state_bo, TRUE);
+	assert(sampler_state_bo);
+
+	ret = drm_intel_bo_map(sampler_state_bo, TRUE);
+	assert(ret == 0);
+
 	sampler_state = sampler_state_bo->virtual;
 
 	gen4_sampler_state_init(sampler_state_bo,
@@ -999,6 +1008,7 @@ static drm_intel_bo *gen4_create_sampler_state(intel_screen_private *intel,
 	drm_intel_bo_unmap(sampler_state_bo);
 
 	return sampler_state_bo;
+	(void)ret;
 }
 
 static drm_intel_bo *
@@ -1011,11 +1021,16 @@ gen7_create_sampler_state(intel_screen_private *intel,
 {
 	drm_intel_bo *sampler_state_bo;
 	struct gen7_sampler_state *sampler_state;
+	int ret;
 
 	sampler_state_bo =
 	    drm_intel_bo_alloc(intel->bufmgr, "gen7 sampler state",
 			       sizeof(struct gen7_sampler_state) * 2, 4096);
-	drm_intel_bo_map(sampler_state_bo, TRUE);
+	assert(sampler_state_bo);
+
+	ret = drm_intel_bo_map(sampler_state_bo, TRUE);
+	assert(ret == 0);
+
 	sampler_state = sampler_state_bo->virtual;
 
 	gen7_sampler_state_init(sampler_state_bo,
@@ -1028,6 +1043,7 @@ gen7_create_sampler_state(intel_screen_private *intel,
 	drm_intel_bo_unmap(sampler_state_bo);
 
 	return sampler_state_bo;
+	(void)ret;
 }
 
 static inline drm_intel_bo *
@@ -1096,13 +1112,16 @@ static drm_intel_bo *gen4_create_wm_state(intel_screen_private *intel,
 {
 	struct brw_wm_unit_state *state;
 	drm_intel_bo *wm_state_bo;
+	int ret;
 
 	wm_state_bo = drm_intel_bo_alloc(intel->bufmgr, "gen4 WM state",
 					 sizeof(*state), 4096);
-	drm_intel_bo_map(wm_state_bo, TRUE);
-	state = wm_state_bo->virtual;
+	assert(wm_state_bo);
 
-	memset(state, 0, sizeof(*state));
+	ret = drm_intel_bo_map(wm_state_bo, TRUE);
+	assert(ret == 0);
+
+	state = memset(wm_state_bo->virtual, 0, sizeof(*state));
 	state->thread0.grf_reg_count = BRW_GRF_BLOCKS(PS_KERNEL_NUM_GRF);
 	state->thread0.kernel_start_pointer =
 	    intel_emit_reloc(wm_state_bo,
@@ -1162,21 +1181,27 @@ static drm_intel_bo *gen4_create_wm_state(intel_screen_private *intel,
 	drm_intel_bo_unmap(wm_state_bo);
 
 	return wm_state_bo;
+	(void)ret;
 }
 
 static drm_intel_bo *gen4_create_cc_viewport(intel_screen_private *intel)
 {
 	drm_intel_bo *bo;
 	struct brw_cc_viewport vp;
+	int ret;
 
 	vp.min_depth = -1.e35;
 	vp.max_depth = 1.e35;
 
 	bo = drm_intel_bo_alloc(intel->bufmgr, "gen4 render unit state",
 				sizeof(vp), 4096);
-	drm_intel_bo_subdata(bo, 0, sizeof(vp), &vp);
+	assert(bo);
+
+	ret = drm_intel_bo_subdata(bo, 0, sizeof(vp), &vp);
+	assert(ret == 0);
 
 	return bo;
+	(void)ret;
 }
 
 static drm_intel_bo *gen4_create_vs_unit_state(intel_screen_private *intel)
@@ -1204,14 +1229,18 @@ static drm_intel_bo *gen4_create_vs_unit_state(intel_screen_private *intel)
 static drm_intel_bo *gen4_create_cc_unit_state(intel_screen_private *intel)
 {
 	drm_intel_bo *cc_state_bo, *cc_vp_bo;
-	int i, j;
+	int i, j, ret;
 
 	cc_vp_bo = gen4_create_cc_viewport(intel);
 
 	cc_state_bo = drm_intel_bo_alloc(intel->bufmgr, "gen4 CC state",
 					 sizeof(struct gen4_cc_unit_state),
 					 4096);
-	drm_intel_bo_map(cc_state_bo, TRUE);
+	assert(cc_state_bo);
+
+	ret = drm_intel_bo_map(cc_state_bo, TRUE);
+	assert(ret == 0);
+
 	for (i = 0; i < BRW_BLENDFACTOR_COUNT; i++) {
 		for (j = 0; j < BRW_BLENDFACTOR_COUNT; j++) {
 			cc_state_init(cc_state_bo,
@@ -1225,6 +1254,7 @@ static drm_intel_bo *gen4_create_cc_unit_state(intel_screen_private *intel)
 	drm_intel_bo_unreference(cc_vp_bo);
 
 	return cc_state_bo;
+	(void)ret;
 }
 
 static uint32_t i965_get_card_format(PicturePtr picture)
@@ -1724,9 +1754,12 @@ static Bool i965_composite_check_aperture(intel_screen_private *intel)
 
 static void i965_surface_flush(struct intel_screen_private *intel)
 {
-	drm_intel_bo_subdata(intel->surface_bo,
-			     0, intel->surface_used,
-			     intel->surface_data);
+	int ret;
+
+	ret = drm_intel_bo_subdata(intel->surface_bo,
+				   0, intel->surface_used,
+				   intel->surface_data);
+	assert(ret == 0);
 	intel->surface_used = 0;
 
 	assert (intel->surface_reloc != 0);
@@ -1740,6 +1773,10 @@ static void i965_surface_flush(struct intel_screen_private *intel)
 	intel->surface_bo =
 		drm_intel_bo_alloc(intel->bufmgr, "surface data",
 				   sizeof(intel->surface_data), 4096);
+	assert(intel->surface_bo);
+
+	return;
+	(void)ret;
 }
 
 static void
@@ -2297,6 +2334,8 @@ void gen4_render_state_init(ScrnInfoPtr scrn)
 	intel->surface_bo =
 		drm_intel_bo_alloc(intel->bufmgr, "surface data",
 				   sizeof(intel->surface_data), 4096);
+	assert(intel->surface_bo);
+
 	intel->surface_used = 0;
 
 	if (intel->gen4_render_state == NULL)
@@ -2433,14 +2472,18 @@ gen6_composite_create_cc_state(intel_screen_private *intel)
 {
 	struct gen6_color_calc_state *state;
 	drm_intel_bo *cc_bo;
+	int ret;
 
 	cc_bo = drm_intel_bo_alloc(intel->bufmgr,
 				"gen6 CC state",
 				sizeof(*state),
 				4096);
-	drm_intel_bo_map(cc_bo, TRUE);
-	state = cc_bo->virtual;
-	memset(state, 0, sizeof(*state));
+	assert(cc_bo);
+
+	ret = drm_intel_bo_map(cc_bo, TRUE);
+	assert(ret == 0);
+
+	state = memset(cc_bo->virtual, 0, sizeof(*state));
 	state->constant_r = 1.0;
 	state->constant_g = 0.0;
 	state->constant_b = 1.0;
@@ -2448,21 +2491,25 @@ gen6_composite_create_cc_state(intel_screen_private *intel)
 	drm_intel_bo_unmap(cc_bo);
 
 	return cc_bo;
+	(void)ret;
 }
 
 static drm_intel_bo *
 gen6_composite_create_blend_state(intel_screen_private *intel)
 {
 	drm_intel_bo *blend_bo;
-	int src, dst;
+	int src, dst, ret;
 
 	blend_bo = drm_intel_bo_alloc(intel->bufmgr,
 				"gen6 BLEND state",
 				BRW_BLENDFACTOR_COUNT * BRW_BLENDFACTOR_COUNT * GEN6_BLEND_STATE_PADDED_SIZE,
 				4096);
-	drm_intel_bo_map(blend_bo, TRUE);
-	memset(blend_bo->virtual, 0, blend_bo->size);
+	assert(blend_bo);
 
+	ret = drm_intel_bo_map(blend_bo, TRUE);
+	assert(ret == 0);
+
+	memset(blend_bo->virtual, 0, blend_bo->size);
 	for (src = 0; src < BRW_BLENDFACTOR_COUNT; src++) {
 		for (dst = 0; dst < BRW_BLENDFACTOR_COUNT; dst++) {
 			uint32_t blend_state_offset = (src * BRW_BLENDFACTOR_COUNT + dst) * GEN6_BLEND_STATE_PADDED_SIZE;
@@ -2481,6 +2528,7 @@ gen6_composite_create_blend_state(intel_screen_private *intel)
 
 	drm_intel_bo_unmap(blend_bo);
 	return blend_bo;
+	(void)ret;
 }
 
 static drm_intel_bo *
@@ -2488,17 +2536,22 @@ gen6_composite_create_depth_stencil_state(intel_screen_private *intel)
 {
 	struct gen6_depth_stencil_state *state;
 	drm_intel_bo *depth_stencil_bo;
+	int ret;
 
 	depth_stencil_bo = drm_intel_bo_alloc(intel->bufmgr,
 					"gen6 DEPTH_STENCIL state",
 					sizeof(*state),
 					4096);
-	drm_intel_bo_map(depth_stencil_bo, TRUE);
-	state = depth_stencil_bo->virtual;
-	memset(state, 0, sizeof(*state));
+	assert(depth_stencil_bo);
+
+	ret = drm_intel_bo_map(depth_stencil_bo, TRUE);
+	assert(ret == 0);
+
+	state = memset(depth_stencil_bo->virtual, 0, sizeof(*state));
 	drm_intel_bo_unmap(depth_stencil_bo);
 
 	return depth_stencil_bo;
+	(void)ret;
 }
 
 static void
diff --git a/src/intel.h b/src/intel.h
index 9e9de04..20d8282 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -601,11 +601,16 @@ static inline drm_intel_bo *intel_bo_alloc_for_data(intel_screen_private *intel,
 						    const char *name)
 {
 	drm_intel_bo *bo;
+	int ret;
 
 	bo = drm_intel_bo_alloc(intel->bufmgr, name, size, 4096);
-	if (bo)
-		drm_intel_bo_subdata(bo, 0, size, data);
+	assert(bo);
+
+	ret = drm_intel_bo_subdata(bo, 0, size, data);
+	assert(ret == 0);
+
 	return bo;
+	(void)ret;
 }
 
 void intel_debug_flush(ScrnInfoPtr scrn);
commit 46fbb3d31bf73df4bd0259a146e9929f62a9c488
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    uxa: NameForAtom may return NULL
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 89f7259..609cbf7 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1176,6 +1176,8 @@ intel_output_set_property(xf86OutputPtr output, Atom property,
 				return FALSE;
 			memcpy(&atom, value->data, 4);
 			name = NameForAtom(atom);
+			if (name == NULL)
+				return FALSE;
 
 			/* search for matching name string, then set its value down */
 			for (j = 0; j < p->mode_prop->count_enums; j++) {
commit 8451d5dd3c992bcfad9f71aa890b6b225884b978
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    uxa: Use unsigned bitfields for single bits
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index 4bdeab3..9e9de04 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -88,9 +88,9 @@ struct intel_pixmap {
 	uint16_t stride;
 	uint8_t tiling;
 	int8_t busy :2;
-	int8_t dirty :1;
-	int8_t offscreen :1;
-	int8_t pinned :1;
+	uint8_t dirty :1;
+	uint8_t offscreen :1;
+	uint8_t pinned :1;
 };
 
 #if HAS_DEVPRIVATEKEYREC
commit d66ca06865973833398560a04186e0067d9cb206
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    uxa: Initialize is_glamor_pixmap
    
    It is not clear whether is_glamor_pixmap is always initialized prior to
    use, so set it to a safe^W likely value.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 2a95edb..389ecdd 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -263,8 +263,8 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 	DRI2BufferPtr buffers;
 	I830DRI2BufferPrivatePtr privates;
 	PixmapPtr pixmap, pDepthPixmap;
+	int is_glamor_pixmap = FALSE;
 	int i;
-	int is_glamor_pixmap;
 
 	buffers = calloc(count, sizeof *buffers);
 	if (buffers == NULL)
commit 25a17b2ca5e3945150151c4308761891c0e20bab
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    uxa: Remove 'render_source_is_solid' dead code
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i965_render.c b/src/i965_render.c
index 98231b8..f9d3158 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -1847,47 +1847,45 @@ i965_emit_composite_primitive(intel_screen_private *intel,
 	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
 	Bool is_affine = intel->gen4_render_state->composite_op.is_affine;
 
-	if (! intel->render_source_is_solid) {
-		if (is_affine) {
-			if (!intel_get_transformed_coordinates(srcX, srcY,
-							      intel->transform[0],
-							      &src_x[0],
-							      &src_y[0]))
-				return;
-
-			if (!intel_get_transformed_coordinates(srcX, srcY + h,
-							      intel->transform[0],
-							      &src_x[1],
-							      &src_y[1]))
-				return;
-
-			if (!intel_get_transformed_coordinates(srcX + w, srcY + h,
-							      intel->transform[0],
-							      &src_x[2],
-							      &src_y[2]))
-				return;
-		} else {
-			if (!intel_get_transformed_coordinates_3d(srcX, srcY,
-								 intel->transform[0],
-								 &src_x[0],
-								 &src_y[0],
-								 &src_w[0]))
-				return;
-
-			if (!intel_get_transformed_coordinates_3d(srcX, srcY + h,
-								 intel->transform[0],
-								 &src_x[1],
-								 &src_y[1],
-								 &src_w[1]))
-				return;
-
-			if (!intel_get_transformed_coordinates_3d(srcX + w, srcY + h,
-								 intel->transform[0],
-								 &src_x[2],
-								 &src_y[2],
-								 &src_w[2]))
-				return;
-		}
+	if (is_affine) {
+		if (!intel_get_transformed_coordinates(srcX, srcY,
+						       intel->transform[0],
+						       &src_x[0],
+						       &src_y[0]))
+			return;
+
+		if (!intel_get_transformed_coordinates(srcX, srcY + h,
+						       intel->transform[0],
+						       &src_x[1],
+						       &src_y[1]))
+			return;
+
+		if (!intel_get_transformed_coordinates(srcX + w, srcY + h,
+						       intel->transform[0],
+						       &src_x[2],
+						       &src_y[2]))
+			return;
+	} else {
+		if (!intel_get_transformed_coordinates_3d(srcX, srcY,
+							  intel->transform[0],
+							  &src_x[0],
+							  &src_y[0],
+							  &src_w[0]))
+			return;
+
+		if (!intel_get_transformed_coordinates_3d(srcX, srcY + h,
+							  intel->transform[0],
+							  &src_x[1],
+							  &src_y[1],
+							  &src_w[1]))
+			return;
+
+		if (!intel_get_transformed_coordinates_3d(srcX + w, srcY + h,
+							  intel->transform[0],
+							  &src_x[2],
+							  &src_y[2],
+							  &src_w[2]))
+			return;
 	}
 
 	if (intel->render_mask) {
diff --git a/src/intel.h b/src/intel.h
index 253a6bf..4bdeab3 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -268,8 +268,6 @@ typedef struct intel_screen_private {
 
 	PixmapPtr render_source, render_mask, render_dest;
 	PicturePtr render_source_picture, render_mask_picture, render_dest_picture;
-	CARD32 render_source_solid;
-	Bool render_source_is_solid;
 	Bool needs_3d_invariant;
 	Bool needs_render_state_emit;
 	Bool needs_render_vertex_emit;
commit ba0eb230836fa5f94a2f50da5880fdd3b9dfd72e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    uxa: Fix reallocation of XVAdaptors array
    
    Prevent the leak and remove some unsightly code in the process.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_video.c b/src/intel_video.c
index 83d1eab..09674e5 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -337,13 +337,12 @@ void I830InitVideo(ScreenPtr screen)
 	/* Give our adaptor list enough space for the overlay and/or texture video
 	 * adaptors.
 	 */
-	newAdaptors =
-	    malloc((num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr *));
-	if (newAdaptors == NULL)
+	newAdaptors = realloc(adaptors,
+			      (num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr));
+	if (newAdaptors == NULL) {
+		free(adaptors);
 		return;
-
-	memcpy(newAdaptors, adaptors,
-	       num_adaptors * sizeof(XF86VideoAdaptorPtr));
+	}
 	adaptors = newAdaptors;
 
 	/* Add the adaptors supported by our hardware.  First, set up the atoms
commit affb1f82c1401bac3f54654a342e3b16b8bd374b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 08:27:59 2012 +0100

    sna/gradient: Reuse old gradient bo if allocation of new fails
    
    Prefer a stall to a crash.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 0fda6ce..9ac0328 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -235,6 +235,7 @@ static void
 sna_render_finish_solid(struct sna *sna, bool force)
 {
 	struct sna_solid_cache *cache = &sna->render.solid_cache;
+	struct kgem_bo *old;
 	int i;
 
 	DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
@@ -253,16 +254,25 @@ sna_render_finish_solid(struct sna *sna, bool force)
 		kgem_bo_destroy(&sna->kgem, cache->bo[i]);
 		cache->bo[i] = NULL;
 	}
-	kgem_bo_destroy(&sna->kgem, cache->cache_bo);
+
+	old = cache->cache_bo;
 
 	DBG(("sna_render_finish_solid reset\n"));
 
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
+	if (cache->cache_bo == NULL) {
+		cache->cache_bo = old;
+		old = NULL;
+	}
+
 	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	if (force)
 		cache->size = 1;
+
+	if (old)
+		kgem_bo_destroy(&sna->kgem, old);
 }
 
 struct kgem_bo *
commit e51f984a4c15de42c5bee3aeacee553e2dd4eca0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jun 2 08:23:35 2012 +0100

    sna: Check gradient allocations during init
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index a52cfb5..0fda6ce 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -336,10 +336,12 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 						 cache->cache_bo,
 						 sizeof(uint32_t)*i,
 						 sizeof(uint32_t));
+		if (cache->bo[i] == NULL)
+			return FALSE;
+
 		cache->bo[i]->pitch = 4;
 	}
-	kgem_bo_write(&sna->kgem, cache->cache_bo, color, sizeof(color));
-	return TRUE;
+	return kgem_bo_write(&sna->kgem, cache->cache_bo, color, sizeof(color));
 }
 
 static Bool sna_solid_cache_init(struct sna *sna)
@@ -360,6 +362,9 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	cache->color[0] = 0xffffffff;
 	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 0, sizeof(uint32_t));
+	if (cache->bo[0] == NULL)
+		return FALSE;
+
 	cache->bo[0]->pitch = 4;
 	cache->dirty = 1;
 	cache->size = 1;
commit 2c5647a18e7856b604bce70270463fc128c2cfcd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    sna: NameForAtom may return NULL
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 5ed3179..9fd59f8 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1509,6 +1509,8 @@ sna_output_set_property(xf86OutputPtr output, Atom property,
 				return FALSE;
 			memcpy(&atom, value->data, 4);
 			name = NameForAtom(atom);
+			if (name == NULL)
+				return FALSE;
 
 			/* search for matching name string, then set its value down */
 			for (j = 0; j < p->mode_prop->count_enums; j++) {
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index bc117a4..33f3f71 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -387,7 +387,7 @@ XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
 		return NULL;
 	}
 
-	if (sna->kgem.wedged) {
+	if (wedged(sna)) {
 		xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
 			   "cannot enable XVideo whilst the GPU is wedged\n");
 		return FALSE;
commit 7867bff00caeff9e1fbe569ca3d37b94f704a4cd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    sna: Replace invalid signed value with unsigned ~0 for uint8_t
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 553cea8..f982b41 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2916,7 +2916,7 @@ gen4_render_fill_boxes(struct sna *sna,
 	    (prefer_blt(sna) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen4_check_dst_format(format))) {
-		uint8_t alu = -1;
+		uint8_t alu = ~0;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -2929,7 +2929,7 @@ gen4_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != -1 &&
+		if (alu != ~0 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 8b970f4..56db4a0 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3250,7 +3250,7 @@ gen5_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen5_check_dst_format(format))) {
-		uint8_t alu = -1;
+		uint8_t alu = ~0;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3263,7 +3263,7 @@ gen5_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != -1 &&
+		if (alu != ~0 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index df2eeb2..cc6cc50 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3661,7 +3661,7 @@ gen6_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna, dst_bo) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen6_check_dst_format(format))) {
-		uint8_t alu = -1;
+		uint8_t alu = ~0;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3674,7 +3674,7 @@ gen6_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != -1 &&
+		if (alu != ~0 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 6bf25d3..2ee5b6f 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3743,7 +3743,7 @@ gen7_render_fill_boxes(struct sna *sna,
 	    (prefer_blt_fill(sna, dst_bo) ||
 	     too_large(dst->drawable.width, dst->drawable.height) ||
 	     !gen7_check_dst_format(format))) {
-		uint8_t alu = -1;
+		uint8_t alu = ~0;
 
 		pixel = 0;
 		if (op == PictOpClear)
@@ -3756,7 +3756,7 @@ gen7_render_fill_boxes(struct sna *sna,
 						 format))
 			alu = GXcopy;
 
-		if (alu != -1 &&
+		if (alu != ~0 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
commit ca72b372237287af0f4475801a9b64efca975832
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 23:22:31 2012 +0100

    sna: Drop return value from 3D point transform
    
    And just fixup the computed coordinates in the face of an invalid
    matrix.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index dd66a46..553cea8 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -980,29 +980,24 @@ gen4_emit_composite_primitive(struct sna *sna,
 						&src_x[2],
 						&src_y[2]);
 	} else {
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1],
-							op->src.transform,
-							&src_x[0],
-							&src_y[0],
-							&src_w[0]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[1],
-							&src_y[1],
-							&src_w[1]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[2],
-							&src_y[2],
-							&src_w[2]))
-			return;
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
+						   r->src.y + op->src.offset[1],
+						   op->src.transform,
+						   &src_x[0],
+						   &src_y[0],
+						   &src_w[0]);
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
+						   r->src.y + op->src.offset[1] + r->height,
+						   op->src.transform,
+						   &src_x[1],
+						   &src_y[1],
+						   &src_w[1]);
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
+						   r->src.y + op->src.offset[1] + r->height,
+						   op->src.transform,
+						   &src_x[2],
+						   &src_y[2],
+						   &src_w[2]);
 	}
 
 	if (op->mask.bo) {
@@ -1025,29 +1020,24 @@ gen4_emit_composite_primitive(struct sna *sna,
 							&mask_x[2],
 							&mask_y[2]);
 		} else {
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1],
-								op->mask.transform,
-								&mask_x[0],
-								&mask_y[0],
-								&mask_w[0]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[1],
-								&mask_y[1],
-								&mask_w[1]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[2],
-								&mask_y[2],
-								&mask_w[2]))
-				return;
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
+							   r->mask.y + op->mask.offset[1],
+							   op->mask.transform,
+							   &mask_x[0],
+							   &mask_y[0],
+							   &mask_w[0]);
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
+							   r->mask.y + op->mask.offset[1] + r->height,
+							   op->mask.transform,
+							   &mask_x[1],
+							   &mask_y[1],
+							   &mask_w[1]);
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
+							   r->mask.y + op->mask.offset[1] + r->height,
+							   op->mask.transform,
+							   &mask_x[2],
+							   &mask_y[2],
+							   &mask_w[2]);
 		}
 	}
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 9c30041..8b970f4 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -983,29 +983,24 @@ gen5_emit_composite_primitive(struct sna *sna,
 						&src_x[2],
 						&src_y[2]);
 	} else {
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1],
-							op->src.transform,
-							&src_x[0],
-							&src_y[0],
-							&src_w[0]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[1],
-							&src_y[1],
-							&src_w[1]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[2],
-							&src_y[2],
-							&src_w[2]))
-			return;
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
+						   r->src.y + op->src.offset[1],
+						   op->src.transform,
+						   &src_x[0],
+						   &src_y[0],
+						   &src_w[0]);
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
+						   r->src.y + op->src.offset[1] + r->height,
+						   op->src.transform,
+						   &src_x[1],
+						   &src_y[1],
+						   &src_w[1]);
+		sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
+						   r->src.y + op->src.offset[1] + r->height,
+						   op->src.transform,
+						   &src_x[2],
+						   &src_y[2],
+						   &src_w[2]);
 	}
 
 	if (op->mask.bo) {
@@ -1028,29 +1023,25 @@ gen5_emit_composite_primitive(struct sna *sna,
 							&mask_x[2],
 							&mask_y[2]);
 		} else {
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1],
-								op->mask.transform,
-								&mask_x[0],
-								&mask_y[0],
-								&mask_w[0]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[1],
-								&mask_y[1],
-								&mask_w[1]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[2],
-								&mask_y[2],
-								&mask_w[2]))
-				return;
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
+							   r->mask.y + op->mask.offset[1],
+							   op->mask.transform,
+							   &mask_x[0],
+							   &mask_y[0],
+							   &mask_w[0]);
+
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
+							   r->mask.y + op->mask.offset[1] + r->height,
+							   op->mask.transform,
+							   &mask_x[1],
+							   &mask_y[1],
+							   &mask_w[1]);
+			sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
+							   r->mask.y + op->mask.offset[1] + r->height,
+							   op->mask.transform,
+							   &mask_x[2],
+							   &mask_y[2],
+							   &mask_w[2]);
 		}
 	}
 
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d9fd9d1..df7f42f 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -500,7 +500,7 @@ sna_get_transformed_coordinates(int x, int y,
 				const PictTransform *transform,
 				float *x_out, float *y_out);
 
-Bool
+void
 sna_get_transformed_coordinates_3d(int x, int y,
 				   const PictTransform *transform,
 				   float *x_out, float *y_out, float *z_out);
diff --git a/src/sna/sna_transform.c b/src/sna/sna_transform.c
index 644d404..54852b1 100644
--- a/src/sna/sna_transform.c
+++ b/src/sna/sna_transform.c
@@ -114,7 +114,7 @@ sna_get_transformed_coordinates(int x, int y,
 /**
  * Returns the un-normalized floating-point coordinates transformed by the given transform.
  */
-Bool
+void
 sna_get_transformed_coordinates_3d(int x, int y,
 				   const PictTransform *transform,
 				   float *x_out, float *y_out, float *w_out)
@@ -126,13 +126,13 @@ sna_get_transformed_coordinates_3d(int x, int y,
 	} else {
 		int64_t result[3];
 
-		if (!_sna_transform_point(transform, x, y, result))
-			return FALSE;
-
-		*x_out = result[0] / 65536.;
-		*y_out = result[1] / 65536.;
-		*w_out = result[2] / 65536.;
+		if (_sna_transform_point(transform, x, y, result)) {
+			*x_out = result[0] / 65536.;
+			*y_out = result[1] / 65536.;
+			*w_out = result[2] / 65536.;
+		} else {
+			*x_out = *y_out = 0;
+			*w_out = 1.;
+		}
 	}
-
-	return TRUE;
 }
commit b83e2a79f2a082630e0d3147abec8d3599f3f4b4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:17:49 2012 +0100

    uxa: Static analysis warning fixes
    
    A smattering of bugs and confusing code.
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index 5605edf..0ad991a 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -665,7 +665,7 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 	drm_intel_bo *bo_table[] = {
 		NULL,		/* batch_bo */
 		intel_get_pixmap_bo(dest),
-		source ? intel_get_pixmap_bo(source) : NULL,
+		intel_get_pixmap_bo(source),
 		mask ? intel_get_pixmap_bo(mask) : NULL,
 	};
 	int tex_unit = 0;
diff --git a/src/intel_dri.c b/src/intel_dri.c
index 3261e54..2a95edb 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1034,9 +1034,6 @@ can_exchange(DrawablePtr drawable, DRI2BufferPtr front, DRI2BufferPtr back)
 	struct intel_pixmap *front_intel = intel_get_pixmap_private(front_pixmap);
 	struct intel_pixmap *back_intel = intel_get_pixmap_private(back_pixmap);
 
-	if (drawable == NULL)
-		return FALSE;
-
 	if (!DRI2CanFlip(drawable))
 		return FALSE;
 
@@ -1275,7 +1272,7 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	swap_info->event_data = data;
 	swap_info->front = front;
 	swap_info->back = back;
-	swap_info->pipe = I830DRI2DrawablePipe(draw);
+	swap_info->pipe = pipe;
 
 	if (!i830_dri2_add_frame_event(swap_info)) {
 	    free(swap_info);
commit 67e8ac365c4b7dd9c4eb2abe46aecc8cc04d4278
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:20:51 2012 +0100

    sna/gen3+: Guard against a kgem_bo_destroy(NULL)
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 8cafeb8..d8d1e09 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1651,7 +1651,8 @@ static int gen3_vertex_finish(struct sna *sna)
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		if (sna->render.vbo)
+			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		return 0;
 	}
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 8ab707f..dd66a46 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -401,7 +401,8 @@ static int gen4_vertex_finish(struct sna *sna)
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		if (sna->render.vbo)
+			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		return 0;
 	}
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 68c1bb7..9c30041 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -393,7 +393,8 @@ static int gen5_vertex_finish(struct sna *sna)
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		if (sna->render.vbo)
+			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		return 0;
 	}
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d9068de..df2eeb2 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -967,7 +967,8 @@ static int gen6_vertex_finish(struct sna *sna)
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		if (sna->render.vbo)
+			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		return 0;
 	}
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 3f31437..6bf25d3 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1105,7 +1105,8 @@ static int gen7_vertex_finish(struct sna *sna)
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		if (sna->render.vbo)
+			kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
 		return 0;
 	}
commit e91b27ae14a5366175a4fe9c87ee15a0a25b5a42
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:12:36 2012 +0100

    sna: Fix potential dereference of NULL cpu_bo inside assertion
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7eeb7af..7b7190d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2486,7 +2486,7 @@ done:
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
 		if (priv->ptr) {
-			assert(!priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
 		}
 	}
commit 048161e69612324d6c077ec487a2a64aaaa7c1f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:09:21 2012 +0100

    sna/trapezoids: Check for malloc failure
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c7f6671..d65c249 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2598,8 +2598,11 @@ composite_aligned_boxes(struct sna *sna,
 	DBG(("%s\n", __FUNCTION__));
 
 	boxes = stack_boxes;
-	if (ntrap > (int)ARRAY_SIZE(stack_boxes))
+	if (ntrap > (int)ARRAY_SIZE(stack_boxes)) {
 		boxes = malloc(sizeof(BoxRec)*ntrap);
+		if (boxes == NULL)
+			return false;
+	}
 
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
commit 2896aa7b46931d3b95567121ccb07844725ab45c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:08:01 2012 +0100

    sna/io: Free the heap clip boxes on error paths
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3f39de5..0d6fbbb 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -261,14 +261,19 @@ fallback:
 								       tmp.drawable.bitsPerPixel,
 								       KGEM_BUFFER_LAST,
 								       &ptr);
-					if (!dst_bo)
+					if (!dst_bo) {
+						if (clipped != stack)
+							free(clipped);
 						goto fallback;
+					}
 
 					if (!sna->render.copy_boxes(sna, GXcopy,
 								    dst, src_bo, src_dx, src_dy,
 								    &tmp, dst_bo, -tile.x1, -tile.y1,
 								    clipped, c-clipped)) {
 						kgem_bo_destroy(&sna->kgem, dst_bo);
+						if (clipped != stack)
+							free(clipped);
 						goto fallback;
 					}
 
commit 1ef00ff720226c211444806d9fdf67ce2046685b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 17:05:55 2012 +0100

    sna/dri: Fix typo, check for NULL after allocations to spot failure!
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index c74ecc4..c26c128 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1369,7 +1369,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		}
 
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
-		if (!info)
+		if (info == NULL)
 			return FALSE;
 
 		info->type = type;
@@ -1415,7 +1415,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 				 info->event_data);
 	} else {
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
-		if (info)
+		if (info == NULL)
 			return FALSE;
 
 		info->sna = sna;
commit dc0fe44bebfdee63bcda4d185b85fe9e1cb16a71
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 16:54:41 2012 +0100

    sna: Fix a smattering of static analysis warnings
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ee18ebe..8cafeb8 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2477,7 +2477,6 @@ gen3_composite_set_target(struct sna *sna,
 	op->dst.format = dst->format;
 	op->dst.width = op->dst.pixmap->drawable.width;
 	op->dst.height = op->dst.pixmap->drawable.height;
-	priv = sna_pixmap(op->dst.pixmap);
 
 	op->dst.bo = NULL;
 	priv = sna_pixmap(op->dst.pixmap);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 0090639..7eeb7af 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3277,10 +3277,10 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 	if (priv == NULL) {
 		DBG(("%s: fbPutImage, unattached(%d, %d, %d, %d)\n",
 		     __FUNCTION__, x, y, w, h));
-		if (!sna_gc_move_to_cpu(gc, drawable))
-			goto out;
-
-		fbPutImage(drawable, gc, depth, x, y, w, h, left, format, bits);
+		if (sna_gc_move_to_cpu(gc, drawable))
+			fbPutImage(drawable, gc, depth,
+				   x, y, w, h, left,
+				   format, bits);
 		return;
 	}
 
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index ce1e284..6999548 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -174,7 +174,7 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 	ret = xf86XVClipVideoHelper(dst, &x1, &x2, &y1, &y2,
 				    crtc_region, frame->width, frame->height);
 	if (crtc_region != reg)
-		RegionUninit(&crtc_region_local);
+		RegionUninit(crtc_region);
 
 	frame->top = y1 >> 16;
 	frame->left = (x1 >> 16) & ~1;
@@ -539,13 +539,12 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 		return;
 
 	num_adaptors = xf86XVListGenericAdaptors(sna->scrn, &adaptors);
-	newAdaptors =
-	    malloc((num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr *));
-	if (newAdaptors == NULL)
+	newAdaptors = realloc(adaptors,
+			      (num_adaptors + 2) * sizeof(XF86VideoAdaptorPtr));
+	if (newAdaptors == NULL) {
+		free(adaptors);
 		return;
-
-	memcpy(newAdaptors, adaptors,
-	       num_adaptors * sizeof(XF86VideoAdaptorPtr));
+	}
 	adaptors = newAdaptors;
 
 	/* Set up textured video if we can do it at this depth and we are on
commit 8eed569fb386a9af48a8beb28666d72c6678e48c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 13:02:50 2012 +0100

    sna/trapezoids: Correct extents declaration for fallback
    
    Reported-by: Zdenek Kabelac <zkabelac at redhat.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 0fd1b03..c7f6671 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4428,7 +4428,7 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		region.extents.x1 = dst_x + dst->pDrawable->x;
 		region.extents.y1 = dst_y + dst->pDrawable->y;
 		region.extents.x2 = region.extents.x1 + extents.x2;
-		region.extents.y2 = region.extents.y2 + extents.y2;
+		region.extents.y2 = region.extents.y1 + extents.y2;
 		region.data = NULL;
 
 		DBG(("%s: move-to-cpu\n", __FUNCTION__));
commit 91419576eef562378cccf90968c4f0277139b03d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 12:09:19 2012 +0100

    sna: Tiny DBG message tweak
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bdaf0b5..0090639 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10791,7 +10791,7 @@ static inline bool sna_font_too_large(FontPtr font)
 	int top = max(FONTMAXBOUNDS(font, ascent), FONTASCENT(font));
 	int bot = max(FONTMAXBOUNDS(font, descent), FONTDESCENT(font));
 	int width = max(FONTMAXBOUNDS(font, characterWidth), -FONTMINBOUNDS(font, characterWidth));
-	DBG(("%s: (%d + %d) x %d: %d\n", __FUNCTION__,
+	DBG(("%s? (%d + %d) x %d: %d > 124\n", __FUNCTION__,
 	     top, bot, width, (top + bot) * (width + 7)/8));
 	return (top + bot) * (width + 7)/8 > 124;
 }
commit 4bdecc5b07a184ba136129e75a7fef914ac3b8d2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 11:06:04 2012 +0100

    test: Add a very basic test to exercise BLT text drawing
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/Makefile.am b/test/Makefile.am
index 59fae6b..b0e0e13 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -1,6 +1,7 @@
 stress_TESTS = \
 	basic-fillrect \
 	basic-rectangle \
+	basic-string \
 	basic-copyarea \
 	basic-copyarea-size \
 	basic-putimage \
diff --git a/test/basic-string.c b/test/basic-string.c
new file mode 100644
index 0000000..9f59c91
--- /dev/null
+++ b/test/basic-string.c
@@ -0,0 +1,102 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <X11/Xutil.h> /* for XDestroyImage */
+
+#include "test.h"
+
+static void draw_string(struct test_display *t, Drawable d, uint8_t alu,
+			int x, int y, uint32_t fg, uint32_t bg, int s, int fill)
+{
+	const char *strings[] = {
+		"Hello",
+		"World",
+		"Cairo's twin is Giza",
+	};
+	XGCValues val;
+	GC gc;
+
+	val.function = alu;
+	val.foreground = fg;
+	val.background = bg;
+
+	gc = XCreateGC(t->dpy, d, GCForeground | GCBackground | GCFunction, &val);
+	if (fill)
+		XDrawImageString(t->dpy, d, gc, x, y, strings[s%3], strlen(strings[s%3]));
+	else
+		XDrawString(t->dpy, d, gc, x, y, strings[s%3], strlen(strings[s%3]));
+	XFreeGC(t->dpy, gc);
+}
+
+static void clear(struct test_display *dpy, struct test_target *tt)
+{
+	XRenderColor render_color = {0};
+	XRenderFillRectangle(dpy->dpy, PictOpClear, tt->picture, &render_color,
+			     0, 0, tt->width, tt->height);
+}
+
+static void string_tests(struct test *t, int reps, int sets, enum target target)
+{
+	struct test_target real, ref;
+	int r, s;
+
+	printf("Testing general (%s): ", test_target_name(target));
+	fflush(stdout);
+
+	test_target_create_render(&t->real, target, &real);
+	clear(&t->real, &real);
+
+	test_target_create_render(&t->ref, target, &ref);
+	clear(&t->ref, &ref);
+
+	for (s = 0; s < sets; s++) {
+		for (r = 0; r < reps; r++) {
+			int x = rand() % (2*real.width) - real.width;
+			int y = rand() % (2*real.height) - real.height;
+			uint8_t alu = rand() % (GXset + 1);
+			uint32_t fg = rand();
+			uint32_t bg = rand();
+			int str = rand();
+			int fill = rand() & 1;
+
+			draw_string(&t->real, real.draw, alu, x, y, fg, bg, str, fill);
+			draw_string(&t->ref, ref.draw, alu, x, y, fg, bg, str, fill);
+		}
+
+		test_compare(t,
+			     real.draw, real.format,
+			     ref.draw, ref.format,
+			     0, 0, real.width, real.height,
+			     "");
+	}
+
+	printf("passed [%d iterations x %d]\n", reps, sets);
+
+	test_target_destroy_render(&t->real, &real);
+	test_target_destroy_render(&t->ref, &ref);
+}
+
+int main(int argc, char **argv)
+{
+	struct test test;
+	int i;
+
+	test_init(&test, argc, argv);
+
+	for (i = 0; i <= DEFAULT_ITERATIONS; i++) {
+		int reps = 1 << i;
+		int sets = 1 << (12 - i);
+		enum target t;
+
+		if (sets < 2)
+			sets = 2;
+
+		for (t = TARGET_FIRST; t <= TARGET_LAST; t++) {
+			string_tests(&test, reps, sets, t);
+		}
+	}
+
+	return 0;
+}
commit ea0f326d80f21f89c3cff3595a6e1afc42b52efd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 09:04:23 2012 +0100

    sna: Debug XDrawRectangle
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d7750c7..bdaf0b5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7782,6 +7782,9 @@ zero:
 	do {
 		xRectangle rr = *r++;
 
+		if ((rr.width | rr.height) == 0)
+			continue;
+
 		DBG(("%s - zero : r[%d] = (%d, %d) x (%d, %d)\n", __FUNCTION__,
 		     n, rr.x, rr.y, rr.width, rr.height));
 		rr.x += dx;
@@ -7794,11 +7797,11 @@ zero:
 			b = boxes;
 		}
 
-		if (rr.width <= 2 || rr.height <= 2) {
+		if (rr.width <= 1 || rr.height <= 1) {
 			b->x1 = rr.x;
 			b->y1 = rr.y;
-			b->x2 = rr.x + rr.width + 1;
-			b->y2 = rr.y + rr.height + 1;
+			b->x2 = rr.x + rr.width + (rr.height != 0);
+			b->y2 = rr.y + rr.height + (rr.width != 0);
 			DBG(("%s: blt (%d, %d), (%d, %d)\n",
 			     __FUNCTION__,
 			     b->x1, b->y1, b->x2,b->y2));
@@ -7847,14 +7850,18 @@ zero_clipped:
 
 				DBG(("%s - zero, clipped complex: r[%d] = (%d, %d) x (%d, %d)\n", __FUNCTION__,
 				     n, rr.x, rr.y, rr.width, rr.height));
+
+				if ((rr.width | rr.height) == 0)
+					continue;
+
 				rr.x += drawable->x;
 				rr.y += drawable->y;
 
-				if (rr.width <= 2 || rr.height <= 2) {
+				if (rr.width <= 1 || rr.height <= 1) {
 					box[0].x1 = rr.x;
 					box[0].y1 = rr.y;
-					box[0].x2 = rr.x + rr.width + 1;
-					box[0].y2 = rr.y + rr.height + 1;
+					box[0].x2 = rr.x + rr.width + (rr.height != 0);
+					box[0].y2 = rr.y + rr.height + (rr.width != 0);
 					count = 1;
 				} else {
 					box[0].x1 = rr.x;
@@ -7907,14 +7914,18 @@ zero_clipped:
 				xRectangle rr = *r++;
 				DBG(("%s - zero, clip: r[%d] = (%d, %d) x (%d, %d)\n", __FUNCTION__,
 				     n, rr.x, rr.y, rr.width, rr.height));
+
+				if ((rr.width | rr.height) == 0)
+					continue;
+
 				rr.x += drawable->x;
 				rr.y += drawable->y;
 
-				if (rr.width <= 2 || rr.height <= 2) {
+				if (rr.width <= 1 || rr.height <= 1) {
 					box[0].x1 = rr.x;
 					box[0].y1 = rr.y;
-					box[0].x2 = rr.x + rr.width + 1;
-					box[0].y2 = rr.y + rr.height + 1;
+					box[0].x2 = rr.x + rr.width + (rr.height != 0);
+					box[0].y2 = rr.y + rr.height + (rr.width != 0);
 					count = 1;
 				} else {
 					box[0].x1 = rr.x;
@@ -7968,6 +7979,10 @@ wide_clipped:
 
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
+		DBG(("%s: wide clipped: extents=((%d, %d), (%d, %d))\n",
+		     __FUNCTION__,
+		     clip.extents.x1, clip.extents.y1,
+		     clip.extents.x2, clip.extents.y2));
 		if (!RegionNotEmpty(&clip))
 			goto done;
 
@@ -7978,23 +7993,27 @@ wide_clipped:
 			do {
 				xRectangle rr = *r++;
 				int count;
+
+				if ((rr.width | rr.height) == 0)
+					continue;
+
 				rr.x += drawable->x;
 				rr.y += drawable->y;
 
 				if (rr.height <= offset2 || rr.width <= offset2) {
 					if (rr.height == 0) {
 						box[0].x1 = rr.x;
-						box[0].x2 = rr.x + rr.width + 1;
+						box[0].x2 = rr.x + rr.width;
 					} else {
 						box[0].x1 = rr.x - offset1;
-						box[0].x2 = box[0].x1 + rr.width + offset2;
+						box[0].x2 = rr.x + rr.width + offset3;
 					}
 					if (rr.width == 0) {
 						box[0].y1 = rr.y;
-						box[0].y2 = rr.y + rr.height + 1;
+						box[0].y2 = rr.y + rr.height;
 					} else {
 						box[0].y1 = rr.y - offset1;
-						box[0].y2 = box[0].y1 + rr.height + offset2;
+						box[0].y2 = rr.y + rr.height + offset3;
 					}
 					count = 1;
 				} else {
@@ -8009,8 +8028,8 @@ wide_clipped:
 					box[1].y2 = rr.y + rr.height - offset1;
 
 					box[2] = box[1];
-					box[3].x1 += rr.width;
-					box[3].x2 += rr.width;
+					box[2].x1 += rr.width;
+					box[2].x2 += rr.width;
 
 					box[3] = box[0];
 					box[3].y1 += rr.height;
@@ -8043,23 +8062,27 @@ wide_clipped:
 				}
 			} while (--n);
 		} else {
+			DBG(("%s: singular clip offset1=%d, offset2=%d, offset3=%d\n",
+			     __FUNCTION__, offset1, offset2, offset3));
 			do {
 				xRectangle rr = *r++;
 				int count;
 				rr.x += drawable->x;
 				rr.y += drawable->y;
 
+				DBG(("%s: r=(%d, %d)x(%d, %d)\n",
+				     __FUNCTION__, rr.x, rr.y, rr.width, rr.height));
 				if (rr.height <= offset2 || rr.width <= offset2) {
 					if (rr.height == 0) {
 						box[0].x1 = rr.x;
-						box[0].x2 = rr.x + rr.width + 1;
+						box[0].x2 = rr.x + rr.width;
 					} else {
 						box[0].x1 = rr.x - offset1;
 						box[0].x2 = box[0].x1 + rr.width + offset2;
 					}
 					if (rr.width == 0) {
 						box[0].y1 = rr.y;
-						box[0].y2 = rr.y + rr.height + 1;
+						box[0].y2 = rr.y + rr.height;
 					} else {
 						box[0].y1 = rr.y - offset1;
 						box[0].y2 = box[0].y1 + rr.height + offset2;
@@ -8070,19 +8093,36 @@ wide_clipped:
 					box[0].x2 = box[0].x1 + rr.width + offset2;
 					box[0].y1 = rr.y - offset1;
 					box[0].y2 = box[0].y1 + offset2;
+					DBG(("%s: box[0]=(%d, %d), (%d, %d)\n",
+					     __FUNCTION__,
+					     box[0].x1, box[0].y1,
+					     box[0].x2, box[0].y2));
 
 					box[1].x1 = rr.x - offset1;
 					box[1].x2 = box[1].x1 + offset2;
 					box[1].y1 = rr.y + offset3;
 					box[1].y2 = rr.y + rr.height - offset1;
+					DBG(("%s: box[1]=(%d, %d), (%d, %d)\n",
+					     __FUNCTION__,
+					     box[1].x1, box[1].y1,
+					     box[1].x2, box[1].y2));
 
 					box[2] = box[1];
-					box[3].x1 += rr.width;
-					box[3].x2 += rr.width;
+					box[2].x1 += rr.width;
+					box[2].x2 += rr.width;
+					DBG(("%s: box[2]=(%d, %d), (%d, %d)\n",
+					     __FUNCTION__,
+					     box[2].x1, box[2].y1,
+					     box[2].x2, box[2].y2));
 
 					box[3] = box[0];
 					box[3].y1 += rr.height;
 					box[3].y2 += rr.height;
+					DBG(("%s: box[3]=(%d, %d), (%d, %d)\n",
+					     __FUNCTION__,
+					     box[3].x1, box[3].y1,
+					     box[3].x2, box[3].y2));
+
 					count = 4;
 				}
 
@@ -8117,6 +8157,10 @@ wide:
 
 		do {
 			xRectangle rr = *r++;
+
+			if ((rr.width | rr.height) == 0)
+				continue;
+
 			rr.x += dx;
 			rr.y += dy;
 
@@ -8130,17 +8174,17 @@ wide:
 			if (rr.height <= offset2 || rr.width <= offset2) {
 				if (rr.height == 0) {
 					b->x1 = rr.x;
-					b->x2 = rr.x + rr.width + 1;
+					b->x2 = rr.x + rr.width;
 				} else {
 					b->x1 = rr.x - offset1;
-					b->x2 = rr.x + rr.width + offset2;
+					b->x2 = rr.x + rr.width + offset3;
 				}
 				if (rr.width == 0) {
 					b->y1 = rr.y;
-					b->y2 = rr.y + rr.height + 1;
+					b->y2 = rr.y + rr.height;
 				} else {
 					b->y1 = rr.y - offset1;
-					b->y2 = rr.y + rr.height + offset2;
+					b->y2 = rr.y + rr.height + offset3;
 				}
 				b++;
 			} else {
@@ -8149,18 +8193,18 @@ wide:
 				b[0].y1 = rr.y - offset1;
 				b[0].y2 = b[0].y1 + offset2;
 
-				b[1] = b[0];
-				b[1].y1 = rr.y + rr.height - offset1;
-				b[1].y2 = b[1].y1 + offset2;
+				b[1].x1 = rr.x - offset1;
+				b[1].x2 = b[1].x1 + offset2;
+				b[1].y1 = rr.y + offset3;
+				b[1].y2 = rr.y + rr.height - offset1;
 
-				b[2].x1 = rr.x - offset1;
-				b[2].x2 = b[2].x1 + offset2;
-				b[2].y1 = rr.y + offset3;
-				b[2].y2 = rr.y + rr.height - offset1;
+				b[2] = b[1];
+				b[2].x1 += rr.width;
+				b[2].x2 += rr.width;
 
-				b[3] = b[2];
-				b[3].x1 = rr.x + rr.width - offset1;
-				b[3].x2 = b[3].x1 + offset2;
+				b[3] = b[0];
+				b[3].y1 += rr.height;
+				b[3].y2 += rr.height;
 				b += 4;
 			}
 		} while (--n);
@@ -8217,9 +8261,11 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	     gc->lineStyle, gc->lineStyle == LineSolid,
 	     gc->joinStyle, gc->joinStyle == JoinMiter,
 	     gc->planemask, PM_IS_SOLID(drawable, gc->planemask)));
-	if (gc->lineStyle == LineSolid &&
-	    gc->joinStyle == JoinMiter &&
-	    PM_IS_SOLID(drawable, gc->planemask)) {
+
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		goto fallback;
+
+	if (gc->lineStyle == LineSolid && gc->joinStyle == JoinMiter) {
 		DBG(("%s: trying blt solid fill [%08lx] paths\n",
 		     __FUNCTION__, gc->fgPixel));
 		if ((bo = sna_drawable_use_bo(drawable, true,
@@ -11742,7 +11788,7 @@ static GCOps sna_gc_ops__tmp = {
 static void
 sna_validate_gc(GCPtr gc, unsigned long changes, DrawablePtr drawable)
 {
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s changes=%x\n", __FUNCTION__, changes));
 
 	if (changes & (GCClipMask|GCSubwindowMode) ||
 	    drawable->serialNumber != (gc->serialNumber & DRAWABLE_SERIAL_BITS) ||
commit 6a7efa991a705bdb5a3b9d5300d824a1ce8759af
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jun 1 08:47:20 2012 +0100

    test: Exercise basic rectangle drawing code
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/test/Makefile.am b/test/Makefile.am
index a14396e..59fae6b 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -1,5 +1,6 @@
 stress_TESTS = \
 	basic-fillrect \
+	basic-rectangle \
 	basic-copyarea \
 	basic-copyarea-size \
 	basic-putimage \
diff --git a/test/basic-rectangle.c b/test/basic-rectangle.c
new file mode 100644
index 0000000..8f78bba
--- /dev/null
+++ b/test/basic-rectangle.c
@@ -0,0 +1,223 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <X11/Xutil.h> /* for XDestroyImage */
+
+#include "test.h"
+
+static void draw_rect(struct test_display *t, Drawable d, uint8_t alu,
+		      int x, int y, int w, int h, uint32_t fg, int lw)
+{
+	XGCValues val;
+	GC gc;
+
+	val.function = alu;
+	val.foreground = fg;
+	val.line_width = lw;
+
+	gc = XCreateGC(t->dpy, d, GCForeground | GCFunction | GCLineWidth, &val);
+	XDrawRectangle(t->dpy, d, gc, x, y, w, h);
+	XFreeGC(t->dpy, gc);
+}
+
+static void clear(struct test_display *dpy, struct test_target *tt)
+{
+	XRenderColor render_color = {0};
+	XRenderFillRectangle(dpy->dpy, PictOpClear, tt->picture, &render_color,
+			     0, 0, tt->width, tt->height);
+}
+
+static void zrect_tests(struct test *t, int reps, int sets, enum target target)
+{
+	struct test_target real, ref;
+	int r, s;
+
+	printf("Testing empty rects (%s): ", test_target_name(target));
+	fflush(stdout);
+
+	test_target_create_render(&t->real, target, &real);
+	clear(&t->real, &real);
+
+	test_target_create_render(&t->ref, target, &ref);
+	clear(&t->ref, &ref);
+
+	for (s = 0; s < sets; s++) {
+		for (r = 0; r < reps; r++) {
+			int x = rand() % (2*real.width) - real.width;
+			int y = rand() % (2*real.height) - real.height;
+			uint8_t alu = rand() % (GXset + 1);
+			uint32_t fg = rand();
+			uint32_t lw = rand() % 4;
+
+			draw_rect(&t->real, real.draw, alu,
+				  x, y, 0, 0, fg, lw);
+			draw_rect(&t->ref, ref.draw, alu,
+				  x, y, 0, 0, fg, lw);
+		}
+
+		test_compare(t,
+			     real.draw, real.format,
+			     ref.draw, ref.format,
+			     0, 0, real.width, real.height,
+			     "");
+	}
+
+	printf("passed [%d iterations x %d]\n", reps, sets);
+
+	test_target_destroy_render(&t->real, &real);
+	test_target_destroy_render(&t->ref, &ref);
+}
+
+static void hrect_tests(struct test *t, int reps, int sets, enum target target)
+{
+	struct test_target real, ref;
+	int r, s;
+
+	printf("Testing horizontal rects (%s): ", test_target_name(target));
+	fflush(stdout);
+
+	test_target_create_render(&t->real, target, &real);
+	clear(&t->real, &real);
+
+	test_target_create_render(&t->ref, target, &ref);
+	clear(&t->ref, &ref);
+
+	for (s = 0; s < sets; s++) {
+		for (r = 0; r < reps; r++) {
+			int x = rand() % (2*real.width) - real.width;
+			int y = rand() % (2*real.height) - real.height;
+			int w = rand() % (2*real.width);
+			uint8_t alu = rand() % (GXset + 1);
+			uint32_t fg = rand();
+			uint32_t lw = rand() % 4;
+
+			draw_rect(&t->real, real.draw, alu,
+				  x, y, w, 0, fg, lw);
+			draw_rect(&t->ref, ref.draw, alu,
+				  x, y, w, 0, fg, lw);
+		}
+
+		test_compare(t,
+			     real.draw, real.format,
+			     ref.draw, ref.format,
+			     0, 0, real.width, real.height,
+			     "");
+	}
+
+	printf("passed [%d iterations x %d]\n", reps, sets);
+
+	test_target_destroy_render(&t->real, &real);
+	test_target_destroy_render(&t->ref, &ref);
+}
+
+static void vrect_tests(struct test *t, int reps, int sets, enum target target)
+{
+	struct test_target real, ref;
+	int r, s;
+
+	printf("Testing vertical rects (%s): ", test_target_name(target));
+	fflush(stdout);
+
+	test_target_create_render(&t->real, target, &real);
+	clear(&t->real, &real);
+
+	test_target_create_render(&t->ref, target, &ref);
+	clear(&t->ref, &ref);
+
+	for (s = 0; s < sets; s++) {
+		for (r = 0; r < reps; r++) {
+			int x = rand() % (2*real.width) - real.width;
+			int y = rand() % (2*real.height) - real.height;
+			int h = rand() % (2*real.width);
+			uint8_t alu = rand() % (GXset + 1);
+			uint32_t fg = rand();
+			uint32_t lw = rand() % 4;
+
+			draw_rect(&t->real, real.draw, alu,
+				  x, y, 0, h, fg, lw);
+			draw_rect(&t->ref, ref.draw, alu,
+				  x, y, 0, h, fg, lw);
+		}
+
+		test_compare(t,
+			     real.draw, real.format,
+			     ref.draw, ref.format,
+			     0, 0, real.width, real.height,
+			     "");
+	}
+
+	printf("passed [%d iterations x %d]\n", reps, sets);
+
+	test_target_destroy_render(&t->real, &real);
+	test_target_destroy_render(&t->ref, &ref);
+}
+
+static void rect_tests(struct test *t, int reps, int sets, enum target target)
+{
+	struct test_target real, ref;
+	int r, s;
+
+	printf("Testing general (%s): ", test_target_name(target));
+	fflush(stdout);
+
+	test_target_create_render(&t->real, target, &real);
+	clear(&t->real, &real);
+
+	test_target_create_render(&t->ref, target, &ref);
+	clear(&t->ref, &ref);
+
+	for (s = 0; s < sets; s++) {
+		for (r = 0; r < reps; r++) {
+			int x = rand() % (2*real.width) - real.width;
+			int y = rand() % (2*real.height) - real.height;
+			int w = rand() % (2*real.width);
+			int h = rand() % (2*real.height);
+			uint8_t alu = rand() % (GXset + 1);
+			uint32_t fg = rand();
+			uint32_t lw = rand() % 4;
+
+			draw_rect(&t->real, real.draw, alu,
+				  x, y, w, h, fg, lw);
+			draw_rect(&t->ref, ref.draw, alu,
+				  x, y, w, h, fg, lw);
+		}
+
+		test_compare(t,
+			     real.draw, real.format,
+			     ref.draw, ref.format,
+			     0, 0, real.width, real.height,
+			     "");
+	}
+
+	printf("passed [%d iterations x %d]\n", reps, sets);
+
+	test_target_destroy_render(&t->real, &real);
+	test_target_destroy_render(&t->ref, &ref);
+}
+
+int main(int argc, char **argv)
+{
+	struct test test;
+	int i;
+
+	test_init(&test, argc, argv);
+
+	for (i = 0; i <= DEFAULT_ITERATIONS; i++) {
+		int reps = 1 << i;
+		int sets = 1 << (12 - i);
+		enum target t;
+
+		if (sets < 2)
+			sets = 2;
+
+		for (t = TARGET_FIRST; t <= TARGET_LAST; t++) {
+			zrect_tests(&test, reps, sets, t);
+			hrect_tests(&test, reps, sets, t);
+			vrect_tests(&test, reps, sets, t);
+			rect_tests(&test, reps, sets, t);
+		}
+	}
+
+	return 0;
+}
commit bc4323558bebd53e474fbc5404e1c41ab16d02e6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 21:19:26 2012 +0100

    sna: Handle negative values when computing the stipple modulus
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7f3af05..d7750c7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9772,6 +9772,8 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 
 	for (y1 = box->y1; y1 < box->y2; y1 = y2) {
 		int oy = (y1 - origin->y) % gc->stipple->drawable.height;
+		if (oy < 0)
+			oy += gc->stipple->drawable.height;
 
 		y2 = box->y2;
 		if (y2 - y1 > gc->stipple->drawable.height - oy)
@@ -9783,6 +9785,8 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 
 			x2 = box->x2;
 			ox = (x1 - origin->x) % gc->stipple->drawable.width;
+			if (ox < 0)
+				ox += gc->stipple->drawable.width;
 			bx1 = ox & ~7;
 			bx2 = ox + (x2 - x1);
 			if (bx2 > gc->stipple->drawable.width) {
commit aca994e03e6e0e16f55841418b0061b175e91e5d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 21:17:24 2012 +0100

    sna: Fill early break for clip process of spans
    
    When on the same Y-band as the span, as soon as the clip boxes are too
    far to the right, we can stop searching for more intersections.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1ea5074..7f3af05 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4155,12 +4155,10 @@ sna_fill_spans__cpu(DrawablePtr drawable,
 		    GCPtr gc, int n,
 		    DDXPointPtr pt, int *width, int sorted)
 {
-	RegionRec *clip = sna_gc(gc)->priv;
-	BoxRec extents;
+	const RegionRec *clip = sna_gc(gc)->priv;
 
 	DBG(("%s x %d\n", __FUNCTION__, n));
 
-	extents = clip->extents;
 	while (n--) {
 		BoxRec b;
 
@@ -4171,26 +4169,37 @@ sna_fill_spans__cpu(DrawablePtr drawable,
 		b.x2 = b.x1 + *width++;
 		b.y2 = b.y1 + 1;
 
-		if (!box_intersect(&b, &extents))
+		if (!box_intersect(&b, &clip->extents))
 			continue;
 
 		if (region_is_singular(clip)) {
+			DBG(("%s: singular fill: (%d, %d) x %d\n",
+			     __FUNCTION__, b.x1, b.y1, b.x2 - b.x1));
 			fbFill(drawable, gc, b.x1, b.y1, b.x2 - b.x1, 1);
 		} else {
 			const BoxRec * const clip_start = RegionBoxptr(clip);
 			const BoxRec * const clip_end = clip_start + clip->data->numRects;
 			const BoxRec *c;
 
+			DBG(("%s: multiple fills: (%d, %d) x %d, clip start((%d, %d), (%d,%d)), end((%d, %d), (%d, %d))\n",
+			     __FUNCTION__, b.x1, b.y1, b.x2 - b.x1,
+			     clip_start->x1, clip_start->y1,
+			     clip_start->x2, clip_start->y2,
+			     clip_end[-1].x1, clip_end[-1].y1,
+			     clip_end[-1].x2, clip_end[-1].y2));
+
 			c = find_clip_box_for_y(clip_start, clip_end, b.y1);
 			while (c != clip_end) {
 				int16_t x1, x2;
 
-				if (b.y2 <= c->y1)
-					break;
+				DBG(("%s: clip box? (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     c->x1, c->y1, c->x2, c->y2));
 
-				if (b.x1 >= c->x2)
+				if (b.y2 <= c->y1 || b.x2 <= c->x1)
 					break;
-				if (b.x2 <= c->x1) {
+
+				if (b.x1 > c->x2) {
 					c++;
 					continue;
 				}
@@ -4203,9 +4212,12 @@ sna_fill_spans__cpu(DrawablePtr drawable,
 					x1 = b.x1;
 				if (x2 > b.x2)
 					x2 = b.x2;
-				if (x2 > x1)
+				if (x2 > x1) {
+					DBG(("%s: fbFill(%d, %d) x %d\n",
+					     __FUNCTION__, x1, b.y1, x2 - x1));
 					fbFill(drawable, gc,
 					       x1, b.y1, x2 - x1, 1);
+				}
 			}
 		}
 	}
@@ -4518,12 +4530,10 @@ sna_fill_spans__fill_clip_boxes(DrawablePtr drawable,
 
 		c = find_clip_box_for_y(clip_start, clip_end, y);
 		while (c != clip_end) {
-			if (y + 1 <= c->y1)
+			if (y + 1 <= c->y1 || X2 <= c->x1)
 				break;
 
-			if (X1 >= c->x2)
-				break;
-			if (X2 <= c->x1) {
+			if (X1 >= c->x2) {
 				c++;
 				continue;
 			}
@@ -4719,12 +4729,10 @@ no_damage_clipped:
 							clip_end,
 							y);
 				while (c != clip_end) {
-					if (y + 1 <= c->y1)
+					if (y + 1 <= c->y1 || X2 <= c->x1)
 						break;
 
-					if (X1 >= c->x2)
-						break;
-					if (X2 <= c->x1) {
+					if (X1 >= c->x2) {
 						c++;
 						continue;
 					}
@@ -4824,12 +4832,10 @@ damage_clipped:
 							clip_end,
 							y);
 				while (c != clip_end) {
-					if (y + 1 <= c->y1)
+					if (y + 1 <= c->y1 || X2 <= c->x1)
 						break;
 
-					if (X1 >= c->x2)
-						break;
-					if (X2 <= c->x1) {
+					if (X1 >= c->x2) {
 						c++;
 						continue;
 					}
@@ -10364,7 +10370,7 @@ fallback:
 							       true)))
 		goto out;
 
-	DBG(("%s: fallback -- miFillPolygon -> sna_fill_spans__cpu\n",
+	DBG(("%s: fallback -- miPolyFillArc -> sna_fill_spans__cpu\n",
 	     __FUNCTION__));
 	sna_gc(gc)->priv = &data.region;
 	assert(gc->ops == (GCOps *)&sna_gc_ops);
commit 0fe150f898120ba9a00e1e6b9d66bec10d7e8a29
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 20:02:47 2012 +0100

    sna: Fix computation of box for clipped stippled rectangles
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3a9f325..1ea5074 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9900,19 +9900,28 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 
 		region_set(&clip, extents);
 		region_maybe_clip(&clip, gc->pCompositeClip);
-		if (!RegionNotEmpty(&clip))
+		if (!RegionNotEmpty(&clip)) {
+			DBG(("%s: all clipped\n", __FUNCTION__));
 			return true;
+		}
 
 		if (clip.data == NULL) {
+			DBG(("%s: clipped to extents ((%d, %d), (%d, %d))\n",
+			     __FUNCTION__,
+			     clip.extents.x1, clip.extents.y1,
+			     clip.extents.x2, clip.extents.y2));
 			do {
 				BoxRec box;
 
 				box.x1 = r->x + drawable->x;
-				box.x2 = bound(r->x, r->width);
+				box.x2 = bound(box.x1, r->width);
 				box.y1 = r->y + drawable->y;
-				box.y2 = bound(r->y, r->height);
+				box.y2 = bound(box.y1, r->height);
 				r++;
 
+				DBG(("%s: box (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     box.x1, box.y1, box.x2, box.y2));
 				if (!box_intersect(&box, &clip.extents))
 					continue;
 
@@ -9928,13 +9937,18 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 			const BoxRec * const clip_end = clip_start + clip.data->numRects;
 			const BoxRec *c;
 
+			DBG(("%s: clipped to boxes: start((%d, %d), (%d, %d)); end=((%d, %d), (%d, %d))\n", __FUNCTION__,
+			     clip_start->x1, clip_start->y1,
+			     clip_start->x2, clip_start->y2,
+			     clip_end->x1, clip_end->y1,
+			     clip_end->x2, clip_end->y2));
 			do {
 				BoxRec unclipped;
 
 				unclipped.x1 = r->x + drawable->x;
-				unclipped.x2 = bound(r->x, r->width);
+				unclipped.x2 = bound(unclipped.x1, r->width);
 				unclipped.y1 = r->y + drawable->y;
-				unclipped.y2 = bound(r->y, r->height);
+				unclipped.y2 = bound(unclipped.y1, r->height);
 				r++;
 
 				c = find_clip_box_for_y(clip_start,
commit a65c3b7b45df46ba993af6755b2460aa80fde930
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 19:15:05 2012 +0100

    sna: Add some more DBG for stippled blts
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d68c59c..3a9f325 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9758,8 +9758,8 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 				  struct kgem_bo *bo,
 				  uint32_t br00, uint32_t br13,
 				  GCPtr gc,
-				  BoxRec *box,
-				  DDXPointRec *origin)
+				  const BoxRec *box,
+				  const DDXPointRec *origin)
 {
 	int x1, x2, y1, y2;
 	uint32_t *b;
@@ -9787,8 +9787,13 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 			bw = ALIGN(bw, 2);
 			bh = y2 - y1;
 
-			DBG(("%s: box(%d, %d), (%d, %d) pat=(%d, %d), up=(%d, %d)\n",
-			     __FUNCTION__, x1, y1, x2, y2, ox, oy, bx1, bx2));
+			DBG(("%s: box((%d, %d)x(%d, %d)) origin=(%d, %d), pat=(%d, %d), up=(%d, %d), stipple=%dx%d\n",
+			     __FUNCTION__,
+			     x1, y1, x2-x1, y2-y1,
+			     origin->x, origin->y,
+			     ox, oy, bx1, bx2,
+			     gc->stipple->drawable.width,
+			     gc->stipple->drawable.height));
 
 			len = bw*bh;
 			len = ALIGN(len, 8) / 4;
@@ -9846,11 +9851,11 @@ sna_poly_fill_rect_stippled_n_blt(DrawablePtr drawable,
 	int16_t dx, dy;
 	uint32_t br00, br13;
 
-	DBG(("%s: upload (%d, %d), (%d, %d), origin (%d, %d), clipped=%d\n", __FUNCTION__,
+	DBG(("%s: upload (%d, %d), (%d, %d), origin (%d, %d), clipped=%d, alu=%d, opaque=%d\n", __FUNCTION__,
 	     extents->x1, extents->y1,
 	     extents->x2, extents->y2,
 	     origin.x, origin.y,
-	     clipped));
+	     clipped, gc->alu, gc->fillStyle == FillOpaqueStippled));
 
 	if (gc->stipple->drawable.width > 32 ||
 	    gc->stipple->drawable.height > 32)
commit b2cc64253217a90cf2ade9a8d6f1ed21b23fe875
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 19:45:56 2012 +0100

    sna: And combine the two migrate to CPU clauses
    
    Just a small tidy up to remove a duplicated predicate.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dfe4c5a..d68c59c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1528,22 +1528,23 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		     region->extents.x2 - region->extents.x1,
 		     region->extents.y2 - region->extents.y1));
 
-		if ((flags & MOVE_WRITE) == 0 && priv->cpu_damage == NULL &&
-		    region->extents.x2 - region->extents.x1 == 1 &&
-		    region->extents.y2 - region->extents.y1 == 1) {
-			/*  Often associated with synchronisation, KISS */
-			sna_read_boxes(sna,
-				       priv->gpu_bo, 0, 0,
-				       pixmap, 0, 0,
-				       &region->extents, 1);
-			goto done;
+		if (priv->cpu_damage == NULL) {
+			if ((flags & MOVE_WRITE) == 0 &&
+			    region->extents.x2 - region->extents.x1 == 1 &&
+			    region->extents.y2 - region->extents.y1 == 1) {
+				/*  Often associated with synchronisation, KISS */
+				sna_read_boxes(sna,
+					       priv->gpu_bo, 0, 0,
+					       pixmap, 0, 0,
+					       &region->extents, 1);
+				goto done;
+			}
+		} else {
+			if (sna_damage_contains_box__no_reduce(priv->cpu_damage,
+							       &region->extents))
+				goto done;
 		}
 
-		if (priv->cpu_damage &&
-		    sna_damage_contains_box__no_reduce(priv->cpu_damage,
-						       &region->extents))
-			goto done;
-
 		if (sna_damage_contains_box(priv->gpu_damage,
 					    &region->extents) != PIXMAN_REGION_OUT) {
 			RegionRec want, *r = region;
commit bd1dd4cf663ca230724dc6beb329b7eb270ea9d5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 17:57:56 2012 +0100

    sna: If the CPU damage already contains the migration region, we are done
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4f4b7b3..dfe4c5a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1539,6 +1539,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			goto done;
 		}
 
+		if (priv->cpu_damage &&
+		    sna_damage_contains_box__no_reduce(priv->cpu_damage,
+						       &region->extents))
+			goto done;
+
 		if (sna_damage_contains_box(priv->gpu_damage,
 					    &region->extents) != PIXMAN_REGION_OUT) {
 			RegionRec want, *r = region;
commit a2d92ade3911e62f950c85b5650fb13da1279ced
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 17:49:01 2012 +0100

    sna: Prevent readback of 1x1 if it lies inside CPU damage
    
    If the pixel exists only in the CPU damage, attempting to read it back
    from the GPU only results in garbage.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4511ec8..4f4b7b3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1528,7 +1528,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		     region->extents.x2 - region->extents.x1,
 		     region->extents.y2 - region->extents.y1));
 
-		if ((flags & MOVE_WRITE) == 0 &&
+		if ((flags & MOVE_WRITE) == 0 && priv->cpu_damage == NULL &&
 		    region->extents.x2 - region->extents.x1 == 1 &&
 		    region->extents.y2 - region->extents.y1 == 1) {
 			/*  Often associated with synchronisation, KISS */
@@ -1548,6 +1548,16 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			     region->extents.x2 - region->extents.x1,
 			     region->extents.y2 - region->extents.y1));
 
+			if ((flags & MOVE_WRITE) == 0 &&
+			    region->extents.x2 - region->extents.x1 == 1 &&
+			    region->extents.y2 - region->extents.y1 == 1) {
+				sna_read_boxes(sna,
+					       priv->gpu_bo, 0, 0,
+					       pixmap, 0, 0,
+					       &region->extents, 1);
+				goto done;
+			}
+
 			/* Expand the region to move 32x32 pixel blocks at a
 			 * time, as we assume that we will continue writing
 			 * afterwards and so aim to coallesce subsequent
@@ -10079,8 +10089,11 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		goto fallback;
 	}
 
-	if (!PM_IS_SOLID(draw, gc->planemask))
+	if (!PM_IS_SOLID(draw, gc->planemask)) {
+		DBG(("%s: fallback -- planemask=%#lx (not-solid)\n",
+		     __FUNCTION__, gc->planemask));
 		goto fallback;
+	}
 
 	/* Clear the cpu damage so that we refresh the GPU status of the
 	 * pixmap upon a redraw after a period of inactivity.
commit a34a06c4ba36b21e386b1f6371a4bd3f03ff35a1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 20:14:38 2012 +0100

    sna/glyphs: Clip the damage to the drawable for unclipped glyphs-to-dst
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 0de1f70..ca70e76 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -415,6 +415,36 @@ static void apply_damage(struct sna_composite_op *op,
 	sna_damage_add_box(op->damage, &box);
 }
 
+static void apply_damage_clipped_to_dst(struct sna_composite_op *op,
+					const struct sna_composite_rectangles *r,
+					DrawablePtr dst)
+{
+	BoxRec box;
+
+	if (op->damage == NULL)
+		return;
+
+	box.x1 = r->dst.x + op->dst.x;
+	box.y1 = r->dst.y + op->dst.y;
+	box.x2 = box.x1 + r->width;
+	box.y2 = box.y1 + r->height;
+
+	if (box.x1 < dst->x)
+		box.x1 = dst->x;
+
+	if (box.x2 > op->dst.width)
+		box.x2 = op->dst.width;
+
+	if (box.y1 < dst->y)
+		box.y1 = dst->y;
+
+	if (box.y2 > op->dst.height)
+		box.y2 = op->dst.height;
+
+	assert_pixmap_contains_box(op->dst.pixmap, &box);
+	sna_damage_add_box(op->damage, &box);
+}
+
 static Bool
 glyphs_to_dst(struct sna *sna,
 	      CARD8 op,
@@ -553,7 +583,7 @@ glyphs_to_dst(struct sna *sna,
 				     r.width, r.height));
 
 				tmp.blt(sna, &tmp, &r);
-				apply_damage(&tmp, &r);
+				apply_damage_clipped_to_dst(&tmp, &r, dst->pDrawable);
 			}
 
 next_glyph:
commit dcc7ba8ccf95db1c265bd5492f041f94ce368b7a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 17:48:40 2012 +0100

    sna/composite: Add debugging modes to disable acceleration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index d281776..9dc3808 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -41,6 +41,9 @@
 #define DBG(x) ErrorF x
 #endif
 
+#define NO_COMPOSITE 0
+#define NO_COMPOSITE_RECTANGLES 0
+
 #define BOUND(v)	(INT16) ((v) < MINSHORT ? MINSHORT : (v) > MAXSHORT ? MAXSHORT : (v))
 
 Bool sna_composite_create(struct sna *sna)
@@ -450,10 +453,13 @@ sna_composite(CARD8 op,
 					  src, mask, dst,
 					  src_x,  src_y,
 					  mask_x, mask_y,
-					  dst_x, dst_y,
+					  dst_x,  dst_y,
 					  width,  height))
 		return;
 
+	if (NO_COMPOSITE)
+		goto fallback;
+
 	if (wedged(sna)) {
 		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		goto fallback;
@@ -526,13 +532,15 @@ sna_composite(CARD8 op,
 	goto out;
 
 fallback:
-	DBG(("%s -- fallback dst=(%d, %d)+(%d, %d), size=(%d, %d)\n",
+	DBG(("%s -- fallback dst=(%d, %d)+(%d, %d), size=(%d, %d): region=((%d,%d), (%d, %d))\n",
 	     __FUNCTION__,
 	     dst_x, dst_y,
 	     dst->pDrawable->x, dst->pDrawable->y,
-	     width, height));
-	if (op == PictOpSrc || op == PictOpClear)
-		flags = MOVE_WRITE;
+	     width, height,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+	if (op <= PictOpSrc && !dst->alphaMap)
+		flags = MOVE_WRITE | MOVE_INPLACE_HINT;
 	else
 		flags = MOVE_WRITE | MOVE_READ;
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, flags))
@@ -563,10 +571,10 @@ fallback:
 
 	DBG(("%s: fallback -- fbComposite\n", __FUNCTION__));
 	fbComposite(op, src, mask, dst,
-		    src_x, src_y,
+		    src_x,  src_y,
 		    mask_x, mask_y,
-		    dst_x, dst_y,
-		    width, height);
+		    dst_x,  dst_y,
+		    width,  height);
 out:
 	REGION_UNINIT(NULL, &region);
 }
@@ -758,6 +766,9 @@ sna_composite_rectangles(CARD8		 op,
 	     RegionExtents(&region)->x1, RegionExtents(&region)->y1,
 	     RegionExtents(&region)->x2, RegionExtents(&region)->y2));
 
+	if (NO_COMPOSITE_RECTANGLES)
+		goto fallback;
+
 	if (wedged(sna))
 		goto fallback;
 
commit 984400b95a879c7c020de14545ac2e1d9e759e05
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 16:56:21 2012 +0100

    sna/glyphs: Don't apply drawable offsets twice
    
    The offsets from image_from_pict() already include the drawable offset
    so we need not apply them twice.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 45c24d8..0de1f70 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1173,10 +1173,8 @@ glyphs_fallback(CARD8 op,
 		y -= region.extents.y1;
 	} else {
 		mask_image = dst_image;
-		src_x -= x;
-		src_y -= y;
-		x += dst->pDrawable->x;
-		y += dst->pDrawable->y;
+		src_x -= x - dst->pDrawable->x;
+		src_y -= y - dst->pDrawable->y;
 	}
 
 	do {
commit 7bbd445c8cbeb7bcea2c56c4deb9bf6ccf6f2497
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 16:45:03 2012 +0100

    sna/trapezoids: Apply the drawable offset for the CPU migration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e604720..0fd1b03 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4425,10 +4425,10 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (mask) {
 		RegionRec region;
 
-		region.extents.x1 = dst_x;
-		region.extents.y1 = dst_y;
-		region.extents.x2 = dst_x + extents.x2;
-		region.extents.y2 = dst_y + extents.y2;
+		region.extents.x1 = dst_x + dst->pDrawable->x;
+		region.extents.y1 = dst_y + dst->pDrawable->y;
+		region.extents.x2 = region.extents.x1 + extents.x2;
+		region.extents.y2 = region.extents.y2 + extents.y2;
 		region.data = NULL;
 
 		DBG(("%s: move-to-cpu\n", __FUNCTION__));
commit c5313620a2f66bc8daf211fc77a9c480a79ea271
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 15:06:16 2012 +0100

    sna: Suppress flushes once we have cleared the kernel dirtiness
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 15261b6..537a6b0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2954,7 +2954,11 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
 {
 	/* The kernel will emit a flush *and* update its own flushing lists. */
-	return kgem_busy(kgem, bo->handle);
+	if (!bo->needs_flush)
+		return false;
+
+	bo->needs_flush = kgem_busy(kgem, bo->handle);
+	return bo->needs_flush;
 }
 
 bool kgem_check_bo(struct kgem *kgem, ...)
commit 0347c8abec9740d706d87dd2f719ac68f3d1c32e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 13:50:31 2012 +0100

    sna/glyphs: Fix upconverting of glyphs via gpu
    
    When using the mask instead of the src for adding the glyphs we need to
    pass the glyph coordinates through the mask coordinates rather than the
    source.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 81e833e..45c24d8 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -940,7 +940,11 @@ next_image:
 					if (glyph_atlas)
 						tmp.done(sna, &tmp);
 
-					if (this_atlas->format == format->format) {
+					DBG(("%s: atlas format=%08x, mask format=%08x\n",
+					     __FUNCTION__,
+					     (int)this_atlas->format,
+					     (int)(format->depth << 24 | format->format)));
+					if (this_atlas->format == (format->depth << 24 | format->format)) {
 						ok = sna->render.composite(sna, PictOpAdd,
 									   this_atlas, NULL, mask,
 									   0, 0, 0, 0, 0, 0,
@@ -954,6 +958,8 @@ next_image:
 									   &tmp);
 					}
 					if (!ok) {
+						DBG(("%s: fallback -- can not handle PictOpAdd of glyph onto mask!\n",
+						     __FUNCTION__));
 						FreePicture(mask, 0);
 						return FALSE;
 					}
@@ -968,6 +974,7 @@ next_image:
 				     r.src.x, r.src.y,
 				     glyph->info.width, glyph->info.height));
 
+				r.mask = r.src;
 				r.dst.x = x - glyph->info.x;
 				r.dst.y = y - glyph->info.y;
 				r.width  = glyph->info.width;
commit 90ae4f853222ee33206134f4efdc4accfb2f2c38
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 14:17:40 2012 +0100

    sna: Avoid mixing signed/unsigned int/int16 arithmetic
    
    Life becomes unpleasant with sign extension.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cebfc7e..4511ec8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5881,7 +5881,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 
 		while (--n) {
 			int16_t sdx, sdy;
-			uint16_t adx, ady, length;
+			int adx, ady, length;
 			int e, e1, e2, e3;
 			int x1 = x2, x;
 			int y1 = y2, y;
@@ -7079,7 +7079,7 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 		const xSegment *s = _s;
 		do {
 			int16_t sdx, sdy;
-			uint16_t adx, ady, length;
+			int adx, ady, length;
 			int e, e1, e2, e3;
 			int x1, x2;
 			int y1, y2;
commit 660c89e9742bac5ce7cbd480e08b4667e37dee8c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 13:18:21 2012 +0100

    sna: Use full 16-bit unsigned values for absolute differences
    
    Beware the overflow implicit in:
      adx = x2 >= x1 ? x2 - x1 : x1 - x2;
    when both x2 and x1 may be large signed 16-bit values
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50532
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ee2f69a..cebfc7e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5881,9 +5881,8 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 
 		while (--n) {
 			int16_t sdx, sdy;
-			int16_t adx, ady;
-			int16_t e, e1, e2, e3;
-			int16_t length;
+			uint16_t adx, ady, length;
+			int e, e1, e2, e3;
 			int x1 = x2, x;
 			int y1 = y2, y;
 			int oc1 = oc2;
@@ -7080,9 +7079,8 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 		const xSegment *s = _s;
 		do {
 			int16_t sdx, sdy;
-			int16_t adx, ady;
-			int16_t e, e1, e2, e3;
-			int16_t length;
+			uint16_t adx, ady, length;
+			int e, e1, e2, e3;
 			int x1, x2;
 			int y1, y2;
 			int oc1, oc2;
commit 035c2953751f58225bd6b1fcb4c1275ccb5526cb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 31 09:29:40 2012 +0100

    sna/glyphs: use add(WHITE, glyph, mask) for channel expansion
    
    If the glyph format does not match the mask format we can not simply add
    the two together, but must first perform a channel expansion (or
    contraction) by multiplying the glyph against a WHITE source.
    
    Normally the glyph and the mask are equivalent formats and so we hit the
    fast path.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 0a2e042..81e833e 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -130,6 +130,15 @@ static void unrealize_glyph_caches(struct sna *sna)
 		free(cache->glyphs);
 	}
 	memset(render->glyph, 0, sizeof(render->glyph));
+
+	if (render->white_image) {
+		pixman_image_unref(render->white_image);
+		render->white_image = NULL;
+	}
+	if (render->white_picture) {
+		FreePicture(render->white_picture, 0);
+		render->white_picture = NULL;
+	}
 }
 
 /* All caches for a single format share a single pixmap for glyph storage,
@@ -144,11 +153,13 @@ static void unrealize_glyph_caches(struct sna *sna)
 static Bool realize_glyph_caches(struct sna *sna)
 {
 	ScreenPtr screen = sna->scrn->pScreen;
+	pixman_color_t white = { 0xffff, 0xffff, 0xffff, 0xffff };
 	unsigned int formats[] = {
 		PIXMAN_a8,
 		PIXMAN_a8r8g8b8,
 	};
 	unsigned int i;
+	int error;
 
 	DBG(("%s\n", __FUNCTION__));
 
@@ -163,7 +174,6 @@ static Bool realize_glyph_caches(struct sna *sna)
 		PictFormatPtr pPictFormat;
 		CARD32 component_alpha;
 		int depth = PIXMAN_FORMAT_DEPTH(formats[i]);
-		int error;
 
 		pPictFormat = PictureMatchFormat(screen, depth, formats[i]);
 		if (!pPictFormat)
@@ -205,7 +215,10 @@ static Bool realize_glyph_caches(struct sna *sna)
 		cache->evict = rand() % GLYPH_CACHE_SIZE;
 	}
 
-	return TRUE;
+	sna->render.white_image = pixman_image_create_solid_fill(&white);
+	sna->render.white_picture =
+		CreateSolidPicture(0, (xRenderColor *)&white, &error);
+	return sna->render.white_image && sna->render.white_picture;
 
 bail:
 	unrealize_glyph_caches(sna);
@@ -412,7 +425,6 @@ glyphs_to_dst(struct sna *sna,
 {
 	struct sna_composite_op tmp;
 	ScreenPtr screen = dst->pDrawable->pScreen;
-	int index = screen->myNum;
 	PicturePtr glyph_atlas;
 	BoxPtr rects;
 	int nrect;
@@ -566,7 +578,6 @@ glyphs_slow(struct sna *sna,
 {
 	struct sna_composite_op tmp;
 	ScreenPtr screen = dst->pDrawable->pScreen;
-	int index = screen->myNum;
 	int16_t x, y;
 
 	if (NO_GLYPHS_SLOW)
@@ -705,7 +716,6 @@ glyphs_via_mask(struct sna *sna,
 {
 	ScreenPtr screen = dst->pDrawable->pScreen;
 	struct sna_composite_op tmp;
-	int index = screen->myNum;
 	CARD32 component_alpha;
 	PixmapPtr pixmap;
 	PicturePtr glyph_atlas, mask;
@@ -759,7 +769,6 @@ glyphs_via_mask(struct sna *sna,
 	    ((uint32_t)width * height * format->depth < 8 * 4096 ||
 	     too_large(sna, width, height))) {
 		pixman_image_t *mask_image;
-		int s;
 
 		DBG(("%s: small mask [format=%lx, depth=%d, size=%d], rendering glyphs to upload buffer\n",
 		     __FUNCTION__, (unsigned long)format->format,
@@ -784,7 +793,6 @@ upload:
 		}
 
 		memset(pixmap->devPrivate.ptr, 0, pixmap->devKind*height);
-		s = dst->pDrawable->pScreen->myNum;
 		do {
 			int n = list->len;
 			x += list->xOff;
@@ -833,15 +841,28 @@ upload:
 				     g->info.width,
 				     g->info.height));
 
-				pixman_image_composite(PictOpAdd,
-						       glyph_image,
-						       NULL,
-						       mask_image,
-						       0, 0,
-						       0, 0,
-						       xi, yi,
-						       g->info.width,
-						       g->info.height);
+				if (list->format == format) {
+					assert(pixman_image_get_format(glyph_image) == pixman_image_get_format(mask_image));
+					pixman_image_composite(PictOpAdd,
+							       glyph_image,
+							       NULL,
+							       mask_image,
+							       0, 0,
+							       0, 0,
+							       xi, yi,
+							       g->info.width,
+							       g->info.height);
+				} else {
+					pixman_image_composite(PictOpAdd,
+							       sna->render.white_image,
+							       glyph_image,
+							       mask_image,
+							       0, 0,
+							       0, 0,
+							       xi, yi,
+							       g->info.width,
+							       g->info.height);
+				}
 
 next_image:
 				x += g->info.xOff;
@@ -914,14 +935,25 @@ next_image:
 				}
 
 				if (this_atlas != glyph_atlas) {
+					bool ok;
+
 					if (glyph_atlas)
 						tmp.done(sna, &tmp);
 
-					if (!sna->render.composite(sna, PictOpAdd,
-								   this_atlas, NULL, mask,
-								   0, 0, 0, 0, 0, 0,
-								   width, height,
-								   &tmp)) {
+					if (this_atlas->format == format->format) {
+						ok = sna->render.composite(sna, PictOpAdd,
+									   this_atlas, NULL, mask,
+									   0, 0, 0, 0, 0, 0,
+									   width, height,
+									   &tmp);
+					} else {
+						ok = sna->render.composite(sna, PictOpAdd,
+									   sna->render.white_picture, this_atlas, mask,
+									   0, 0, 0, 0, 0, 0,
+									   width, height,
+									   &tmp);
+					}
+					if (!ok) {
 						FreePicture(mask, 0);
 						return FALSE;
 					}
@@ -1046,6 +1078,7 @@ glyphs_fallback(CARD8 op,
 		GlyphListPtr list,
 		GlyphPtr *glyphs)
 {
+	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
 	pixman_image_t *dst_image, *mask_image, *src_image;
 	int dx, dy, x, y;
 	BoxRec box;
@@ -1178,16 +1211,30 @@ glyphs_fallback(CARD8 op,
 				     g->info.width,
 				     g->info.height));
 
-				pixman_image_composite(PictOpAdd,
-						       glyph_image,
-						       NULL,
-						       mask_image,
-						       dx, dy,
-						       0, 0,
-						       x - g->info.x,
-						       y - g->info.y,
-						       g->info.width,
-						       g->info.height);
+				if (list->format == mask_format) {
+					assert(pixman_image_get_format(glyph_image) == pixman_image_get_format(mask_image));
+					pixman_image_composite(PictOpAdd,
+							       glyph_image,
+							       NULL,
+							       mask_image,
+							       dx, dy,
+							       0, 0,
+							       x - g->info.x,
+							       y - g->info.y,
+							       g->info.width,
+							       g->info.height);
+				} else {
+					pixman_image_composite(PictOpAdd,
+							       sna->render.white_image,
+							       glyph_image,
+							       mask_image,
+							       dx, dy,
+							       0, 0,
+							       x - g->info.x,
+							       y - g->info.y,
+							       g->info.width,
+							       g->info.height);
+				}
 			} else {
 				int xi = x - g->info.x;
 				int yi = y - g->info.y;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 8764796..b9360c3 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -274,6 +274,8 @@ struct sna_render {
 		uint16_t count;
 		uint16_t evict;
 	} glyph[2];
+	pixman_image_t *white_image;
+	PicturePtr white_picture;
 
 	uint16_t vertex_start;
 	uint16_t vertex_index;
commit cf5b3e2ebf4ee0330f5421b9377bb512a94ec284
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 14:44:59 2012 +0100

    sna: Don't attempt to flush scanout if unattached
    
    This was lost in the midst of the OSTimer overhaul.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50393
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8f2fdea..ee2f69a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11899,7 +11899,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 	struct sna_pixmap *priv;
 
 	priv = sna_accel_scanout(sna);
-	if (priv == NULL) {
+	if (priv == NULL || priv->gpu_bo == NULL) {
 		DBG(("%s -- no scanout attached\n", __FUNCTION__));
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 		return false;
commit 3a64ba0895035f6567e9a15357396b06c0f2d6dd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 13:51:07 2012 +0100

    sna: Convert remaining drmCommands to drmIoctl
    
    This helps to silence valgrind
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 828df03..15261b6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -568,6 +568,11 @@ static bool semaphores_enabled(void)
 	return detected;
 }
 
+static bool __kgem_throttle(struct kgem *kgem)
+{
+	return drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL) == -EIO;
+}
+
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
@@ -579,7 +584,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->fd = fd;
 	kgem->gen = gen;
-	kgem->wedged = drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO;
+	kgem->wedged = __kgem_throttle(kgem);
 	kgem->wedged |= DBG_NO_HW;
 
 	kgem->batch_size = ARRAY_SIZE(kgem->batch);
@@ -1864,7 +1869,7 @@ void _kgem_submit(struct kgem *kgem)
 				       DRM_IOCTL_I915_GEM_EXECBUFFER2,
 				       &execbuf);
 			while (ret == -1 && errno == EBUSY && retry--) {
-				drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE);
+				__kgem_throttle(kgem);
 				ret = drmIoctl(kgem->fd,
 					       DRM_IOCTL_I915_GEM_EXECBUFFER2,
 					       &execbuf);
@@ -1954,8 +1959,7 @@ void kgem_throttle(struct kgem *kgem)
 {
 	static int warned;
 
-	kgem->wedged |= drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO;
-
+	kgem->wedged |= __kgem_throttle(kgem);
 	if (kgem->wedged && !warned) {
 		struct sna *sna = container_of(kgem, struct sna, kgem);
 		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 10f4421..f8e386b 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -334,8 +334,7 @@ static int sna_open_drm_master(ScrnInfoPtr scrn)
 	val = FALSE;
 	gp.param = I915_PARAM_HAS_BLT;
 	gp.value = &val;
-	if (drmCommandWriteRead(fd, DRM_I915_GETPARAM,
-				&gp, sizeof(gp))) {
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "Failed to detect BLT.  Kernel 2.6.37 required.\n");
 		drmClose(fd);
commit 47e2db0ba8ae3fbcdf58cba26c58ec0932b4c90b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 10:37:22 2012 +0100

    sna: Replace FREELIKE_BLOCK with MAKE_MEM_NOACCESS
    
    And similarly for MALLOCLIKE_BLOCK. The issue being that valgrind may
    overwrite such blocks with alloc-fill and free-fill values, but when in
    fact they are defined and still in use by the GPU. This can lead to the
    GPU processing garbage, and GPU hangs.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50393
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8587936..828df03 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -984,7 +984,7 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 	     __FUNCTION__, type ? "CPU" : "GTT",
 	     bo->handle, kgem->vma[type].count));
 
-	VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+	VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
 	munmap(MAP(bo->map), bytes(bo));
 	bo->map = NULL;
 
@@ -3194,7 +3194,7 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		assert(bo->map);
 		assert(bo->rq == NULL);
 
-		VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+		VG(if (type) VALGRIND_MAKE_MEM_NOACCESS(MAP(bo->map), bytes(bo)));
 		munmap(MAP(bo->map), bytes(bo));
 		bo->map = NULL;
 		list_del(&bo->vma);
@@ -3348,7 +3348,7 @@ retry:
 		return NULL;
 	}
 
-	VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, bytes(bo), 0, 1));
+	VG(VALGRIND_MAKE_MEM_DEFINED(mmap_arg.addr_ptr, bytes(bo)));
 
 	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
 	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
commit 08a630dc5ef87e551865e558fe4fc45ea66457b4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 08:50:44 2012 +0100

    sna: Ensure we flush scanout even when otherwise idle.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50477
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 72bb893..8f2fdea 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11924,11 +11924,12 @@ static bool sna_accel_do_flush(struct sna *sna)
 			return priv->cpu_damage || !__kgem_flush(&sna->kgem, priv->gpu_bo);
 		}
 	} else {
-		if (priv->cpu_damage == NULL && priv->gpu_bo->exec == NULL) {
+		if (priv->cpu_damage == NULL &&
+		    !__kgem_flush(&sna->kgem, priv->gpu_bo)) {
 			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
 		} else {
 			sna->timer_active |= 1 << FLUSH_TIMER;
-			sna->timer_ready |= 1 << FLUSH_TIMER;
+			sna->timer_ready  |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval / 2;
 			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
commit 10ca765ad346931087be146f3e6d1f4069c95f26
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 08:32:34 2012 +0100

    Mark a few more options as being UXA specific
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index 5679e0e..56f1ae9 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -12,15 +12,12 @@ const OptionInfoRec intel_options[] = {
 	{OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	1},
 	{OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	1},
+	{OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_RELAXED_FENCING,"RelaxedFencing",	OPTV_BOOLEAN,	{0},	1},
 #ifdef INTEL_XVMC
 	{OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	1},
 #endif
-	{OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, 0},
-	{OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, 0},
-	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, 0},
-	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, 0},
-	{OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	1},
-	{OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	1},
 #ifdef USE_SNA
 	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	1},
 	{OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	1},
@@ -29,6 +26,9 @@ const OptionInfoRec intel_options[] = {
 #endif
 #ifdef USE_UXA
 	{OPTION_FALLBACKDEBUG,	"FallbackDebug",OPTV_BOOLEAN,	{0},	0},
+	{OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, 0},
 	{OPTION_BUFFER_CACHE,	"BufferCache",	OPTV_BOOLEAN,   {0},    1},
 	{OPTION_SHADOW,		"Shadow",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_TRIPLE_BUFFER,	"TripleBuffer", OPTV_BOOLEAN,	{0},	1},
commit 740368c4c6eb547adad247ff529d16e594d6459b
Author: Nick Bowler <nbowler at draconx.ca>
Date:   Mon May 28 18:28:28 2012 -0400

    Include config.h in intel_options.c
    
    Commit 8a9a585341e2 ("Only create a single instance of the intel_options
    array") moved the definition of intel_options into a separate .c file.
    Several of the defined options are #ifdef'd based on the configuration,
    but since config.h is never included, the macros being tested are never
    defined.  Therefore, none of the configuration-specific options will
    ever be available at runtime, even if they should be.
    
    Add an inclusion of config.h so that such configuration-specific options
    work again.
    
    Signed-off-by: Nick Bowler <nbowler at draconx.ca>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index 56929be..5679e0e 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -1,3 +1,7 @@
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
 #include "intel_options.h"
 
 const OptionInfoRec intel_options[] = {
commit e1b720c40cf511b4ed6d387891d715522d0d54b6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 30 07:47:53 2012 +0100

    sna/sprite: Restore another xf86drm.h
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index fff31fb..8c3cdaa 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -33,6 +33,7 @@
 
 #include "intel_options.h"
 
+#include <xf86drm.h>
 #include <xf86xv.h>
 #include <X11/extensions/Xv.h>
 #include <fourcc.h>
@@ -197,9 +198,9 @@ sna_video_sprite_show(struct sna *sna,
 		set.plane_id = plane;
 		set.value = video->color_key;
 
-		if (drmCommandWrite(sna->kgem.fd,
-				    DRM_I915_SET_SPRITE_DESTKEY,
-				    &set, sizeof(set)))
+		if (drmIoctl(sna->kgem.fd,
+			     DRM_IOCTL_I915_SET_SPRITE_DESTKEY,
+			     &set))
 			xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
 				   "failed to update color key\n");
 
commit e986b06162abf286b3f8f97b791fd66dcf8f9aec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 22:21:05 2012 +0100

    sna/overlay: Handle return from GETPARAM query correctly
    
    drmIoctl() returns 0 on success, not a positive value.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index c39bbfc..945818a 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -114,7 +114,7 @@ static Bool sna_has_overlay(struct sna *sna)
 	gp.param = I915_PARAM_HAS_OVERLAY;
 	gp.value = &has_overlay;
 	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp);
-	return ret > 0 && has_overlay;
+	return ret == 0 && has_overlay;
 }
 
 static Bool sna_video_overlay_update_attrs(struct sna *sna,
commit 68874e2d694cc68f5f051ba5fe4a79a71fdb2dbc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 21:28:39 2012 +0100

    sna: Fix chunking for large stipples
    
    Reported-and-tested-by: Matti Hamalainen <ccr at tnsp.org>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49510
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9029d9f..72bb893 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9751,7 +9751,7 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 	uint32_t *b;
 
 	for (y1 = box->y1; y1 < box->y2; y1 = y2) {
-		int oy = (y1 - origin->y)  % gc->stipple->drawable.height;
+		int oy = (y1 - origin->y) % gc->stipple->drawable.height;
 
 		y2 = box->y2;
 		if (y2 - y1 > gc->stipple->drawable.height - oy)
@@ -9765,12 +9765,11 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 			ox = (x1 - origin->x) % gc->stipple->drawable.width;
 			bx1 = ox & ~7;
 			bx2 = ox + (x2 - x1);
-			if (bx2 - bx1 > gc->stipple->drawable.width) {
-				bx2 = bx1 + gc->stipple->drawable.width;
-				x2 = x1 + (bx1-ox) + gc->stipple->drawable.width;
+			if (bx2 > gc->stipple->drawable.width) {
+				bx2 = gc->stipple->drawable.width;
+				x2 = x1 + bx2-ox;
 			}
-			bx2 = (bx2 + 7) & ~7;
-			bw = (bx2 - bx1)/8;
+			bw = (bx2 - bx1 + 7)/8;
 			bw = ALIGN(bw, 2);
 			bh = y2 - y1;
 
commit 12af82581118470f59817f626d61df655950e243
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 21:45:32 2012 +0100

    sna/overlay: Replace drmCommand with direct invocations of drmIoctl
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index d7a62f3..c39bbfc 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -113,10 +113,8 @@ static Bool sna_has_overlay(struct sna *sna)
 
 	gp.param = I915_PARAM_HAS_OVERLAY;
 	gp.value = &has_overlay;
-	ret = drmCommandWriteRead(sna->kgem.fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
-
-	return !! has_overlay;
-	(void)ret;
+	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp);
+	return ret > 0 && has_overlay;
 }
 
 static Bool sna_video_overlay_update_attrs(struct sna *sna,
@@ -138,22 +136,18 @@ static Bool sna_video_overlay_update_attrs(struct sna *sna,
 	attrs.gamma4 = video->gamma4;
 	attrs.gamma5 = video->gamma5;
 
-	return drmCommandWriteRead(sna->kgem.fd, DRM_I915_OVERLAY_ATTRS,
-				  &attrs, sizeof(attrs)) == 0;
+	return drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_OVERLAY_ATTRS, &attrs) == 0;
 }
 
 static void sna_video_overlay_off(struct sna *sna)
 {
 	struct drm_intel_overlay_put_image request;
-	int ret;
 
 	DBG(("%s()\n", __FUNCTION__));
 
 	request.flags = 0;
 
-	ret = drmCommandWrite(sna->kgem.fd, DRM_I915_OVERLAY_PUT_IMAGE,
-			      &request, sizeof(request));
-	(void)ret;
+	drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &request);
 }
 
 static void sna_video_overlay_stop(ScrnInfoPtr scrn,
@@ -448,8 +442,7 @@ sna_video_overlay_show(struct sna *sna,
 
 	DBG(("%s: flags=%x\n", __FUNCTION__, request.flags));
 
-	return drmCommandWrite(sna->kgem.fd, DRM_I915_OVERLAY_PUT_IMAGE,
-			       &request, sizeof(request)) == 0;
+	return drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &request) == 0;
 }
 
 static int
commit 3a1b6e57fbbbb4c8e5ac9588537840f4ec176ae6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 21:37:16 2012 +0100

    sna/overlay: Restore one xf86drm.h include
    
    This was removed in haste, when it is required for drmIoctl and friends.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index eb39a25..d7a62f3 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -31,6 +31,7 @@
 #include "sna.h"
 #include "sna_video.h"
 
+#include <xf86drm.h>
 #include <xf86xv.h>
 #include <X11/extensions/Xv.h>
 #include <fourcc.h>
commit 264d1b1e1fb368f7845a8112bfa2ad2224acabd9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 13:03:56 2012 +0100

    sna: Also check for a free exec slot for the upload buffers
    
    As we check before allocating the upload buffer, we can not be certain
    that we will allocate an already attached bo or that we have a free exec
    slot. So always check that we have an extra exec slot available - the
    false positive rate is going to be negligible.
    
    Reported-by: Zdenek Kabelac <zdenek.kabelac at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50457
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b8f0970..ee18ebe 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1792,11 +1792,8 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 105: 5))
 		return 0;
-	if (!kgem_check_exec(&sna->kgem, 1))
+	if (!kgem_check_reloc_and_exec(&sna->kgem, 1))
 		return 0;
-	if (!kgem_check_reloc(&sna->kgem, 1))
-		return 0;
-
 	if (op->need_magic_ca_pass && sna->render.vbo)
 		return 0;
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 4bc4bf3..8ab707f 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1155,9 +1155,7 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, 25))
 		return 0;
-	if (!kgem_check_exec(&sna->kgem, 1))
-		return 0;
-	if (!kgem_check_reloc(&sna->kgem, 1))
+	if (!kgem_check_reloc_and_exec(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index bb6bcf2..deff5df 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -327,6 +327,11 @@ static inline bool kgem_check_exec(struct kgem *kgem, int n)
 	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
 }
 
+static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
+{
+	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
+}
+
 static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
 						  int num_dwords,
 						  int num_surfaces)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 02c11cf..9029d9f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3041,7 +3041,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
 		    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-		    !kgem_check_reloc(&sna->kgem, 2)) {
+		    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
 		}
@@ -3170,7 +3170,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 			if (!kgem_check_batch(&sna->kgem, 12) ||
 			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-			    !kgem_check_reloc(&sna->kgem, 2)) {
+			    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
 			}
@@ -5226,7 +5226,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 
 			if (!kgem_check_batch(&sna->kgem, 8) ||
 			    !kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
-			    !kgem_check_reloc(&sna->kgem, 2)) {
+			    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
 			}
@@ -5344,7 +5344,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
 		    !kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
-		    !kgem_check_reloc(&sna->kgem, 2)) {
+		    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
 		}
@@ -9400,7 +9400,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 
 				if (!kgem_check_batch(&sna->kgem, 8) ||
 				    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-				    !kgem_check_reloc(&sna->kgem, 2)) {
+				    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 					_kgem_submit(&sna->kgem);
 					_kgem_set_mode(&sna->kgem, KGEM_BLT);
 				}
@@ -9539,7 +9539,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				} else {
 					if (!kgem_check_batch(&sna->kgem, 8) ||
 					    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-					    !kgem_check_reloc(&sna->kgem, 2)) {
+					    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
 					}
@@ -9679,7 +9679,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					} else {
 						if (!kgem_check_batch(&sna->kgem, 8) ||
 						    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-						    !kgem_check_reloc(&sna->kgem, 2)) {
+						    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
 						}
@@ -11502,7 +11502,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
 		    !kgem_check_bo_fenced(&sna->kgem, bo) ||
-		    !kgem_check_reloc(&sna->kgem, 2)) {
+		    !kgem_check_reloc_and_exec(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
 		}
commit c7bcca9a3fb6c669d8b093e6399914693770b0b3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 12:49:12 2012 +0100

    sna: Make finding DPMS property valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b8e69b5..5ed3179 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1263,41 +1263,40 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 	DBG(("%s: dpms=%d\n", __FUNCTION__, dpms));
 
 	for (i = 0; i < koutput->count_props; i++) {
-		drmModePropertyPtr props;
+		struct drm_mode_get_property prop;
 
-		props = drmModeGetProperty(sna->kgem.fd, koutput->props[i]);
-		if (!props)
+		VG_CLEAR(prop);
+		prop.prop_id = koutput->props[i];
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
 			continue;
 
-		if (!strcmp(props->name, "DPMS")) {
-			/* Record the value of the backlight before turning
-			 * off the display, and reset it after turning it on.
-			 * Order is important as the kernel may record and also
-			 * reset the backlight across DPMS. Hence we need to
-			 * record the value before the kernel modifies it
-			 * and reapply it afterwards.
-			 */
-			if (dpms == DPMSModeOff)
-				sna_output_dpms_backlight(output,
-							  sna_output->dpms_mode,
-							  dpms);
-
-			drmModeConnectorSetProperty(sna->kgem.fd,
-						    sna_output->output_id,
-						    props->prop_id,
-						    dpms);
-
-			if (dpms != DPMSModeOff)
-				sna_output_dpms_backlight(output,
-							  sna_output->dpms_mode,
-							  dpms);
-
-			sna_output->dpms_mode = dpms;
-			drmModeFreeProperty(props);
-			return;
-		}
+		if (strcmp(prop.name, "DPMS"))
+			continue;
 
-		drmModeFreeProperty(props);
+		/* Record the value of the backlight before turning
+		 * off the display, and reset it after turning it on.
+		 * Order is important as the kernel may record and also
+		 * reset the backlight across DPMS. Hence we need to
+		 * record the value before the kernel modifies it
+		 * and reapply it afterwards.
+		 */
+		if (dpms == DPMSModeOff)
+			sna_output_dpms_backlight(output,
+						  sna_output->dpms_mode,
+						  dpms);
+
+		drmModeConnectorSetProperty(sna->kgem.fd,
+					    sna_output->output_id,
+					    prop.prop_id,
+					    dpms);
+
+		if (dpms != DPMSModeOff)
+			sna_output_dpms_backlight(output,
+						  sna_output->dpms_mode,
+						  dpms);
+
+		sna_output->dpms_mode = dpms;
+		break;
 	}
 }
 
commit 3c074797094dc7675bb6ae549630fdf061ab5258
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 12:45:20 2012 +0100

    sna: Make GetEDID valgrind clean.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 1f67499..b8e69b5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1086,24 +1086,22 @@ sna_output_attach_edid(xf86OutputPtr output)
 
 	/* look for an EDID property */
 	for (i = 0; i < koutput->count_props; i++) {
-		drmModePropertyPtr props;
+		struct drm_mode_get_property prop;
 
-		props = drmModeGetProperty(sna->kgem.fd, koutput->props[i]);
-		if (!props)
+		VG_CLEAR(prop);
+		prop.prop_id = koutput->props[i];
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPROPERTY, &prop))
 			continue;
 
-		if (!(props->flags & DRM_MODE_PROP_BLOB)) {
-			drmModeFreeProperty(props);
+		if (!(prop.flags & DRM_MODE_PROP_BLOB))
 			continue;
-		}
 
-		if (!strcmp(props->name, "EDID")) {
-			drmModeFreePropertyBlob(edid_blob);
-			edid_blob =
-				drmModeGetPropertyBlob(sna->kgem.fd,
-						       koutput->prop_values[i]);
-		}
-		drmModeFreeProperty(props);
+		if (strcmp(prop.name, "EDID"))
+			continue;
+
+		drmModeFreePropertyBlob(edid_blob);
+		edid_blob = drmModeGetPropertyBlob(sna->kgem.fd,
+						   koutput->prop_values[i]);
 	}
 
 	if (edid_blob) {
commit 560e50c0a5afa0824086557581ca6aa5696b44a2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 12:36:31 2012 +0100

    sna: Make GetEncoder() valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ae0030d..1f67499 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -79,7 +79,6 @@ struct sna_output {
 	struct sna_mode *mode;
 	int output_id;
 	drmModeConnectorPtr mode_output;
-	drmModeEncoderPtr mode_encoder;
 	int num_props;
 	struct sna_property *props;
 	void *private_data;
@@ -1649,7 +1648,7 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	struct sna *sna = to_sna(scrn);
 	xf86OutputPtr output;
 	drmModeConnectorPtr koutput;
-	drmModeEncoderPtr kencoder;
+	struct drm_mode_get_encoder enc;
 	struct sna_output *sna_output;
 	const char *output_name;
 	const char *s;
@@ -1660,8 +1659,9 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (!koutput)
 		return;
 
-	kencoder = drmModeGetEncoder(sna->kgem.fd, koutput->encoders[0]);
-	if (!kencoder)
+	VG_CLEAR(enc);
+	enc.encoder_id = koutput->encoders[0];
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETENCODER, &enc))
 		goto cleanup_connector;
 
 	if (koutput->connector_type < ARRAY_SIZE(output_names))
@@ -1673,12 +1673,12 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (xf86IsEntityShared(scrn->entityList[0])) {
 		s = xf86GetOptValString(sna->Options, OPTION_ZAPHOD);
 		if (s && !sna_zaphod_match(s, name))
-			goto cleanup_encoder;
+			goto cleanup_connector;
 	}
 
 	output = xf86OutputCreate(scrn, &sna_output_funcs, name);
 	if (!output)
-		goto cleanup_encoder;
+		goto cleanup_connector;
 
 	sna_output = calloc(sizeof(struct sna_output), 1);
 	if (!sna_output)
@@ -1686,7 +1686,6 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 
 	sna_output->output_id = mode->mode_res->connectors[num];
 	sna_output->mode_output = koutput;
-	sna_output->mode_encoder = kencoder;
 	sna_output->mode = mode;
 
 	output->mm_width = koutput->mmWidth;
@@ -1698,8 +1697,8 @@ sna_output_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	if (is_panel(koutput->connector_type))
 		sna_output_backlight_init(output);
 
-	output->possible_crtcs = kencoder->possible_crtcs;
-	output->possible_clones = kencoder->possible_clones;
+	output->possible_crtcs = enc.possible_crtcs;
+	output->possible_clones = enc.possible_clones;
 	output->interlaceAllowed = TRUE;
 
 	sna_output->output = output;
@@ -1711,8 +1710,6 @@ cleanup_output:
 	xf86OutputDestroy(output);
 cleanup_connector:
 	drmModeFreeConnector(koutput);
-cleanup_encoder:
-	drmModeFreeEncoder(kencoder);
 }
 
 struct sna_visit_set_pixmap_window {
commit b4b6fa795201379694a03d537064dbca446cfdfd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 29 11:32:28 2012 +0100

    sna: Trim the set of includes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 3372e7e..4bc4bf3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -34,8 +34,6 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
-
 #include "sna.h"
 #include "sna_reg.h"
 #include "sna_render.h"
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index b4d9203..68c1bb7 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -34,8 +34,6 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
-
 #include "sna.h"
 #include "sna_reg.h"
 #include "sna_render.h"
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index f3b7537..d9068de 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -34,8 +34,6 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
-
 #include "sna.h"
 #include "sna_reg.h"
 #include "sna_render.h"
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 9eed660..3f31437 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -34,8 +34,6 @@
 #include "config.h"
 #endif
 
-#include <xf86.h>
-
 #include "sna.h"
 #include "sna_reg.h"
 #include "sna_render.h"
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a999441..8587936 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -39,6 +39,8 @@
 #include <errno.h>
 #include <fcntl.h>
 
+#include <xf86drm.h>
+
 #ifdef HAVE_VALGRIND
 #include <valgrind.h>
 #include <memcheck.h>
diff --git a/src/sna/sna.h b/src/sna/sna.h
index b4f2ba4..d9fd9d1 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -45,18 +45,15 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "compiler.h"
 
-#include <xf86_OSproc.h>
-#include <xf86Pci.h>
-#include <xf86Cursor.h>
-#include <xf86xv.h>
 #include <xf86Crtc.h>
-#include <xf86RandR12.h>
+#include <windowstr.h>
+#include <glyphstr.h>
+#include <picturestr.h>
 #include <gcstruct.h>
 
 #include <xorg-server.h>
 #include <pciaccess.h>
 
-#include <xf86drm.h>
 #include <xf86drmMode.h>
 
 #include "../compat-api.h"
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 3ec2d23..ae0030d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -40,7 +40,8 @@
 #include <xorgVersion.h>
 #include <X11/Xatom.h>
 #include <X11/extensions/dpmsconst.h>
-#include <xf86DDC.h>
+#include <xf86drm.h>
+#include <xf86DDC.h> /* for xf86InterpretEDID */
 
 #include "sna.h"
 #include "sna_reg.h"
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 59de4e2..c74ecc4 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -36,17 +36,16 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "config.h"
 #endif
 
-#include <xf86.h>
-#include <xf86_OSproc.h>
 #include <errno.h>
 #include <string.h>
 
-#include <i915_drm.h>
-#include <dri2.h>
-
 #include "sna.h"
 #include "sna_reg.h"
 
+#include <xf86drm.h>
+#include <i915_drm.h>
+#include <dri2.h>
+
 #if DRI2INFOREC_VERSION <= 2
 #error DRI2 version supported by the Xserver is too old
 #endif
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index af8bfe9..10f4421 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -46,6 +46,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <errno.h>
 
 #include <xf86cmap.h>
+#include <xf86drm.h>
 #include <micmap.h>
 #include <fb.h>
 
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 9c49281..8764796 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -3,6 +3,8 @@
 
 #include "compiler.h"
 
+#include <picturestr.h>
+
 #define GRADIENT_CACHE_SIZE 16
 
 struct sna;
diff --git a/src/sna/sna_transform.c b/src/sna/sna_transform.c
index 3cd9b07..644d404 100644
--- a/src/sna/sna_transform.c
+++ b/src/sna/sna_transform.c
@@ -31,7 +31,6 @@
 #include "config.h"
 #endif
 
-#include "xf86.h"
 #include "sna.h"
 
 /**
diff --git a/src/sna/sna_video.h b/src/sna/sna_video.h
index 687fbe1..7bfc971 100644
--- a/src/sna/sna_video.h
+++ b/src/sna/sna_video.h
@@ -27,8 +27,8 @@ THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #ifndef SNA_VIDEO_H
 #define SNA_VIDEO_H
 
-#include <xf86.h>
 #include <xf86_OSproc.h>
+#include <xf86xv.h>
 #include <fourcc.h>
 
 #if defined(XvMCExtension) && defined(ENABLE_XVMC)
commit 9d1a9e666763a3e1270c8f000b1b37a6b926441f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 20:06:35 2012 +0100

    sna: Bump experimental ioctl number for vmap
    
    Gazumped!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index af5b4b9..a999441 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -82,7 +82,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
 
 #if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
-#define DRM_I915_GEM_VMAP       0x2c
+#define DRM_I915_GEM_VMAP       0x2d
 #define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
 #define I915_PARAM_HAS_VMAP              19
 struct drm_i915_gem_vmap {
commit 55e6f5f220401318529e81f7c96fe0af3b893a0c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 16:21:09 2012 +0100

    sna: Use the local function for turning the cursor off prior to release
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 5d84578..3ec2d23 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -915,7 +915,7 @@ sna_crtc_destroy(xf86CrtcPtr crtc)
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 
-	drmModeSetCursor(sna->kgem.fd, crtc_id(sna_crtc), 0, 64, 64);
+	sna_crtc_hide_cursor(crtc);
 	gem_close(sna->kgem.fd, sna_crtc->cursor);
 
 	list_del(&sna_crtc->link);
commit 77dd429222922aa1ba7f283553e11e60e4d5c496
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:40:23 2012 +0100

    sna/dri: Make WAIT_VBLANK valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 2ce4e40..59de4e2 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -686,6 +686,11 @@ sna_dri_copy_region(DrawablePtr draw,
 	copy(sna, draw, region, dst, src, false);
 }
 
+static inline int sna_wait_vblank(struct sna *sna, drmVBlank *vbl)
+{
+	return drmIoctl(sna->kgem.fd, DRM_IOCTL_WAIT_VBLANK, vbl);
+}
+
 #if DRI2INFOREC_VERSION >= 4
 
 static int
@@ -1080,6 +1085,7 @@ static void sna_dri_vblank_handle(int fd,
 			if (kgem_bo_is_busy(info->bo)) {
 				drmVBlank vbl;
 
+				VG_CLEAR(vbl);
 				vbl.request.type =
 					DRM_VBLANK_RELATIVE |
 					DRM_VBLANK_EVENT;
@@ -1087,7 +1093,7 @@ static void sna_dri_vblank_handle(int fd,
 					vbl.request.type |= DRM_VBLANK_SECONDARY;
 				vbl.request.sequence = 1;
 				vbl.request.signal = (unsigned long)info;
-				if (!drmWaitVBlank(sna->kgem.fd, &vbl))
+				if (!sna_wait_vblank(sna, &vbl))
 					return;
 			}
 		}
@@ -1332,6 +1338,8 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	     (long long)divisor,
 	     (long long)remainder));
 
+	VG_CLEAR(vbl);
+
 	/* XXX In theory we can just exchange pixmaps.... */
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1)
@@ -1435,7 +1443,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		if (pipe > 0)
 			vbl.request.type |= DRM_VBLANK_SECONDARY;
 		vbl.request.sequence = 0;
-		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
+		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(info);
 			return FALSE;
 		}
@@ -1491,7 +1499,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		/* Account for 1 frame extra pageflip delay */
 		vbl.request.sequence -= 1;
 		vbl.request.signal = (unsigned long)info;
-		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
+		if (sna_wait_vblank(sna, &vbl)) {
 			sna_dri_frame_event_info_free(info);
 			return FALSE;
 		}
@@ -1559,6 +1567,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		goto blit_fallback;
 	}
 
+	VG_CLEAR(vbl);
+
 	/* Truncate to match kernel interfaces; means occasional overflow
 	 * misses, but that's generally not a big deal */
 	*target_msc &= 0xffffffff;
@@ -1607,7 +1617,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 				vbl.request.type |= DRM_VBLANK_SECONDARY;
 			vbl.request.sequence = 0;
 			vbl.request.signal = (unsigned long)info;
-			if (drmWaitVBlank(sna->kgem.fd, &vbl) == 0)
+			if (sna_wait_vblank(sna, &vbl) == 0)
 				return TRUE;
 		}
 
@@ -1621,7 +1631,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	if (pipe > 0)
 		vbl.request.type |= DRM_VBLANK_SECONDARY;
 	vbl.request.sequence = 0;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl))
+	if (sna_wait_vblank(sna, &vbl))
 		goto blit_fallback;
 
 	current_msc = vbl.reply.sequence;
@@ -1648,7 +1658,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			 vbl.request.type |= DRM_VBLANK_SECONDARY;
 		 vbl.request.sequence = *target_msc;
 		 vbl.request.signal = (unsigned long)info;
-		 if (drmWaitVBlank(sna->kgem.fd, &vbl))
+		 if (sna_wait_vblank(sna, &vbl))
 			 goto blit_fallback;
 
 		 return TRUE;
@@ -1683,7 +1693,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	vbl.request.sequence -= 1;
 
 	vbl.request.signal = (unsigned long)info;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl))
+	if (sna_wait_vblank(sna, &vbl))
 		goto blit_fallback;
 
 	*target_msc = vbl.reply.sequence;
@@ -1849,20 +1859,14 @@ sna_dri_get_msc(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
 		return TRUE;
 	}
 
+	VG_CLEAR(vbl);
+
 	vbl.request.type = DRM_VBLANK_RELATIVE;
 	if (pipe > 0)
 		vbl.request.type |= DRM_VBLANK_SECONDARY;
 	vbl.request.sequence = 0;
 
-	if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-		static int limit = 5;
-		if (limit) {
-			xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-				   "%s:%d get vblank counter failed: %s\n",
-				   __FUNCTION__, __LINE__,
-				   strerror(errno));
-			limit--;
-		}
+	if (sna_wait_vblank(sna, &vbl)) {
 		DBG(("%s: failed on pipe %d\n", __FUNCTION__, pipe));
 		return FALSE;
 	}
@@ -1906,22 +1910,15 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (pipe == -1)
 		goto out_complete;
 
+	VG_CLEAR(vbl);
+
 	/* Get current count */
 	vbl.request.type = DRM_VBLANK_RELATIVE;
 	if (pipe > 0)
 		vbl.request.type |= DRM_VBLANK_SECONDARY;
 	vbl.request.sequence = 0;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-		static int limit = 5;
-		if (limit) {
-			xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-				   "%s:%d get vblank counter failed: %s\n",
-				   __FUNCTION__, __LINE__,
-				   strerror(errno));
-			limit--;
-		}
+	if (sna_wait_vblank(sna, &vbl))
 		goto out_complete;
-	}
 
 	current_msc = vbl.reply.sequence;
 
@@ -1961,17 +1958,8 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 			vbl.request.type |= DRM_VBLANK_SECONDARY;
 		vbl.request.sequence = target_msc;
 		vbl.request.signal = (unsigned long)info;
-		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-			static int limit = 5;
-			if (limit) {
-				xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-					   "%s:%d get vblank counter failed: %s\n",
-					   __FUNCTION__, __LINE__,
-					   strerror(errno));
-				limit--;
-			}
+		if (sna_wait_vblank(sna, &vbl))
 			goto out_free_info;
-		}
 
 		info->frame = vbl.reply.sequence;
 		DRI2BlockClient(client, draw);
@@ -1998,17 +1986,8 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 		vbl.request.sequence += divisor;
 
 	vbl.request.signal = (unsigned long)info;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-		static int limit = 5;
-		if (limit) {
-			xf86DrvMsg(sna->scrn->scrnIndex, X_WARNING,
-				   "%s:%d get vblank counter failed: %s\n",
-				   __FUNCTION__, __LINE__,
-				   strerror(errno));
-			limit--;
-		}
+	if (sna_wait_vblank(sna, &vbl))
 		goto out_free_info;
-	}
 
 	info->frame = vbl.reply.sequence;
 	DRI2BlockClient(client, draw);
commit 8ebe84818ab70e662a9bb3f232b4664ff40ac375
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:27:46 2012 +0100

    sna: Make sna_copy_fbcon() valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index e0b9ee5..5d84578 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -554,7 +554,7 @@ sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 }
 
 static struct kgem_bo *sna_create_bo_for_fbcon(struct sna *sna,
-					       drmModeFBPtr fbcon)
+					       const struct drm_mode_fb_cmd *fbcon)
 {
 	struct drm_gem_flink flink;
 	struct kgem_bo *bo;
@@ -584,7 +584,7 @@ static struct kgem_bo *sna_create_bo_for_fbcon(struct sna *sna,
 void sna_copy_fbcon(struct sna *sna)
 {
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
-	drmModeFBPtr fbcon;
+	struct drm_mode_fb_cmd fbcon;
 	PixmapPtr scratch;
 	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
@@ -600,22 +600,27 @@ void sna_copy_fbcon(struct sna *sna)
 	DBG(("%s\n", __FUNCTION__));
 
 	/* Scan the connectors for a framebuffer and assume that is the fbcon */
-	fbcon = NULL;
-	for (i = 0; fbcon == NULL && i < xf86_config->num_crtc; i++) {
+	VG_CLEAR(fbcon);
+	fbcon.fb_id = 0;
+	for (i = 0; i < xf86_config->num_crtc; i++) {
 		struct sna_crtc *crtc = xf86_config->crtc[i]->driver_private;
-		drmModeCrtcPtr mode_crtc;
+		struct drm_mode_crtc mode;
 
-		mode_crtc = drmModeGetCrtc(sna->kgem.fd,
-					   sna->mode.mode_res->crtcs[crtc->num]);
-		if (mode_crtc == NULL)
+		VG_CLEAR(mode);
+		mode.crtc_id = crtc->id;
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
+			continue;
+		if (!mode.fb_id)
 			continue;
 
-		if (mode_crtc->buffer_id)
-			fbcon = drmModeGetFB(sna->kgem.fd,
-					     mode_crtc->buffer_id);
-		drmModeFreeCrtc(mode_crtc);
+		fbcon.fb_id = mode.fb_id;
+		if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETFB, &fbcon)) {
+			fbcon.fb_id = 0;
+			continue;
+		}
+		break;
 	}
-	if (fbcon == NULL) {
+	if (fbcon.fb_id == 0) {
 		DBG(("%s: no fbcon found\n", __FUNCTION__));
 		return;
 	}
@@ -625,17 +630,17 @@ void sna_copy_fbcon(struct sna *sna)
 	 * across a depth change upon starting X.
 	 */
 	scratch = GetScratchPixmapHeader(sna->scrn->pScreen,
-					fbcon->width, fbcon->height,
-					fbcon->depth, fbcon->bpp,
+					fbcon.width, fbcon.height,
+					fbcon.depth, fbcon.bpp,
 					0, NULL);
 	if (scratch == NullPixmap)
-		goto cleanup_fbcon;
+		return;
 
 	box.x1 = box.y1 = 0;
-	box.x2 = min(fbcon->width, sna->front->drawable.width);
-	box.y2 = min(fbcon->height, sna->front->drawable.height);
+	box.x2 = min(fbcon.width, sna->front->drawable.width);
+	box.y2 = min(fbcon.height, sna->front->drawable.height);
 
-	bo = sna_create_bo_for_fbcon(sna, fbcon);
+	bo = sna_create_bo_for_fbcon(sna, &fbcon);
 	if (bo == NULL)
 		goto cleanup_scratch;
 
@@ -645,14 +650,14 @@ void sna_copy_fbcon(struct sna *sna)
 	assert(priv && priv->gpu_bo);
 
 	sx = dx = 0;
-	if (box.x2 < (uint16_t)fbcon->width)
-		sx = (fbcon->width - box.x2) / 2.;
+	if (box.x2 < (uint16_t)fbcon.width)
+		sx = (fbcon.width - box.x2) / 2.;
 	if (box.x2 < sna->front->drawable.width)
 		dx = (sna->front->drawable.width - box.x2) / 2.;
 
 	sy = dy = 0;
-	if (box.y2 < (uint16_t)fbcon->height)
-		sy = (fbcon->height - box.y2) / 2.;
+	if (box.y2 < (uint16_t)fbcon.height)
+		sy = (fbcon.height - box.y2) / 2.;
 	if (box.y2 < sna->front->drawable.height)
 		dy = (sna->front->drawable.height - box.y2) / 2.;
 
@@ -669,8 +674,6 @@ void sna_copy_fbcon(struct sna *sna)
 
 cleanup_scratch:
 	FreeScratchPixmapHeader(scratch);
-cleanup_fbcon:
-	drmModeFreeFB(fbcon);
 }
 
 static void update_flush_interval(struct sna *sna)
commit f37dae734ebac5c0ed2d6f50fc92bc107f4e4a33
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:20:54 2012 +0100

    sna: Just use the CRTC id from the array
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 8ca8efb..e0b9ee5 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -971,7 +971,6 @@ static void
 sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 {
 	struct sna *sna = to_sna(scrn);
-	drmModeCrtcPtr mode_crtc;
 	xf86CrtcPtr crtc;
 	struct sna_crtc *sna_crtc;
 	struct drm_i915_get_pipe_from_crtc_id get_pipe;
@@ -983,10 +982,7 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 		return;
 
 	sna_crtc->num = num;
-
-	mode_crtc = drmModeGetCrtc(sna->kgem.fd, mode->mode_res->crtcs[num]);
-	sna_crtc->id = mode_crtc->crtc_id;
-	drmModeFreeCrtc(mode_crtc);
+	sna_crtc->id = mode->mode_res->crtcs[num];
 
 	VG_CLEAR(get_pipe);
 	get_pipe.pipe = 0;
commit b1f24a0eae4bb0081ff7469a2aee63a1f32140f7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:16:03 2012 +0100

    sna: Make sna_crtc_apply() valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9aade61..8ca8efb 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -54,7 +54,7 @@
 
 struct sna_crtc {
 	struct sna *sna;
-	drmModeModeInfo kmode;
+	struct drm_mode_modeinfo kmode;
 	PixmapPtr shadow;
 	uint32_t shadow_fb_id;
 	uint32_t cursor;
@@ -371,7 +371,7 @@ mode_from_kmode(ScrnInfoPtr scrn,
 }
 
 static void
-mode_to_kmode(drmModeModeInfoPtr kmode, DisplayModePtr mode)
+mode_to_kmode(struct drm_mode_modeinfo *kmode, DisplayModePtr mode)
 {
 	memset(kmode, 0, sizeof(*kmode));
 
@@ -415,6 +415,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
 	struct sna_mode *mode = &sna->mode;
+	struct drm_mode_crtc arg;
 	uint32_t output_ids[16];
 	int output_count = 0;
 	int fb_id, x, y;
@@ -468,9 +469,16 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	     fb_id, sna_crtc->shadow_fb_id ? " [shadow]" : "",
 	     output_count));
 
-	ret = drmModeSetCrtc(sna->kgem.fd, crtc_id(sna_crtc),
-			     fb_id, x, y, output_ids, output_count,
-			     &sna_crtc->kmode);
+	VG_CLEAR(arg);
+	arg.x = x;
+	arg.y = y;
+	arg.crtc_id = sna_crtc->id;
+	arg.fb_id = fb_id;
+	arg.set_connectors_ptr = (uintptr_t)output_ids;
+	arg.count_connectors = output_count;
+	arg.mode = sna_crtc->kmode;
+	arg.mode_valid = 1;
+	ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_SETCRTC, &arg);
 	if (ret) {
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(-ret));
commit 0ebfcdbb80245f98c0c7ef9f090a2be6900fb457
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:10:04 2012 +0100

    sna: Make AddFB valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 2d6cba4..9aade61 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -157,6 +157,7 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 		       int width, int height)
 {
 	ScrnInfoPtr scrn = sna->scrn;
+	struct drm_mode_fb_cmd arg;
 	int ret;
 
 	assert(bo->proxy == NULL);
@@ -170,21 +171,25 @@ static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
 	     __FUNCTION__, width, height, scrn->depth, scrn->bitsPerPixel));
 
 	assert(bo->tiling != I915_TILING_Y);
-	ret = drmModeAddFB(sna->kgem.fd,
-			   width, height,
-			   scrn->depth, scrn->bitsPerPixel,
-			   bo->pitch, bo->handle,
-			   &bo->delta);
-	if (ret < 0) {
-		ErrorF("%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
-		       __FUNCTION__,
-		       width, height,
-		       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+
+	VG_CLEAR(arg);
+	arg.width = width;
+	arg.height = height;
+	arg.pitch = bo->pitch;
+	arg.bpp = scrn->bitsPerPixel;
+	arg.depth = scrn->depth;
+	arg.handle = bo->handle;
+
+	if ((ret = drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_ADDFB, &arg))) {
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			   "%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d: %d\n",
+			   __FUNCTION__, width, height,
+			   scrn->depth, scrn->bitsPerPixel, bo->pitch, ret);
 		return 0;
 	}
 
 	bo->scanout = true;
-	return bo->delta;
+	return bo->delta = arg.fb_id;
 }
 
 static uint32_t gem_create(int fd, int size)
commit 32ca3c7b65516b0bd48b0d0979b086ee1580167a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:02:11 2012 +0100

    sna: Make sna_crtc_is_bound() valgrind clean
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ec0e03d..2d6cba4 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -393,6 +393,7 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 {
 	struct drm_mode_crtc mode;
 
+	VG_CLEAR(mode);
 	mode.crtc_id = crtc_id(crtc->driver_private);
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
 		return false;
commit 7973f6751c9bf565dee4c89aa3e1badbcc45018f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 14:00:55 2012 +0100

    sna: Add a little bit more verbosity to cursor routines for valgrind
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a218d83..ec0e03d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -748,54 +748,78 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 }
 
 static void
-sna_crtc_set_cursor_colors(xf86CrtcPtr crtc, int bg, int fg)
+sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 {
+	struct sna *sna = to_sna(crtc->scrn);
+	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct drm_mode_cursor arg;
+
+	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
+
+	VG_CLEAR(arg);
+	arg.flags = DRM_MODE_CURSOR_BO;
+	arg.crtc_id = sna_crtc->id;
+	arg.width = arg.height = 64;
+	arg.handle = 0;
 
+	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
-sna_crtc_set_cursor_position (xf86CrtcPtr crtc, int x, int y)
+sna_crtc_show_cursor(xf86CrtcPtr crtc)
 {
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct drm_mode_cursor arg;
+
+	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
 
-	drmModeMoveCursor(sna->kgem.fd, crtc_id(sna_crtc), x, y);
+	VG_CLEAR(arg);
+	arg.flags = DRM_MODE_CURSOR_BO;
+	arg.crtc_id = sna_crtc->id;
+	arg.width = arg.height = 64;
+	arg.handle = sna_crtc->cursor;
+
+	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
-sna_crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 *image)
+sna_crtc_set_cursor_colors(xf86CrtcPtr crtc, int bg, int fg)
 {
-	struct sna *sna = to_sna(crtc->scrn);
-	struct sna_crtc *sna_crtc = crtc->driver_private;
-	struct drm_i915_gem_pwrite pwrite;
-
-	VG_CLEAR(pwrite);
-	pwrite.handle = sna_crtc->cursor;
-	pwrite.offset = 0;
-	pwrite.size = 64*64*4;
-	pwrite.data_ptr = (uintptr_t)image;
-	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 }
 
 static void
-sna_crtc_hide_cursor(xf86CrtcPtr crtc)
+sna_crtc_set_cursor_position(xf86CrtcPtr crtc, int x, int y)
 {
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct drm_mode_cursor arg;
 
-	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
-	drmModeSetCursor(sna->kgem.fd, crtc_id(sna_crtc), 0, 64, 64);
+	DBG(("%s: CRTC:%d (%d, %d)\n", __FUNCTION__, crtc_id(sna_crtc), x, y));
+
+	VG_CLEAR(arg);
+	arg.flags = DRM_MODE_CURSOR_MOVE;
+	arg.crtc_id = sna_crtc->id;
+	arg.x = x;
+	arg.y = y;
+	arg.handle = sna_crtc->cursor;
+
+	drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_CURSOR, &arg);
 }
 
 static void
-sna_crtc_show_cursor(xf86CrtcPtr crtc)
+sna_crtc_load_cursor_argb(xf86CrtcPtr crtc, CARD32 *image)
 {
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
+	struct drm_i915_gem_pwrite pwrite;
 
-	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
-	drmModeSetCursor(sna->kgem.fd, crtc_id(sna_crtc),
-			 sna_crtc->cursor, 64, 64);
+	VG_CLEAR(pwrite);
+	pwrite.handle = sna_crtc->cursor;
+	pwrite.offset = 0;
+	pwrite.size = 64*64*4;
+	pwrite.data_ptr = (uintptr_t)image;
+	(void)drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 }
 
 static void *
commit 19c463e52df919fc75de7e420fd3565f0e9a0576
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 12:54:35 2012 +0100

    sna: Only wait if there is a suitable active buffer
    
    There is not point repeating the search after retiring if we know that
    there is no outstanding suitable active buffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0ef160c..af5b4b9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2837,7 +2837,9 @@ search_inactive:
 		return bo;
 	}
 
-	if (flags & CREATE_INACTIVE && __kgem_throttle_retire(kgem, flags)) {
+	if (flags & CREATE_INACTIVE &&
+	    !list_is_empty(&kgem->active[bucket][tiling]) &&
+	    __kgem_throttle_retire(kgem, flags)) {
 		flags &= ~CREATE_INACTIVE;
 		goto search_inactive;
 	}
commit 5b99c7cd340f782d3057d4257865c5feb96b71f0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 10:55:49 2012 +0100

    uxa/dri: Enable vblank scheduling even with pageflipping disabled
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 34d00b6..3261e54 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -1685,15 +1685,13 @@ Bool I830DRI2ScreenInit(ScreenPtr screen)
 
 	info.CopyRegion = I830DRI2CopyRegion;
 #if DRI2INFOREC_VERSION >= 4
-	if (intel->use_pageflipping) {
-	    info.version = 4;
-	    info.ScheduleSwap = I830DRI2ScheduleSwap;
-	    info.GetMSC = I830DRI2GetMSC;
-	    info.ScheduleWaitMSC = I830DRI2ScheduleWaitMSC;
-	    info.numDrivers = 1;
-	    info.driverNames = driverNames;
-	    driverNames[0] = info.driverName;
-	}
+	info.version = 4;
+	info.ScheduleSwap = I830DRI2ScheduleSwap;
+	info.GetMSC = I830DRI2GetMSC;
+	info.ScheduleWaitMSC = I830DRI2ScheduleWaitMSC;
+	info.numDrivers = 1;
+	info.driverNames = driverNames;
+	driverNames[0] = info.driverName;
 #endif
 
 	return DRI2ScreenInit(screen, &info);
commit 810357ad65d551ec5d35dbf228f1b62fe235801f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 10:12:38 2012 +0100

    uxa/dri: Fix ordering of setting vs using swap members
    
    Trivial readability fix, as the actual ordering is serialised through
    there being only a single thread.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 36e96ff..34d00b6 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -933,12 +933,12 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 	int tmp_name;
 
 	if (!intel->use_triple_buffer) {
+		info->type = DRI2_SWAP;
 		if (!intel_do_pageflip(intel,
 				       intel_get_pixmap_bo(priv->pixmap),
 				       info, info->pipe))
 			return FALSE;
 
-		info->type = DRI2_SWAP;
 		I830DRI2ExchangeBuffers(intel, info->front, info->back);
 		return TRUE;
 	}
commit 2b56a188ef2c48f82572eca2201a3a0ecf6b4f45
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 10:01:09 2012 +0100

    Mark another couple of options as being UXA only.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_options.c b/src/intel_options.c
index 80572be..56929be 100644
--- a/src/intel_options.c
+++ b/src/intel_options.c
@@ -7,9 +7,7 @@ const OptionInfoRec intel_options[] = {
 	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	0},
 	{OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	1},
 	{OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	0},
-	{OPTION_SHADOW,	"Shadow",	OPTV_BOOLEAN,	{0},	0},
 	{OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	1},
-	{OPTION_TRIPLE_BUFFER, "TripleBuffer", OPTV_BOOLEAN,	{0},	1},
 #ifdef INTEL_XVMC
 	{OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	1},
 #endif
@@ -26,8 +24,10 @@ const OptionInfoRec intel_options[] = {
 	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	1},
 #endif
 #ifdef USE_UXA
-	{OPTION_FALLBACKDEBUG, "FallbackDebug", OPTV_BOOLEAN, {0},	0},
-	{OPTION_BUFFER_CACHE,       "BufferCache",  OPTV_BOOLEAN,   {0},    1},
+	{OPTION_FALLBACKDEBUG,	"FallbackDebug",OPTV_BOOLEAN,	{0},	0},
+	{OPTION_BUFFER_CACHE,	"BufferCache",	OPTV_BOOLEAN,   {0},    1},
+	{OPTION_SHADOW,		"Shadow",	OPTV_BOOLEAN,	{0},	0},
+	{OPTION_TRIPLE_BUFFER,	"TripleBuffer", OPTV_BOOLEAN,	{0},	1},
 #endif
 	{-1,			NULL,		OPTV_NONE,	{0},	0}
 };
commit b025f1a604dbbbd456a23f330ceed5f97ecdffcf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 09:48:49 2012 +0100

    sna/dri: Tweak vblank_mode=n swapbuffers to account for throttle delay
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 0f582b5..2ce4e40 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1665,26 +1665,22 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		     (int)*target_msc,
 		     (int)divisor));
 
-	vbl.request.type = DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT;
-	vbl.request.type |= DRM_VBLANK_NEXTONMISS;
+	vbl.request.type =
+		DRM_VBLANK_ABSOLUTE | DRM_VBLANK_EVENT | DRM_VBLANK_NEXTONMISS;
 	if (pipe > 0)
 		vbl.request.type |= DRM_VBLANK_SECONDARY;
 
 	vbl.request.sequence = current_msc - current_msc % divisor + remainder;
-
 	/*
 	 * If the calculated deadline vbl.request.sequence is smaller than
 	 * or equal to current_msc, it means we've passed the last point
 	 * when effective onset frame seq could satisfy
 	 * seq % divisor == remainder, so we need to wait for the next time
 	 * this will happen.
-	 *
-	 * This comparison takes the 1 frame swap delay in pageflipping mode
-	 * into account, as well as a potential DRM_VBLANK_NEXTONMISS delay
-	 * if we are blitting/exchanging instead of flipping.
 	 */
-	if (vbl.request.sequence <= current_msc)
+	if (vbl.request.sequence < current_msc)
 		vbl.request.sequence += divisor;
+	vbl.request.sequence -= 1;
 
 	vbl.request.signal = (unsigned long)info;
 	if (drmWaitVBlank(sna->kgem.fd, &vbl))
commit 45148a714f1461fff898aeb393ad225f081b9bd5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 09:39:51 2012 +0100

    sna/dri: Enable handling for Option "SwapBuffersWait"
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 92614d0..b4f2ba4 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -208,6 +208,8 @@ struct sna {
 	unsigned flags;
 #define SNA_NO_THROTTLE		0x1
 #define SNA_NO_DELAYED_FLUSH	0x2
+#define SNA_NO_WAIT		0x4
+#define SNA_NO_FLIP		0x8
 
 	unsigned watch_flush;
 	unsigned flush;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index ee509c0..0f582b5 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -448,7 +448,8 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 			return NULL;
 		}
 
-		if (pixmap == sna->front && sync) {
+		if (pixmap == sna->front && sync &&
+		    (sna->flags & SNA_NO_WAIT) == 0) {
 			BoxRec crtc_box;
 
 			crtc = sna_covering_crtc(sna->scrn, &region->extents,
@@ -952,6 +953,11 @@ can_flip(struct sna * sna,
 		return FALSE;
 	}
 
+	if (sna->flags & SNA_NO_FLIP) {
+		DBG(("%s: no, pageflips disabled\n", __FUNCTION__));
+		return FALSE;
+	}
+
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
 		     __FUNCTION__, front->format, back->format));
@@ -1591,21 +1597,22 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 						 get_private(front)->bo,
 						 get_private(back)->bo,
 						 true);
-
-		info->type = DRI2_SWAP_THROTTLE;
-		vbl.request.type =
-			DRM_VBLANK_RELATIVE |
-			DRM_VBLANK_NEXTONMISS |
-			DRM_VBLANK_EVENT;
-		if (pipe > 0)
-			vbl.request.type |= DRM_VBLANK_SECONDARY;
-		vbl.request.sequence = 0;
-		vbl.request.signal = (unsigned long)info;
-		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-			sna_dri_frame_event_info_free(info);
-			DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
+		if ((sna->flags & SNA_NO_WAIT) == 0) {
+			info->type = DRI2_SWAP_THROTTLE;
+			vbl.request.type =
+				DRM_VBLANK_RELATIVE |
+				DRM_VBLANK_NEXTONMISS |
+				DRM_VBLANK_EVENT;
+			if (pipe > 0)
+				vbl.request.type |= DRM_VBLANK_SECONDARY;
+			vbl.request.sequence = 0;
+			vbl.request.signal = (unsigned long)info;
+			if (drmWaitVBlank(sna->kgem.fd, &vbl) == 0)
+				return TRUE;
 		}
 
+		sna_dri_frame_event_info_free(info);
+		DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
 		return TRUE;
 	}
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 39e67c4..af8bfe9 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -62,6 +62,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <sys/poll.h>
 #include "i915_drm.h"
 
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#endif
+
 #if HAVE_DOT_GIT
 #include "git_version.h"
 #endif
@@ -372,6 +377,23 @@ static void sna_selftest(void)
 	sna_damage_selftest();
 }
 
+static bool has_pageflipping(struct sna *sna)
+{
+	drm_i915_getparam_t gp;
+	int v;
+
+	if (sna->flags & SNA_NO_WAIT)
+		return false;
+
+	VG_CLEAR(gp);
+	gp.param = I915_PARAM_HAS_PAGEFLIPPING;
+	gp.value = &v;
+	drmIoctl(sna->kgem.fd, DRM_IOCTL_I915_GETPARAM, &gp);
+
+	VG(VALGRIND_MAKE_MEM_DEFINED(&v, sizeof(v)));
+	return v > 0;
+}
+
 /**
  * This is called before ScreenInit to do any require probing of screen
  * configuration.
@@ -500,6 +522,10 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		sna->flags |= SNA_NO_THROTTLE;
 	if (!xf86ReturnOptValBool(sna->Options, OPTION_DELAYED_FLUSH, TRUE))
 		sna->flags |= SNA_NO_DELAYED_FLUSH;
+	if (!xf86ReturnOptValBool(sna->Options, OPTION_SWAPBUFFERS_WAIT, TRUE))
+		sna->flags |= SNA_NO_WAIT;
+	if (!has_pageflipping(sna))
+		sna->flags |= SNA_NO_FLIP;
 
 	xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "Framebuffer %s\n",
 		   sna->tiling & SNA_TILING_FB ? "tiled" : "linear");
commit c709f2447dfc6dc36c50ff741d5d9bbdc7c03b58
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 08:17:21 2012 +0100

    sna/dri: Requeue vblank throttling until the vsync'ed copy completes
    
    If the GPU is busy, then we may not actually schedule our copy for
    several vblanks, resulting in us falsely reporting that the work
    completed too early and allowing the client to continue scheduling more
    work and racing ahead of the queued copies.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9d2fd99..0ef160c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1382,17 +1382,18 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			}
 		}
 
-		rq->bo->refcnt--;
-		assert(rq->bo->refcnt == 0);
 		assert(rq->bo->rq == NULL);
 		assert(list_is_empty(&rq->bo->request));
-		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
-			kgem_bo_move_to_inactive(kgem, rq->bo);
-			retired = true;
-		} else {
-			DBG(("%s: closing %d\n",
-			     __FUNCTION__, rq->bo->handle));
-			kgem_bo_free(kgem, rq->bo);
+
+		if (--rq->bo->refcnt == 0) {
+			if (kgem_bo_set_purgeable(kgem, rq->bo)) {
+				kgem_bo_move_to_inactive(kgem, rq->bo);
+				retired = true;
+			} else {
+				DBG(("%s: closing %d\n",
+				     __FUNCTION__, rq->bo->handle));
+				kgem_bo_free(kgem, rq->bo);
+			}
 		}
 
 		if (kgem->sync == rq)
@@ -4221,3 +4222,17 @@ kgem_replace_bo(struct kgem *kgem,
 
 	return dst;
 }
+
+struct kgem_bo *kgem_get_last_request(struct kgem *kgem)
+{
+	struct kgem_request *rq;
+
+	if (list_is_empty(&kgem->requests))
+		return NULL;
+
+	rq = list_last_entry(&kgem->requests,
+			     struct kgem_request,
+			     list);
+
+	return kgem_bo_reference(rq->bo);
+}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index fdabfae..bb6bcf2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -245,6 +245,7 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
 
 void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
+struct kgem_bo *kgem_get_last_request(struct kgem *kgem);
 
 void _kgem_submit(struct kgem *kgem);
 static inline void kgem_submit(struct kgem *kgem)
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 632c57d..ee509c0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -90,6 +90,7 @@ struct sna_dri_frame_event {
 	void *event_data;
 	DRI2BufferPtr front;
 	DRI2BufferPtr back;
+	struct kgem_bo *bo;
 
 	unsigned int fe_frame;
 	unsigned int fe_tv_sec;
@@ -395,13 +396,14 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	priv->gpu_bo = ref(bo);
 }
 
-static void
+static struct kgem_bo *
 sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		      struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
 		      bool sync)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(draw);
 	pixman_region16_t clip;
+	struct kgem_bo *bo = NULL;
 	bool flush = false;
 	xf86CrtcPtr crtc;
 	BoxRec box, *boxes;
@@ -421,7 +423,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 		if (!pixman_region_not_empty(region)) {
 			DBG(("%s: all clipped\n", __FUNCTION__));
-			return;
+			return NULL;
 		}
 	}
 
@@ -443,7 +445,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		pixman_region_intersect(region, &win->clipList, region);
 		if (!pixman_region_not_empty(region)) {
 			DBG(("%s: all clipped\n", __FUNCTION__));
-			return;
+			return NULL;
 		}
 
 		if (pixmap == sna->front && sync) {
@@ -495,6 +497,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	if (flush) { /* STAT! */
 		assert(sna_crtc_is_bound(sna, crtc));
 		kgem_submit(&sna->kgem);
+		bo = kgem_get_last_request(&sna->kgem);
 	}
 
 	pixman_region_translate(region, dx, dy);
@@ -503,6 +506,8 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	if (region == &clip)
 		pixman_region_fini(&clip);
+
+	return bo;
 }
 
 static void
@@ -650,7 +655,7 @@ sna_dri_copy_region(DrawablePtr draw,
 
 	if (dst_buffer->attachment == DRI2BufferFrontLeft) {
 		dst = sna_pixmap_get_bo(pixmap);
-		copy = sna_dri_copy_to_front;
+		copy = (void *)sna_dri_copy_to_front;
 	} else
 		dst = get_private(dst_buffer)->bo;
 
@@ -884,6 +889,9 @@ sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 		sna_dri_frame_event_release_bo(&info->sna->kgem,
 					       info->cache.bo);
 
+	if (info->bo)
+		kgem_bo_destroy(&info->sna->kgem, info->bo);
+
 	free(info);
 }
 
@@ -1052,13 +1060,32 @@ static void sna_dri_vblank_handle(int fd,
 		}
 		/* else fall through to blit */
 	case DRI2_SWAP:
-		sna_dri_copy_to_front(sna, draw, NULL,
-				      get_private(info->front)->bo,
-				      get_private(info->back)->bo,
-				      true);
+		info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+						 get_private(info->front)->bo,
+						 get_private(info->back)->bo,
+						 true);
+		info->type = DRI2_SWAP_THROTTLE;
 	case DRI2_SWAP_THROTTLE:
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
 		     __FUNCTION__, info->type, frame, tv_sec, tv_usec));
+
+		if (info->bo && kgem_bo_is_busy(info->bo)) {
+			kgem_retire(&sna->kgem);
+			if (kgem_bo_is_busy(info->bo)) {
+				drmVBlank vbl;
+
+				vbl.request.type =
+					DRM_VBLANK_RELATIVE |
+					DRM_VBLANK_EVENT;
+				if (info->pipe > 0)
+					vbl.request.type |= DRM_VBLANK_SECONDARY;
+				vbl.request.sequence = 1;
+				vbl.request.signal = (unsigned long)info;
+				if (!drmWaitVBlank(sna->kgem.fd, &vbl))
+					return;
+			}
+		}
+
 		DRI2SwapComplete(info->client,
 				 draw, frame,
 				 tv_sec, tv_usec,
@@ -1560,26 +1587,26 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		DBG(("%s: emitting immediate vsync'ed blit, throttling client\n",
 		     __FUNCTION__));
 
-		 info->type = DRI2_SWAP_THROTTLE;
+		info->bo = sna_dri_copy_to_front(sna, draw, NULL,
+						 get_private(front)->bo,
+						 get_private(back)->bo,
+						 true);
 
-		 vbl.request.type =
-			 DRM_VBLANK_RELATIVE |
-			 DRM_VBLANK_EVENT |
-			 DRM_VBLANK_NEXTONMISS;
-		 if (pipe > 0)
-			 vbl.request.type |= DRM_VBLANK_SECONDARY;
-		 vbl.request.sequence = 0;
-		 vbl.request.signal = (unsigned long)info;
-		 if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-			 sna_dri_frame_event_info_free(info);
-			 DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
-		 }
-
-		 sna_dri_copy_to_front(sna, draw, NULL,
-				       get_private(front)->bo,
-				       get_private(back)->bo,
-				       true);
-		 return TRUE;
+		info->type = DRI2_SWAP_THROTTLE;
+		vbl.request.type =
+			DRM_VBLANK_RELATIVE |
+			DRM_VBLANK_NEXTONMISS |
+			DRM_VBLANK_EVENT;
+		if (pipe > 0)
+			vbl.request.type |= DRM_VBLANK_SECONDARY;
+		vbl.request.sequence = 0;
+		vbl.request.signal = (unsigned long)info;
+		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
+			sna_dri_frame_event_info_free(info);
+			DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
+		}
+
+		return TRUE;
 	}
 
 	/* Get current count */
commit 7e73fa02ed361a9c0c08f61d00421671bf10ce9f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 28 08:16:43 2012 +0100

    sna: Add some debugging to show count of outstanding requests during retire
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 00ef82d..9d2fd99 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1306,6 +1306,14 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 				kgem_bo_free(kgem, bo);
 		}
 	}
+#if DEBUG_KGEM
+	{
+		int count = 0;
+		list_for_each_entry(bo, &kgem->flushing, request)
+			count++;
+		ErrorF("%s: %d bo on flushing list\n", __FUNCTION__, count);
+	}
+#endif
 
 	return retired;
 }
@@ -1394,6 +1402,24 @@ static bool kgem_retire__requests(struct kgem *kgem)
 		free(rq);
 	}
 
+#if DEBUG_KGEM
+	{
+		int count = 0;
+
+		list_for_each_entry(bo, &kgem->requests, request)
+			count++;
+
+		bo = NULL;
+		if (!list_is_empty(&kgem->requests))
+			bo = list_first_entry(&kgem->requests,
+					      struct kgem_request,
+					      list)->bo;
+
+		ErrorF("%s: %d outstanding requests, oldest=%d\n",
+		       __FUNCTION__, count, bo ? bo->handle : 0);
+	}
+#endif
+
 	return retired;
 }
 
commit 62b557065edc0555f2bf83b0eed9169329a2f2ba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 27 16:52:51 2012 +0100

    sna: Use magic upload buffers for video textures
    
    So that we may benefit from the caching of buffers and the automatic
    selection  of the preferred upload method.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 1f6e210..ce1e284 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -196,6 +196,7 @@ sna_video_frame_init(struct sna *sna,
 {
 	int align;
 
+	frame->bo = NULL;
 	frame->id = id;
 	frame->width = width;
 	frame->height = height;
@@ -444,12 +445,9 @@ sna_video_copy_data(struct sna *sna,
 {
 	uint8_t *dst;
 
-	if (frame->bo == NULL)
-		return FALSE;
-
 	DBG(("%s: handle=%d, size=%dx%d, rotation=%d\n",
-	     __FUNCTION__, frame->bo->handle, frame->width, frame->height,
-	     video->rotation));
+	     __FUNCTION__, frame->bo ? frame->bo->handle : 0,
+	     frame->width, frame->height, video->rotation));
 	DBG(("%s: top=%d, left=%d\n", __FUNCTION__, frame->top, frame->left));
 
 	/* In the common case, we can simply the upload in a single pwrite */
@@ -462,10 +460,22 @@ sna_video_copy_data(struct sna *sna,
 			if (pitch[0] == frame->pitch[0] &&
 			    pitch[1] == frame->pitch[1] &&
 			    frame->top == 0 && frame->left == 0) {
-				kgem_bo_write(&sna->kgem, frame->bo,
-					      buf,
-					      pitch[1]*frame->height +
-					      pitch[0]*frame->height);
+				if (frame->bo) {
+					kgem_bo_write(&sna->kgem, frame->bo,
+						      buf,
+						      pitch[1]*frame->height +
+						      pitch[0]*frame->height);
+				} else {
+					frame->bo = kgem_create_buffer(&sna->kgem, frame->size,
+								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
+								       (void **)&dst);
+					if (frame->bo == NULL)
+						return FALSE;
+
+					memcpy(dst, buf,
+					       pitch[1]*frame->height +
+					       pitch[0]*frame->height);
+				}
 				if (frame->id != FOURCC_I420) {
 					uint32_t tmp;
 					tmp = frame->VBufOffset;
@@ -476,18 +486,38 @@ sna_video_copy_data(struct sna *sna,
 			}
 		} else {
 			if (frame->width*2 == frame->pitch[0]) {
-				kgem_bo_write(&sna->kgem, frame->bo,
-					      buf + (frame->top * frame->width*2) + (frame->left << 1),
-					      frame->nlines*frame->width*2);
+				if (frame->bo) {
+					kgem_bo_write(&sna->kgem, frame->bo,
+						      buf + (frame->top * frame->width*2) + (frame->left << 1),
+						      frame->nlines*frame->width*2);
+				} else {
+					frame->bo = kgem_create_buffer(&sna->kgem, frame->size,
+								       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
+								       (void **)&dst);
+					if (frame->bo == NULL)
+						return FALSE;
+
+					memcpy(dst,
+					       buf + (frame->top * frame->width*2) + (frame->left << 1),
+					       frame->nlines*frame->width*2);
+				}
 				return TRUE;
 			}
 		}
 	}
 
 	/* copy data, must use GTT so that we keep the overlay uncached */
-	dst = kgem_bo_map__gtt(&sna->kgem, frame->bo);
-	if (dst == NULL)
-		return FALSE;
+	if (frame->bo) {
+		dst = kgem_bo_map__gtt(&sna->kgem, frame->bo);
+		if (dst == NULL)
+			return FALSE;
+	} else {
+		frame->bo = kgem_create_buffer(&sna->kgem, frame->size,
+					       KGEM_BUFFER_WRITE | KGEM_BUFFER_WRITE_INPLACE,
+					       (void **)&dst);
+		if (frame->bo == NULL)
+			return FALSE;
+	}
 
 	if (is_planar_fourcc(frame->id))
 		sna_copy_planar_data(video, frame, buf, dst);
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index e1806d5..bc117a4 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -273,13 +273,6 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 
 		assert(kgem_bo_size(frame.bo) >= frame.size);
 	} else {
-		frame.bo = kgem_create_linear(&sna->kgem, frame.size,
-					      CREATE_GTT_MAP);
-		if (frame.bo == NULL) {
-			DBG(("%s: failed to allocate bo\n", __FUNCTION__));
-			return BadAlloc;
-		}
-
 		if (!sna_video_copy_data(sna, video, &frame, buf)) {
 			DBG(("%s: failed to copy frame\n", __FUNCTION__));
 			kgem_bo_destroy(&sna->kgem, frame.bo);
commit 35291d2db813f75fedcdca9920a40592acd3cca3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 27 16:54:29 2012 +0100

    sna: Search the inactive bo cache for a mappable upload buffer
    
    See if we have a bo that we can cheaply map to an inplace upload, rather
    than rely on an existing GTT map.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9a67c38..00ef82d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -153,21 +153,25 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 
 static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
 {
-	if (flags & CREATE_NO_RETIRE)
+	if (flags & CREATE_NO_RETIRE) {
+		DBG(("%s: not retiring per-request\n", __FUNCTION__));
 		return false;
+	}
 
-	if (!kgem->need_retire)
+	if (!kgem->need_retire) {
+		DBG(("%s: nothing to retire\n", __FUNCTION__));
 		return false;
+	}
 
 	if (kgem_retire(kgem))
 		return true;
 
-	if (!kgem->need_throttle)
+	if (flags & CREATE_NO_THROTTLE || !kgem->need_throttle) {
+		DBG(("%s: not throttling\n", __FUNCTION__));
 		return false;
+	}
 
-	if ((flags & CREATE_NO_THROTTLE) == 0)
-		kgem_throttle(kgem);
-
+	kgem_throttle(kgem);
 	return kgem_retire(kgem);
 }
 
@@ -3707,6 +3711,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		 * XXX This is especially noticeable on memory constrained
 		 * devices like gen2 or with relatively slow gpu like i3.
 		 */
+		DBG(("%s: searching for an inactive GTT map for upload\n",
+		     __FUNCTION__));
 		old = search_linear_cache(kgem, alloc,
 					  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
 #if HAVE_I915_GEM_BUFFER_INFO
@@ -3731,6 +3737,13 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old == NULL)
 			old = search_linear_cache(kgem, NUM_PAGES(size),
 						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
+		if (old == NULL) {
+			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
+			if (old && !kgem_bo_is_mappable(kgem, old)) {
+				_kgem_bo_destroy(kgem, old);
+				old = NULL;
+			}
+		}
 		if (old) {
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
commit 36d53ff52b055b2b5fc03aff7b2cab83037d9f42
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 27 16:46:42 2012 +0100

    sna: Validate all CRTCs after updating one
    
    Updating one CRTC may cause the kernel to turn off another, so be
    paranoid and run the check in a loop after applying the CRTC set.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 1f2d085..92614d0 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -295,7 +295,7 @@ struct sna {
 
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 extern void sna_mode_remove_fb(struct sna *sna);
-extern void sna_mode_hotplug(struct sna *sna);
+extern void sna_mode_update(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
 extern int sna_crtc_id(xf86CrtcPtr crtc);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 4b142a3..a218d83 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -397,8 +397,8 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
 		return false;
 
-	DBG(("%s: mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
-	     mode.mode_valid, sna->mode.fb_id == mode.fb_id));
+	DBG(("%s: crtc=%d, mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
+	     mode.crtc_id, mode.mode_valid, sna->mode.fb_id == mode.fb_id));
 	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
 }
 
@@ -469,10 +469,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(-ret));
 		ret = FALSE;
-	} else {
-		sna_crtc->active = sna_crtc_is_bound(sna, crtc);
+	} else
 		ret = TRUE;
-	}
 
 	if (crtc->scrn->pScreen)
 		xf86_reload_cursors(crtc->scrn->pScreen);
@@ -520,6 +518,7 @@ sna_crtc_restore(struct sna *sna)
 		if (crtc->enabled)
 			sna_crtc_apply(crtc);
 	}
+	sna_mode_update(sna);
 
 	kgem_bo_retire(&sna->kgem, bo);
 	scrn->displayWidth = bo->pitch / sna->mode.cpp;
@@ -742,6 +741,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		crtc->mode = saved_mode;
 		return FALSE;
 	}
+	sna_mode_update(sna);
 
 	update_flush_interval(sna);
 	return TRUE;
@@ -1765,6 +1765,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		if (!sna_crtc_apply(crtc))
 			goto fail;
 	}
+	sna_mode_update(sna);
 
 	kgem_bo_retire(&sna->kgem, bo);
 
@@ -2243,7 +2244,7 @@ sna_wait_for_scanline(struct sna *sna,
 	return true;
 }
 
-void sna_mode_hotplug(struct sna *sna)
+void sna_mode_update(struct sna *sna)
 {
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
 	int i;
@@ -2251,9 +2252,10 @@ void sna_mode_hotplug(struct sna *sna)
 	/* Validate CRTC attachments */
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
-		if (crtc->enabled) {
-			struct sna_crtc *sna_crtc = crtc->driver_private;
+		struct sna_crtc *sna_crtc = crtc->driver_private;
+		if (crtc->enabled)
 			sna_crtc->active = sna_crtc_is_bound(sna, crtc);
-		}
+		else
+			sna_crtc->active = false;
 	}
 }
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index f3f19e1..39e67c4 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -608,7 +608,7 @@ sna_handle_uevents(int fd, void *closure)
 	if (memcmp(&s.st_rdev, &udev_devnum, sizeof (dev_t)) == 0 &&
 	    hotplug && atoi(hotplug) == 1) {
 		DBG(("%s: hotplug event\n", __FUNCTION__));
-		sna_mode_hotplug(sna);
+		sna_mode_update(sna);
 		RRGetInfo(xf86ScrnToScreen(scrn), TRUE);
 	}
 
commit 7fdd8bd6d2ba7bf113d4109e6bb6f750f909565f
Author: Johannes Obermayr <johannesobermayr at gmx.de>
Date:   Sat May 26 23:52:28 2012 +0200

    glamor: Fix misspelled xf86GetOptValString and OPTION_ACCEL_METHOD introduced by commit e456291.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index 5c0186e..53043dd 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -39,6 +39,7 @@
 #include "i915_drm.h"
 #include "intel_glamor.h"
 #include "uxa.h"
+#include "intel_options.h"
 
 void
 intel_glamor_exchange_buffers(struct intel_screen_private *intel,
@@ -177,7 +178,7 @@ intel_glamor_enabled(intel_screen_private *intel)
 {
 	const char *s;
 
-	s = xf86GetOptString(intel->Options, ACCEL_METHOD);
+	s = xf86GetOptValString(intel->Options, OPTION_ACCEL_METHOD);
 	if (s == NULL)
 		return FALSE;
 
commit 8ea4ba081de0206351394481f54dcbe6922a085b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 26 21:48:16 2012 +0100

    sna: Fix typo for debug compilation
    
    s/ctrc/crtc/
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index a1f671a..632c57d 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -493,7 +493,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
-		assert(sna_crtc_is_bound(sna, ctrc));
+		assert(sna_crtc_is_bound(sna, crtc));
 		kgem_submit(&sna->kgem);
 	}
 
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 9247e88..e1806d5 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -308,7 +308,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	 * we can hit the next vsync.
 	 */
 	if (flush) {
-		assert(sna_crtc_is_bound(sna, ctrc));
+		assert(sna_crtc_is_bound(sna, crtc));
 		kgem_submit(&sna->kgem);
 	}
 
commit 317bf05196086eb5dedb6436c07f253f01c9bf63
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 25 14:34:16 2012 +0100

    sna: Perform the selftest after probing
    
    We do not want to slow down the detection phase by performing our
    self-tests, so only run those before initialising the driver.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 0f43a46..f3f19e1 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -396,8 +396,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 
 	DBG(("%s\n", __FUNCTION__));
 
-	sna_selftest();
-
 	if (scrn->numEntities != 1)
 		return FALSE;
 
@@ -408,6 +406,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	if (flags & PROBE_DETECT)
 		return TRUE;
 
+	sna_selftest();
+
 	sna = to_sna(scrn);
 	if (sna == NULL) {
 		sna = xnfcalloc(sizeof(struct sna), 1);
commit a5fe863e3f1a6750edaffb518c4e768e9bc11521
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 25 14:28:56 2012 +0100

    sna: Check the bus type before declaring probe success
    
    This should never fail, but still better to fail during detection rather
    than pretend it works.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 9c6994b..0f43a46 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -402,6 +402,8 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 		return FALSE;
 
 	pEnt = xf86GetEntityInfo(scrn->entityList[0]);
+	if (pEnt->location.type != BUS_PCI)
+		return FALSE;
 
 	if (flags & PROBE_DETECT)
 		return TRUE;
@@ -419,9 +421,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 
 	scrn->displayWidth = 640;	/* default it */
 
-	if (sna->pEnt->location.type != BUS_PCI)
-		return FALSE;
-
 	sna->PciInfo = xf86GetPciInfoForEntity(sna->pEnt->index);
 
 	fd = sna_open_drm_master(scrn);
commit 4094826aee76ef24dad13bc5a8a723bfe4a69162
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 25 08:55:13 2012 +0100

    sna: Trust the crtc-is-bound determination after modeset and hotplug
    
    As these should be the only time that they change and we now have the
    checks in place, we can drop the workaround of doing the check just
    before emitting the wait.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0a95da7..fdabfae 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -141,7 +141,6 @@ struct kgem {
 	uint16_t nexec;
 	uint16_t nreloc;
 	uint16_t nfence;
-	uint16_t wait;
 	uint16_t batch_size;
 	uint16_t min_alignment;
 
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 5f7b526..1f2d085 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -299,6 +299,7 @@ extern void sna_mode_hotplug(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
 extern int sna_crtc_id(xf86CrtcPtr crtc);
+extern bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc);
 extern int sna_output_dpms_status(xf86OutputPtr output);
 
 extern int sna_page_flip(struct sna *sna,
@@ -353,7 +354,6 @@ extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 
 extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
-extern bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc);
 
 Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_wakeup(struct sna *sna);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f1fcdbc..4b142a3 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -389,6 +389,19 @@ mode_to_kmode(drmModeModeInfoPtr kmode, DisplayModePtr mode)
 	kmode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
 
+bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
+{
+	struct drm_mode_crtc mode;
+
+	mode.crtc_id = crtc_id(crtc->driver_private);
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
+		return false;
+
+	DBG(("%s: mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
+	     mode.mode_valid, sna->mode.fb_id == mode.fb_id));
+	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
+}
+
 static Bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
@@ -2110,7 +2123,6 @@ static void sna_emit_wait_for_scanline_gen6(struct sna *sna,
 	b[1] = pipe;
 	b[2] = y2 - 1;
 	b[3] = MI_WAIT_FOR_EVENT | event;
-	sna->kgem.wait = sna->kgem.nbatch + 3;
 	kgem_advance_batch(&sna->kgem, 4);
 }
 
@@ -2140,7 +2152,6 @@ static void sna_emit_wait_for_scanline_gen4(struct sna *sna,
 	b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
 	b[3] = b[1] = (y1 << 16) | (y2-1);
 	b[4] = MI_WAIT_FOR_EVENT | event;
-	sna->kgem.wait = sna->kgem.nbatch + 4;
 	kgem_advance_batch(&sna->kgem, 5);
 }
 
@@ -2168,7 +2179,6 @@ static void sna_emit_wait_for_scanline_gen2(struct sna *sna,
 		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
 	else
 		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
-	sna->kgem.wait = sna->kgem.nbatch + 4;
 	kgem_advance_batch(&sna->kgem, 5);
 }
 
@@ -2233,19 +2243,6 @@ sna_wait_for_scanline(struct sna *sna,
 	return true;
 }
 
-bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
-{
-	struct drm_mode_crtc mode;
-
-	mode.crtc_id = crtc_id(crtc->driver_private);
-	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
-		return false;
-
-	DBG(("%s: mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
-	     mode.mode_valid, sna->mode.fb_id == mode.fb_id));
-	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
-}
-
 void sna_mode_hotplug(struct sna *sna)
 {
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 4fa4320..a1f671a 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -493,8 +493,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
 	if (flush) { /* STAT! */
-		if (!sna_crtc_is_bound(sna, crtc))
-			sna->kgem.batch[sna->kgem.wait] = 0;
+		assert(sna_crtc_is_bound(sna, ctrc));
 		kgem_submit(&sna->kgem);
 	}
 
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 4975f55..9247e88 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -308,8 +308,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	 * we can hit the next vsync.
 	 */
 	if (flush) {
-		if (!sna_crtc_is_bound(sna, crtc))
-			sna->kgem.batch[sna->kgem.wait] = 0;
+		assert(sna_crtc_is_bound(sna, ctrc));
 		kgem_submit(&sna->kgem);
 	}
 
commit 8a9a585341e2dd43c649204fcf6d92a867671ba3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 25 08:50:33 2012 +0100

    Only create a single instance of the intel_options array
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/Makefile.am b/src/Makefile.am
index d057e43..a7043d1 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -43,6 +43,7 @@ NULL:=#
 intel_drv_la_SOURCES = \
 	intel_list.h \
 	intel_options.h \
+	intel_options.c \
 	intel_module.c \
 	compat-api.h \
 	$(NULL)
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 60f43bb..fadc0a6 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -220,11 +220,9 @@ static Bool I830GetEarlyOptions(ScrnInfoPtr scrn)
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	/* Process the options */
-	xf86CollectOptions(scrn, NULL);
-	if (!(intel->Options = malloc(sizeof(intel_options))))
+	intel->Options = intel_options_get(scrn);
+	if (!intel->Options)
 		return FALSE;
-	memcpy(intel->Options, intel_options, sizeof(intel_options));
-	xf86ProcessOptions(scrn->scrnIndex, scrn->options, intel->Options);
 
 	intel->fallback_debug = xf86ReturnOptValBool(intel->Options,
 						     OPTION_FALLBACKDEBUG,
diff --git a/src/intel_options.c b/src/intel_options.c
new file mode 100644
index 0000000..80572be
--- /dev/null
+++ b/src/intel_options.c
@@ -0,0 +1,47 @@
+#include "intel_options.h"
+
+const OptionInfoRec intel_options[] = {
+	{OPTION_ACCEL_METHOD,	"AccelMethod",	OPTV_STRING,	{0},	0},
+	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	1},
+	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	0},
+	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	0},
+	{OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	0},
+	{OPTION_SHADOW,	"Shadow",	OPTV_BOOLEAN,	{0},	0},
+	{OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	1},
+	{OPTION_TRIPLE_BUFFER, "TripleBuffer", OPTV_BOOLEAN,	{0},	1},
+#ifdef INTEL_XVMC
+	{OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	1},
+#endif
+	{OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, 0},
+	{OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	1},
+#ifdef USE_SNA
+	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	1},
+	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	0},
+	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	1},
+#endif
+#ifdef USE_UXA
+	{OPTION_FALLBACKDEBUG, "FallbackDebug", OPTV_BOOLEAN, {0},	0},
+	{OPTION_BUFFER_CACHE,       "BufferCache",  OPTV_BOOLEAN,   {0},    1},
+#endif
+	{-1,			NULL,		OPTV_NONE,	{0},	0}
+};
+
+OptionInfoPtr intel_options_get(ScrnInfoPtr scrn)
+{
+	OptionInfoPtr options;
+
+	xf86CollectOptions(scrn, NULL);
+	if (!(options = malloc(sizeof(intel_options))))
+		return NULL;
+
+	memcpy(options, intel_options, sizeof(intel_options));
+	xf86ProcessOptions(scrn->scrnIndex, scrn->options, options);
+
+	return options;
+}
diff --git a/src/intel_options.h b/src/intel_options.h
index 8863878..42a9e56 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -1,6 +1,9 @@
 #ifndef INTEL_OPTIONS_H
 #define INTEL_OPTIONS_H
 
+#include <xf86.h>
+#include <xf86Opt.h>
+
 /*
  * Note: "ColorKey" is provided for compatibility with the i810 driver.
  * However, the correct option name is "VideoKey".  "ColorKey" usually
@@ -40,36 +43,7 @@ enum intel_options {
 	NUM_OPTIONS,
 };
 
-static OptionInfoRec intel_options[] = {
-	{OPTION_ACCEL_METHOD,	"AccelMethod",	OPTV_STRING,	{0},	0},
-	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	FALSE},
-	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	FALSE},
-	{OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	FALSE},
-	{OPTION_SHADOW,	"Shadow",	OPTV_BOOLEAN,	{0},	FALSE},
-	{OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_TRIPLE_BUFFER, "TripleBuffer", OPTV_BOOLEAN,	{0},	TRUE},
-#ifdef INTEL_XVMC
-	{OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	TRUE},
-#endif
-	{OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, FALSE},
-	{OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, FALSE},
-	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, FALSE},
-	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, FALSE},
-	{OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	TRUE},
-#ifdef USE_SNA
-	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	TRUE},
-	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	FALSE},
-	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	TRUE},
-#endif
-#ifdef USE_UXA
-	{OPTION_FALLBACKDEBUG, "FallbackDebug", OPTV_BOOLEAN, {0},	FALSE},
-	{OPTION_BUFFER_CACHE,       "BufferCache",  OPTV_BOOLEAN,   {0},    TRUE},
-#endif
-	{-1,			NULL,		OPTV_NONE,	{0},	FALSE}
-};
+extern const OptionInfoRec intel_options[];
+OptionInfoPtr intel_options_get(ScrnInfoPtr scrn);
 
 #endif /* INTEL_OPTIONS_H */
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index fb54a24..9c6994b 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -259,15 +259,8 @@ static Bool sna_get_early_options(ScrnInfoPtr scrn)
 {
 	struct sna *sna = to_sna(scrn);
 
-	/* Process the options */
-	xf86CollectOptions(scrn, NULL);
-	if (!(sna->Options = malloc(sizeof(intel_options))))
-		return FALSE;
-
-	memcpy(sna->Options, intel_options, sizeof(intel_options));
-	xf86ProcessOptions(scrn->scrnIndex, scrn->options, sna->Options);
-
-	return TRUE;
+	sna->Options = intel_options_get(scrn);
+	return sna->Options != NULL;
 }
 
 struct sna_device {
commit e45629135065d0cc73c285f8df35ab4e1d07c6dc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 24 18:22:45 2012 +0100

    Allow runtime switching of AccelMethod between uxa/sna and even glamor
    
    Section "Device"
      Option "AccelMethod" "uxa/glamor/sna"
    EndSection
    
    The appropriate backend must also be enabled at compile time for the
    runtime option to be available (i.e. --enable-uxa (default) --enable-sna
    --enable-glamor)
    
    Demanded-by: Adam Jackson <ajax at redhat.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50290
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index c031a26..a025521 100644
--- a/configure.ac
+++ b/configure.ac
@@ -158,6 +158,43 @@ if test "x$GLAMOR" != "xno"; then
 	AC_DEFINE(USE_GLAMOR, 1, [Enable glamor acceleration])
 fi
 
+AC_ARG_WITH(default-accel,
+	    AS_HELP_STRING([--with-default-accel],
+			   [Select the default acceleration method [default=sna if enabled, otherwise uxa]]),
+			   [accel="$withval"],
+			   [accel=auto])
+
+AC_MSG_CHECKING([which acceleration method to use by default])
+if test "x$accel" = xauto; then
+	if test "x$UXA" != "xno"; then
+		accel=uxa
+	else
+		if test "x$SNA" != "xno"; then
+			accel=sna
+		fi
+	fi
+	if test "x$accel" = xauto; then
+		AC_MSG_ERROR([No default acceleration option])
+	fi
+fi
+
+if test "x$accel" = xsna; then
+	if test "x$SNA" != "xno"; then
+		AC_DEFINE(DEFAULT_ACCEL_METHOD, SNA, [Default acceleration method])
+	else
+		AC_MSG_ERROR([SNA requested as default, but is not enabled])
+	fi
+fi
+
+if test "x$accel" = xuxa; then
+	if test "x$UXA" != "xno"; then
+		AC_DEFINE(DEFAULT_ACCEL_METHOD, UXA, [Default acceleration method])
+	else
+		AC_MSG_ERROR([UXA requested as default, but is not enabled])
+	fi
+fi
+AC_MSG_RESULT($accel)
+
 AC_ARG_ENABLE(vmap,
 	      AS_HELP_STRING([--enable-vmap],
 			     [Enable use of vmap [default=no]]),
@@ -174,7 +211,6 @@ AC_ARG_ENABLE(debug,
 			     [Enables internal debugging [default=no]]),
               [DEBUG="$enableval"],
               [DEBUG=no])
-
 # Store the list of server defined optional extensions in REQUIRED_MODULES
 XORG_DRIVER_CHECK_EXT(RANDR, randrproto)
 XORG_DRIVER_CHECK_EXT(RENDER, renderproto)
diff --git a/src/common.h b/src/common.h
index 06b2192..e3ab1f2 100644
--- a/src/common.h
+++ b/src/common.h
@@ -63,7 +63,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 /* I830 hooks for the I810 driver setup/probe. */
 extern const OptionInfoRec *I830AvailableOptions(int chipid, int busid);
-extern void intel_init_scrn(ScrnInfoPtr scrn);
+extern Bool intel_init_scrn(ScrnInfoPtr scrn);
 
 /* Symbol lists shared by the i810 and i830 parts. */
 extern int I830EntityIndex;
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 6b2ab80..60f43bb 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1236,7 +1236,7 @@ static Bool I830PMEvent(int scrnIndex, pmEvent event, Bool undo)
 	return TRUE;
 }
 
-void intel_init_scrn(ScrnInfoPtr scrn)
+Bool intel_init_scrn(ScrnInfoPtr scrn)
 {
 	scrn->PreInit = I830PreInit;
 	scrn->ScreenInit = I830ScreenInit;
@@ -1247,4 +1247,5 @@ void intel_init_scrn(ScrnInfoPtr scrn)
 	scrn->FreeScreen = I830FreeScreen;
 	scrn->ValidMode = I830ValidMode;
 	scrn->PMEvent = I830PMEvent;
+	return TRUE;
 }
diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index 4741d58..5c0186e 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -172,6 +172,18 @@ intel_glamor_finish_access(PixmapPtr pixmap, uxa_access_t access)
 	return;
 }
 
+static Bool
+intel_glamor_enabled(intel_screen_private *intel)
+{
+	const char *s;
+
+	s = xf86GetOptString(intel->Options, ACCEL_METHOD);
+	if (s == NULL)
+		return FALSE;
+
+	return strcasecmp(s, "glamor") == 0;
+}
+
 Bool
 intel_glamor_init(ScreenPtr screen)
 {
@@ -181,6 +193,9 @@ intel_glamor_init(ScreenPtr screen)
 	if ((intel->uxa_flags & UXA_GLAMOR_EGL_INITIALIZED) == 0)
 		goto fail;
 
+	if (!intel_glamor_enabled(intel))
+		goto fail;
+
 	if (!glamor_init(screen, GLAMOR_INVERTED_Y_AXIS | GLAMOR_USE_EGL_SCREEN)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "Failed to initialize glamor.\n");
diff --git a/src/intel_module.c b/src/intel_module.c
index ac6dae1..4430ac6 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -31,6 +31,7 @@
 #include <xf86.h>
 #include <xf86_OSproc.h>
 #include <xf86cmap.h>
+#include <xf86Parser.h>
 #include <xf86drmMode.h>
 
 #include <xorgVersion.h>
@@ -292,6 +293,43 @@ static Bool has_kernel_mode_setting(struct pci_device *dev)
 	return ret == 0;
 }
 
+extern XF86ConfigPtr xf86configptr;
+
+static XF86ConfDevicePtr
+_xf86findDriver(const char *ident, XF86ConfDevicePtr p)
+{
+	while (p) {
+		if (xf86nameCompare(ident, p->dev_driver) == 0)
+			return p;
+
+		p = p->list.next;
+	}
+	return NULL;
+}
+
+static enum accel_method { UXA, SNA } get_accel_method(void)
+{
+	enum accel_method accel_method = DEFAULT_ACCEL_METHOD;
+	XF86ConfDevicePtr dev;
+
+	dev = _xf86findDriver("intel", xf86configptr->conf_device_lst);
+	if (dev && dev->dev_option_lst) {
+		const char *s;
+
+		s = xf86FindOptionValue(dev->dev_option_lst, "AccelMethod");
+		if (s ) {
+			if (strcasecmp(s, "sna") == 0)
+				accel_method = SNA;
+			else if (strcasecmp(s, "uxa") == 0)
+				accel_method = UXA;
+			else if (strcasecmp(s, "glamor") == 0)
+				accel_method = UXA;
+		}
+	}
+
+	return accel_method;
+}
+
 /*
  * intel_pci_probe --
  *
@@ -338,34 +376,35 @@ static Bool intel_pci_probe(DriverPtr		driver,
 
 	scrn = xf86ConfigPciEntity(NULL, 0, entity_num, intel_pci_chipsets,
 				   NULL, NULL, NULL, NULL, NULL);
-	if (scrn != NULL) {
-		scrn->driverVersion = INTEL_VERSION;
-		scrn->driverName = INTEL_DRIVER_NAME;
-		scrn->name = INTEL_NAME;
-		scrn->Probe = NULL;
+	if (scrn == NULL)
+		return FALSE;
+
+	scrn->driverVersion = INTEL_VERSION;
+	scrn->driverName = INTEL_DRIVER_NAME;
+	scrn->name = INTEL_NAME;
+	scrn->Probe = NULL;
 
-		switch (DEVICE_ID(device)) {
 #if !KMS_ONLY
-		case PCI_CHIP_I810:
-		case PCI_CHIP_I810_DC100:
-		case PCI_CHIP_I810_E:
-		case PCI_CHIP_I815:
-			lg_i810_init(scrn);
-			break;
+	switch (DEVICE_ID(device)) {
+	case PCI_CHIP_I810:
+	case PCI_CHIP_I810_DC100:
+	case PCI_CHIP_I810_E:
+	case PCI_CHIP_I815:
+		return lg_i810_init(scrn);
+	}
 #endif
 
-		default:
+	switch (get_accel_method()) {
 #if USE_SNA
-			sna_init_scrn(scrn, entity_num);
-#elif USE_UXA
-			intel_init_scrn(scrn);
-#else
-			scrn = NULL;
+	case SNA: return sna_init_scrn(scrn, entity_num);
 #endif
-			break;
-		}
+
+#if USE_UXA
+	case UXA: return intel_init_scrn(scrn);
+#endif
+
+	default: return FALSE;
 	}
-	return scrn != NULL;
 }
 
 #ifdef XFree86LOADER
diff --git a/src/intel_options.h b/src/intel_options.h
index 9367751..8863878 100644
--- a/src/intel_options.h
+++ b/src/intel_options.h
@@ -8,6 +8,7 @@
  */
 
 enum intel_options {
+	OPTION_ACCEL_METHOD,
 	OPTION_DRI,
 	OPTION_VIDEO_KEY,
 	OPTION_COLOR_KEY,
@@ -40,6 +41,7 @@ enum intel_options {
 };
 
 static OptionInfoRec intel_options[] = {
+	{OPTION_ACCEL_METHOD,	"AccelMethod",	OPTV_STRING,	{0},	0},
 	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	TRUE},
 	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	FALSE},
 	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	FALSE},
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 09d52c5..6ead393 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -2102,7 +2102,7 @@ lg_i810_available_options(int chipid, int busid)
 }
 
 
-void lg_i810_init(ScrnInfoPtr scrn)
+Bool lg_i810_init(ScrnInfoPtr scrn)
 {
     scrn->PreInit = I810PreInit;
     scrn->ScreenInit = I810ScreenInit;
@@ -2112,4 +2112,5 @@ void lg_i810_init(ScrnInfoPtr scrn)
     scrn->LeaveVT = I810LeaveVT;
     scrn->FreeScreen = I810FreeScreen;
     scrn->ValidMode = I810ValidMode;
+    return TRUE;
 }
diff --git a/src/legacy/legacy.h b/src/legacy/legacy.h
index 7bdd172..0ff3299 100644
--- a/src/legacy/legacy.h
+++ b/src/legacy/legacy.h
@@ -1,3 +1,3 @@
 /* The old i810 (only) driver. */
 const OptionInfoRec *lg_i810_available_options(int chipid, int busid);
-void lg_i810_init(ScrnInfoPtr scrn);
+Bool lg_i810_init(ScrnInfoPtr scrn);
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 5d42e69..fb54a24 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -1047,7 +1047,7 @@ static Bool sna_pm_event(int scrnIndex, pmEvent event, Bool undo)
 	return TRUE;
 }
 
-void sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
+Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 {
 	EntityInfoPtr entity;
 
@@ -1081,8 +1081,13 @@ void sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	xf86SetEntitySharable(scrn->entityList[0]);
 
 	entity = xf86GetEntityInfo(entity_num);
+	if (!entity)
+		return FALSE;
+
 	xf86SetEntityInstanceForScreen(scrn,
 				       entity->index,
 				       xf86GetNumEntityInstances(entity->index)-1);
 	free(entity);
+
+	return TRUE;
 }
diff --git a/src/sna/sna_module.h b/src/sna/sna_module.h
index aa1ae0d..1b46cb7 100644
--- a/src/sna/sna_module.h
+++ b/src/sna/sna_module.h
@@ -1 +1 @@
-void sna_init_scrn(ScrnInfoPtr scrn, int entity_num);
+Bool sna_init_scrn(ScrnInfoPtr scrn, int entity_num);
commit df6ab02c3690eea8393ecc8c113e2f2891856cc6
Author: Eugeni Dodonov <eugeni.dodonov at intel.com>
Date:   Mon Oct 31 14:43:22 2011 -0200

    Unify options handling between UXA and SNA
    
    Unifies available options for both UXA and SNA drivers, and
    moves them into a common header file, intel_options.h.
    
    Signed-off-by: Eugeni Dodonov <eugeni.dodonov at intel.com>

diff --git a/src/Makefile.am b/src/Makefile.am
index fd139ee..d057e43 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -42,6 +42,7 @@ NULL:=#
 
 intel_drv_la_SOURCES = \
 	intel_list.h \
+	intel_options.h \
 	intel_module.c \
 	compat-api.h \
 	$(NULL)
diff --git a/src/intel_display.c b/src/intel_display.c
index 77a1cce..89f7259 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1702,7 +1702,6 @@ int intel_crtc_to_pipe(xf86CrtcPtr crtc)
 
 Bool intel_crtc_on(xf86CrtcPtr crtc)
 {
-	ScrnInfoPtr scrn = crtc->scrn;
 	struct intel_crtc *intel_crtc = crtc->driver_private;
 	drmModeCrtcPtr drm_crtc;
 	Bool ret;
diff --git a/src/intel_driver.c b/src/intel_driver.c
index b055437..6b2ab80 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -78,59 +78,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xf86drmMode.h>
 
 #include "intel_glamor.h"
-
-/* *INDENT-OFF* */
-/*
- * Note: "ColorKey" is provided for compatibility with the i810 driver.
- * However, the correct option name is "VideoKey".  "ColorKey" usually
- * refers to the tranparency key for 8+24 overlays, not for video overlays.
- */
-
-typedef enum {
-   OPTION_DRI,
-   OPTION_VIDEO_KEY,
-   OPTION_COLOR_KEY,
-   OPTION_FALLBACKDEBUG,
-   OPTION_TILING_FB,
-   OPTION_TILING_2D,
-   OPTION_SHADOW,
-   OPTION_SWAPBUFFERS_WAIT,
-   OPTION_TRIPLE_BUFFER,
-#ifdef INTEL_XVMC
-   OPTION_XVMC,
-#endif
-   OPTION_PREFER_OVERLAY,
-   OPTION_DEBUG_FLUSH_BATCHES,
-   OPTION_DEBUG_FLUSH_CACHES,
-   OPTION_DEBUG_WAIT,
-   OPTION_HOTPLUG,
-   OPTION_RELAXED_FENCING,
-   OPTION_BUFFER_CACHE,
-} I830Opts;
-
-static OptionInfoRec I830Options[] = {
-   {OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	FALSE},
-   {OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	FALSE},
-   {OPTION_FALLBACKDEBUG, "FallbackDebug", OPTV_BOOLEAN, {0},	FALSE},
-   {OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	FALSE},
-   {OPTION_SHADOW,	"Shadow",	OPTV_BOOLEAN,	{0},	FALSE},
-   {OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_TRIPLE_BUFFER, "TripleBuffer", OPTV_BOOLEAN,	{0},	TRUE},
-#ifdef INTEL_XVMC
-   {OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	TRUE},
-#endif
-   {OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, FALSE},
-   {OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, FALSE},
-   {OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, FALSE},
-   {OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, FALSE},
-   {OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_BUFFER_CACHE,	"BufferCache",	OPTV_BOOLEAN,	{0},	TRUE},
-   {-1,			NULL,		OPTV_NONE,	{0},	FALSE}
-};
-/* *INDENT-ON* */
+#include "intel_options.h"
 
 static void i830AdjustFrame(int scrnIndex, int x, int y, int flags);
 static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen);
@@ -140,11 +88,6 @@ static Bool I830EnterVT(int scrnIndex, int flags);
 extern void xf86SetCursor(ScreenPtr screen, CursorPtr pCurs, int x, int y);
 
 /* Export I830 options to i830 driver where necessary */
-const OptionInfoRec *intel_uxa_available_options(int chipid, int busid)
-{
-	return I830Options;
-}
-
 static void
 I830LoadPalette(ScrnInfoPtr scrn, int numColors, int *indices,
 		LOCO * colors, VisualPtr pVisual)
@@ -278,9 +221,9 @@ static Bool I830GetEarlyOptions(ScrnInfoPtr scrn)
 
 	/* Process the options */
 	xf86CollectOptions(scrn, NULL);
-	if (!(intel->Options = malloc(sizeof(I830Options))))
+	if (!(intel->Options = malloc(sizeof(intel_options))))
 		return FALSE;
-	memcpy(intel->Options, I830Options, sizeof(I830Options));
+	memcpy(intel->Options, intel_options, sizeof(intel_options));
 	xf86ProcessOptions(scrn->scrnIndex, scrn->options, intel->Options);
 
 	intel->fallback_debug = xf86ReturnOptValBool(intel->Options,
diff --git a/src/intel_driver.h b/src/intel_driver.h
index 98973e5..f33d135 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -247,7 +247,5 @@ void intel_detect_chipset(ScrnInfoPtr scrn,
 			  struct pci_device *pci,
 			  struct intel_chipset *chipset);
 
-const OptionInfoRec *intel_uxa_available_options(int chipid, int busid);
-
 
 #endif /* INTEL_DRIVER_H */
diff --git a/src/intel_module.c b/src/intel_module.c
index c6f94f5..ac6dae1 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -41,6 +41,7 @@
 
 #include "common.h"
 #include "intel_driver.h"
+#include "intel_options.h"
 #include "legacy/legacy.h"
 #include "sna/sna_module.h"
 
@@ -397,13 +398,7 @@ intel_available_options(int chipid, int busid)
 #endif
 
 	default:
-#if USE_SNA
-		return sna_available_options(chipid, busid);
-#elif USE_UXA
-		return intel_uxa_available_options(chipid, busid);
-#else
-		return NULL;
-#endif
+		return intel_options;
 	}
 }
 
diff --git a/src/intel_options.h b/src/intel_options.h
new file mode 100644
index 0000000..9367751
--- /dev/null
+++ b/src/intel_options.h
@@ -0,0 +1,73 @@
+#ifndef INTEL_OPTIONS_H
+#define INTEL_OPTIONS_H
+
+/*
+ * Note: "ColorKey" is provided for compatibility with the i810 driver.
+ * However, the correct option name is "VideoKey".  "ColorKey" usually
+ * refers to the tranparency key for 8+24 overlays, not for video overlays.
+ */
+
+enum intel_options {
+	OPTION_DRI,
+	OPTION_VIDEO_KEY,
+	OPTION_COLOR_KEY,
+	OPTION_TILING_FB,
+	OPTION_TILING_2D,
+	OPTION_SHADOW,
+	OPTION_SWAPBUFFERS_WAIT,
+	OPTION_TRIPLE_BUFFER,
+#ifdef INTEL_XVMC
+	OPTION_XVMC,
+#endif
+	OPTION_PREFER_OVERLAY,
+	OPTION_DEBUG_FLUSH_BATCHES,
+	OPTION_DEBUG_FLUSH_CACHES,
+	OPTION_DEBUG_WAIT,
+	OPTION_HOTPLUG,
+	OPTION_RELAXED_FENCING,
+	OPTION_USE_SNA,
+#ifdef USE_SNA
+	OPTION_THROTTLE,
+	OPTION_VMAP,
+	OPTION_ZAPHOD,
+	OPTION_DELAYED_FLUSH,
+#endif
+#ifdef USE_UXA
+	OPTION_FALLBACKDEBUG,
+	OPTION_BUFFER_CACHE,
+#endif
+	NUM_OPTIONS,
+};
+
+static OptionInfoRec intel_options[] = {
+	{OPTION_DRI,		"DRI",		OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	FALSE},
+	{OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	FALSE},
+	{OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	FALSE},
+	{OPTION_SHADOW,	"Shadow",	OPTV_BOOLEAN,	{0},	FALSE},
+	{OPTION_SWAPBUFFERS_WAIT, "SwapbuffersWait", OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_TRIPLE_BUFFER, "TripleBuffer", OPTV_BOOLEAN,	{0},	TRUE},
+#ifdef INTEL_XVMC
+	{OPTION_XVMC,	"XvMC",		OPTV_BOOLEAN,	{0},	TRUE},
+#endif
+	{OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, FALSE},
+	{OPTION_DEBUG_FLUSH_BATCHES, "DebugFlushBatches", OPTV_BOOLEAN, {0}, FALSE},
+	{OPTION_DEBUG_FLUSH_CACHES, "DebugFlushCaches", OPTV_BOOLEAN, {0}, FALSE},
+	{OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, FALSE},
+	{OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	TRUE},
+#ifdef USE_SNA
+	{OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	TRUE},
+	{OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	FALSE},
+	{OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	TRUE},
+#endif
+#ifdef USE_UXA
+	{OPTION_FALLBACKDEBUG, "FallbackDebug", OPTV_BOOLEAN, {0},	FALSE},
+	{OPTION_BUFFER_CACHE,       "BufferCache",  OPTV_BOOLEAN,   {0},    TRUE},
+#endif
+	{-1,			NULL,		OPTV_NONE,	{0},	FALSE}
+};
+
+#endif /* INTEL_OPTIONS_H */
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 879940d..5f7b526 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -194,21 +194,6 @@ static inline struct sna_gc *sna_gc(GCPtr gc)
 }
 
 enum {
-	OPTION_TILING_FB,
-	OPTION_TILING_2D,
-	OPTION_PREFER_OVERLAY,
-	OPTION_COLOR_KEY,
-	OPTION_VIDEO_KEY,
-	OPTION_HOTPLUG,
-	OPTION_THROTTLE,
-	OPTION_RELAXED_FENCING,
-	OPTION_VMAP,
-	OPTION_ZAPHOD,
-	OPTION_DELAYED_FLUSH,
-	NUM_OPTIONS
-};
-
-enum {
 	FLUSH_TIMER = 0,
 	THROTTLE_TIMER,
 	EXPIRE_TIMER,
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b8cf3cc..f1fcdbc 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -45,6 +45,8 @@
 #include "sna.h"
 #include "sna_reg.h"
 
+#include "intel_options.h"
+
 #if DEBUG_DISPLAY
 #undef DBG
 #define DBG(x) ErrorF x
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index d0b5bde..5d42e69 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -55,6 +55,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "sna_video.h"
 
 #include "intel_driver.h"
+#include "intel_options.h"
 
 #include <sys/ioctl.h>
 #include <sys/fcntl.h>
@@ -76,31 +77,11 @@ DevPrivateKeyRec sna_gc_index;
 DevPrivateKeyRec sna_glyph_key;
 DevPrivateKeyRec sna_glyph_image_key;
 
-static OptionInfoRec sna_options[] = {
-   {OPTION_TILING_FB,	"LinearFramebuffer",	OPTV_BOOLEAN,	{0},	FALSE},
-   {OPTION_TILING_2D,	"Tiling",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_PREFER_OVERLAY, "XvPreferOverlay", OPTV_BOOLEAN, {0}, FALSE},
-   {OPTION_COLOR_KEY,	"ColorKey",	OPTV_INTEGER,	{0},	FALSE},
-   {OPTION_VIDEO_KEY,	"VideoKey",	OPTV_INTEGER,	{0},	FALSE},
-   {OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_THROTTLE,	"Throttle",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_RELAXED_FENCING,	"UseRelaxedFencing",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_VMAP,	"UseVmap",	OPTV_BOOLEAN,	{0},	TRUE},
-   {OPTION_ZAPHOD,	"ZaphodHeads",	OPTV_STRING,	{0},	FALSE},
-   {OPTION_DELAYED_FLUSH,	"DelayedFlush",	OPTV_BOOLEAN,	{0},	TRUE},
-   {-1,			NULL,		OPTV_NONE,	{0},	FALSE}
-};
-
 static Bool sna_enter_vt(int scrnIndex, int flags);
 
 /* temporary */
 extern void xf86SetCursor(ScreenPtr screen, CursorPtr pCurs, int x, int y);
 
-const OptionInfoRec *sna_available_options(int chipid, int busid)
-{
-	return sna_options;
-}
-
 static void
 sna_load_palette(ScrnInfoPtr scrn, int numColors, int *indices,
 		 LOCO * colors, VisualPtr pVisual)
@@ -280,10 +261,10 @@ static Bool sna_get_early_options(ScrnInfoPtr scrn)
 
 	/* Process the options */
 	xf86CollectOptions(scrn, NULL);
-	if (!(sna->Options = malloc(sizeof(sna_options))))
+	if (!(sna->Options = malloc(sizeof(intel_options))))
 		return FALSE;
 
-	memcpy(sna->Options, sna_options, sizeof(sna_options));
+	memcpy(sna->Options, intel_options, sizeof(intel_options));
 	xf86ProcessOptions(scrn->scrnIndex, scrn->options, sna->Options);
 
 	return TRUE;
diff --git a/src/sna/sna_module.h b/src/sna/sna_module.h
index 97d5dd5..aa1ae0d 100644
--- a/src/sna/sna_module.h
+++ b/src/sna/sna_module.h
@@ -1,3 +1 @@
-const OptionInfoRec *sna_available_options(int chipid, int busid);
 void sna_init_scrn(ScrnInfoPtr scrn, int entity_num);
-
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index c80ccfb..1f6e210 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -57,6 +57,8 @@
 #include "sna_reg.h"
 #include "sna_video.h"
 
+#include "intel_options.h"
+
 #include <xf86xv.h>
 #include <X11/extensions/Xv.h>
 
diff --git a/src/sna/sna_video_overlay.c b/src/sna/sna_video_overlay.c
index ba6f671..eb39a25 100644
--- a/src/sna/sna_video_overlay.c
+++ b/src/sna/sna_video_overlay.c
@@ -36,6 +36,8 @@
 #include <fourcc.h>
 #include <i915_drm.h>
 
+#include "intel_options.h"
+
 #if DEBUG_VIDEO_OVERLAY
 #undef DBG
 #define DBG(x) ErrorF x
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index 0e5f3ab..fff31fb 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -31,6 +31,8 @@
 #include "sna.h"
 #include "sna_video.h"
 
+#include "intel_options.h"
+
 #include <xf86xv.h>
 #include <X11/extensions/Xv.h>
 #include <fourcc.h>
commit ae2be7e25bda46551381c19a673b321b4382e1f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 24 16:33:27 2012 +0100

    sna/trapezoids: Correct rounding for downsampling onto sample grid
    
    Reported-by: S. Christian Collins <s_chriscollins at hotmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49446
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c0565fa..e604720 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3150,7 +3150,7 @@ composite_unaligned_boxes(struct sna *sna,
 
 static inline int pixman_fixed_to_grid (pixman_fixed_t v)
 {
-	return (v + FAST_SAMPLES_mask/2) >> (16 - FAST_SAMPLES_shift);
+	return (v + ((1<<(16-FAST_SAMPLES_shift))-1)/2) >> (16 - FAST_SAMPLES_shift);
 }
 
 static inline bool
@@ -3158,6 +3158,12 @@ project_trapezoid_onto_grid(const xTrapezoid *in,
 			    int dx, int dy,
 			    xTrapezoid *out)
 {
+	__DBG(("%s: in: L:(%d, %d), (%d, %d); R:(%d, %d), (%d, %d), [%d, %d]\n",
+	       __FUNCTION__,
+	       in->left.p1.x, in->left.p1.y, in->left.p2.x, in->left.p2.y,
+	       in->right.p1.x, in->right.p1.y, in->right.p2.x, in->right.p2.y,
+	       in->top, in->bottom));
+
 	out->left.p1.x = dx + pixman_fixed_to_grid(in->left.p1.x);
 	out->left.p1.y = dy + pixman_fixed_to_grid(in->left.p1.y);
 	out->left.p2.x = dx + pixman_fixed_to_grid(in->left.p2.x);
@@ -3171,6 +3177,12 @@ project_trapezoid_onto_grid(const xTrapezoid *in,
 	out->top = dy + pixman_fixed_to_grid(in->top);
 	out->bottom = dy + pixman_fixed_to_grid(in->bottom);
 
+	__DBG(("%s: out: L:(%d, %d), (%d, %d); R:(%d, %d), (%d, %d), [%d, %d]\n",
+	       __FUNCTION__,
+	       out->left.p1.x, out->left.p1.y, out->left.p2.x, out->left.p2.y,
+	       out->right.p1.x, out->right.p1.y, out->right.p2.x, out->right.p2.y,
+	       out->top, out->bottom));
+
 	return xTrapezoidValid(out);
 }
 
commit 0ab226e27e7920bdb9f7eb62c5174cd097ac7f7f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 24 12:51:46 2012 +0100

    sna: Query CRTC states following a hotplug event
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 7a01ee7..879940d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -310,6 +310,7 @@ struct sna {
 
 Bool sna_mode_pre_init(ScrnInfoPtr scrn, struct sna *sna);
 extern void sna_mode_remove_fb(struct sna *sna);
+extern void sna_mode_hotplug(struct sna *sna);
 extern void sna_mode_fini(struct sna *sna);
 
 extern int sna_crtc_id(xf86CrtcPtr crtc);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index c5b96f2..b8cf3cc 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2243,3 +2243,18 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 	     mode.mode_valid, sna->mode.fb_id == mode.fb_id));
 	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
 }
+
+void sna_mode_hotplug(struct sna *sna)
+{
+	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(sna->scrn);
+	int i;
+
+	/* Validate CRTC attachments */
+	for (i = 0; i < xf86_config->num_crtc; i++) {
+		xf86CrtcPtr crtc = xf86_config->crtc[i];
+		if (crtc->enabled) {
+			struct sna_crtc *sna_crtc = crtc->driver_private;
+			sna_crtc->active = sna_crtc_is_bound(sna, crtc);
+		}
+	}
+}
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index cab6d03..d0b5bde 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -635,6 +635,7 @@ sna_handle_uevents(int fd, void *closure)
 	if (memcmp(&s.st_rdev, &udev_devnum, sizeof (dev_t)) == 0 &&
 	    hotplug && atoi(hotplug) == 1) {
 		DBG(("%s: hotplug event\n", __FUNCTION__));
+		sna_mode_hotplug(sna);
 		RRGetInfo(xf86ScrnToScreen(scrn), TRUE);
 	}
 
commit 3f3bde4f0c72f6f31aae322bcdc20b95eade6631
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 24 11:58:46 2012 +0100

    uxa: Only consider an output valid if the kernel reports it attached
    
    Reported-by: Kyle Hill <kyle.hill at tacomafia.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=50078
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index abdc372..77a1cce 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1699,3 +1699,25 @@ int intel_crtc_to_pipe(xf86CrtcPtr crtc)
 	struct intel_crtc *intel_crtc = crtc->driver_private;
 	return intel_crtc->pipe;
 }
+
+Bool intel_crtc_on(xf86CrtcPtr crtc)
+{
+	ScrnInfoPtr scrn = crtc->scrn;
+	struct intel_crtc *intel_crtc = crtc->driver_private;
+	drmModeCrtcPtr drm_crtc;
+	Bool ret;
+
+	if (!crtc->enabled)
+		return FALSE;
+
+	/* Kernel manages CRTC status based on output config */
+	drm_crtc = drmModeGetCrtc(intel_crtc->mode->fd, crtc_id(intel_crtc));
+	if (drm_crtc == NULL)
+		return FALSE;
+
+	ret = (drm_crtc->mode_valid &&
+	       intel_crtc->mode->fb_id == drm_crtc->buffer_id);
+	free(drm_crtc);
+
+	return ret;
+}
diff --git a/src/intel_driver.c b/src/intel_driver.c
index d67d8c8..b055437 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -782,26 +782,6 @@ intel_init_initial_framebuffer(ScrnInfoPtr scrn)
 	return TRUE;
 }
 
-Bool intel_crtc_on(xf86CrtcPtr crtc)
-{
-	ScrnInfoPtr scrn = crtc->scrn;
-	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(scrn);
-	int i;
-
-	if (!crtc->enabled)
-		return FALSE;
-
-	/* Kernel manages CRTC status based out output config */
-	for (i = 0; i < xf86_config->num_output; i++) {
-		xf86OutputPtr output = xf86_config->output[i];
-		if (output->crtc == crtc &&
-		    intel_output_dpms_status(output) == DPMSModeOn)
-			return TRUE;
-	}
-
-	return FALSE;
-}
-
 static void
 intel_flush_callback(CallbackListPtr *list,
 		     pointer user_data, pointer call_data)
commit 11db66fedf96f158cbbac8011a8ba0b29a20ba3a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 24 11:07:19 2012 +0100

    sna: Add some more DBG tracepoints around modesetting
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 990dd2e..c5b96f2 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -213,6 +213,8 @@ sna_output_backlight_set(xf86OutputPtr output, int level)
 	char path[BACKLIGHT_PATH_LEN], val[BACKLIGHT_VALUE_LEN];
 	int fd, len, ret;
 
+	DBG(("%s: level=%d\n", __FUNCTION__, level));
+
 	if (level > sna_output->backlight_max)
 		level = sna_output->backlight_max;
 	if (! sna_output->backlight_iface || level < 0)
@@ -262,9 +264,12 @@ sna_output_backlight_get(xf86OutputPtr output)
 	close(fd);
 
 	level = atoi(val);
+	DBG(("%s: level=%d (max=%d)\n",
+	     __FUNCTION__, level, sna_output->backlight_max));
+
 	if (level > sna_output->backlight_max)
 		level = sna_output->backlight_max;
-	if (level < 0)
+	else if (level < 0)
 		level = -1;
 	return level;
 }
@@ -394,6 +399,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	int fb_id, x, y;
 	int i, ret = FALSE;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	assert(xf86_config->num_output < ARRAY_SIZE(output_ids));
 
 	for (i = 0; i < xf86_config->num_output; i++) {
@@ -466,6 +473,9 @@ sna_crtc_restore(struct sna *sna)
 	struct kgem_bo *bo;
 	int i;
 
+	DBG(("%s (fb_pixmap=%d, front=%d)\n", __FUNCTION__,
+	     sna->mode.fb_pixmap, sna->front->drawable.serialNumber));
+
 	if (sna->mode.fb_pixmap == sna->front->drawable.serialNumber)
 		return;
 
@@ -492,12 +502,8 @@ sna_crtc_restore(struct sna *sna)
 
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
-
-		if (!crtc->enabled)
-			continue;
-
-		if (!sna_crtc_apply(crtc))
-			return;
+		if (crtc->enabled)
+			sna_crtc_apply(crtc);
 	}
 
 	kgem_bo_retire(&sna->kgem, bo);
@@ -560,9 +566,11 @@ void sna_copy_fbcon(struct sna *sna)
 	int dx, dy;
 	int i;
 
-	if (sna->kgem.wedged)
+	if (wedged(sna))
 		return;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	/* Scan the connectors for a framebuffer and assume that is the fbcon */
 	fbcon = NULL;
 	for (i = 0; fbcon == NULL && i < xf86_config->num_crtc; i++) {
@@ -579,8 +587,10 @@ void sna_copy_fbcon(struct sna *sna)
 					     mode_crtc->buffer_id);
 		drmModeFreeCrtc(mode_crtc);
 	}
-	if (fbcon == NULL)
+	if (fbcon == NULL) {
+		DBG(("%s: no fbcon found\n", __FUNCTION__));
 		return;
+	}
 
 	/* Wrap the fbcon in a pixmap so that we select the right formats
 	 * in the render copy in case we need to preserve the fbcon
@@ -601,6 +611,8 @@ void sna_copy_fbcon(struct sna *sna)
 	if (bo == NULL)
 		goto cleanup_scratch;
 
+	DBG(("%s: fbcon handle=%d\n", __FUNCTION__, bo->handle));
+
 	priv = sna_pixmap(sna->front);
 	assert(priv && priv->gpu_bo);
 
@@ -756,6 +768,7 @@ sna_crtc_hide_cursor(xf86CrtcPtr crtc)
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 
+	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
 	drmModeSetCursor(sna->kgem.fd, crtc_id(sna_crtc), 0, 64, 64);
 }
 
@@ -765,6 +778,7 @@ sna_crtc_show_cursor(xf86CrtcPtr crtc)
 	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 
+	DBG(("%s: CRTC:%d\n", __FUNCTION__, crtc_id(sna_crtc)));
 	drmModeSetCursor(sna->kgem.fd, crtc_id(sna_crtc),
 			 sna_crtc->cursor, 64, 64);
 }
@@ -909,6 +923,8 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 	struct sna_crtc *sna_crtc;
 	struct drm_i915_get_pipe_from_crtc_id get_pipe;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	sna_crtc = calloc(sizeof(struct sna_crtc), 1);
 	if (sna_crtc == NULL)
 		return;
@@ -967,6 +983,8 @@ sna_output_detect(xf86OutputPtr output)
 	struct sna_output *sna_output = output->driver_private;
 	xf86OutputStatus status;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	drmModeFreeConnector(sna_output->mode_output);
 	sna_output->mode_output =
 		drmModeGetConnector(sna->kgem.fd, sna_output->output_id);
@@ -1099,6 +1117,8 @@ sna_output_get_modes(xf86OutputPtr output)
 	DisplayModePtr Modes = NULL;
 	int i;
 
+	DBG(("%s\n", __FUNCTION__));
+
 	sna_output_attach_edid(output);
 
 	/* modes should already be available */
@@ -1190,6 +1210,8 @@ sna_output_dpms(xf86OutputPtr output, int dpms)
 	drmModeConnectorPtr koutput = sna_output->mode_output;
 	int i;
 
+	DBG(("%s: dpms=%d\n", __FUNCTION__, dpms));
+
 	for (i = 0; i < koutput->count_props; i++) {
 		drmModePropertyPtr props;
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index d4b7eb0..cab6d03 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -633,8 +633,10 @@ sna_handle_uevents(int fd, void *closure)
 	hotplug = udev_device_get_property_value(dev, "HOTPLUG");
 
 	if (memcmp(&s.st_rdev, &udev_devnum, sizeof (dev_t)) == 0 &&
-			hotplug && atoi(hotplug) == 1)
+	    hotplug && atoi(hotplug) == 1) {
+		DBG(("%s: hotplug event\n", __FUNCTION__));
 		RRGetInfo(xf86ScrnToScreen(scrn), TRUE);
+	}
 
 	udev_device_unref(dev);
 }
commit fb9da4cb9e12ca733c31879b8b37906f361a8c35
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 23 13:53:37 2012 +0100

    sna: Only mark an unattached output as inactive
    
    So that a later attempt to set the DesiredMode may succeed and we do
    not modify the configuration without notifying clients.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 0c73c36..990dd2e 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -448,7 +448,7 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 			   "failed to set mode: %s\n", strerror(-ret));
 		ret = FALSE;
 	} else {
-		crtc->enabled = sna_crtc->active = sna_crtc_is_bound(sna, crtc);
+		sna_crtc->active = sna_crtc_is_bound(sna, crtc);
 		ret = TRUE;
 	}
 
commit 5ae032e22b127c7c95753197e0914a8028a3b22e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 23 13:27:07 2012 +0100

    sna: Suppress modesetting errors on return from VT switch
    
    If we presume that userspace will set the correct mode shortly
    afterwards, we can ignore the failure of the automatic restore.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 2e81640..d4b7eb0 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -571,13 +571,6 @@ static Bool sna_pre_init(ScrnInfoPtr scrn, int flags)
 	return sna_accel_pre_init(sna);
 }
 
-/**
- * Intialiazes the hardware for the 3D pipeline use in the 2D driver.
- *
- * Some state caching is performed to avoid redundant state emits.  This
- * function is also responsible for marking the state as clobbered for DRI
- * clients.
- */
 static void
 sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
 {
@@ -981,12 +974,17 @@ static Bool sna_enter_vt(int scrnIndex, int flags)
 	DBG(("%s\n", __FUNCTION__));
 
 	if (drmSetMaster(sna->kgem.fd)) {
-		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
+		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			   "drmSetMaster failed: %s\n",
 			   strerror(errno));
+		return FALSE;
 	}
 
-	return xf86SetDesiredModes(scrn);
+	if (!xf86SetDesiredModes(scrn))
+		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
+			   "failed to restore desired modes on VT switch\n");
+
+	return TRUE;
 }
 
 static Bool sna_switch_mode(int scrnIndex, DisplayModePtr mode, int flags)
commit 34882a979d9817d33bd6a8ae73a9f7083556578c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 23 10:27:44 2012 +0100

    sna: Keep checking retirement for flushing list
    
    Even after all outstanding requests have been completed we may still
    have buffers on the flushing list that need to become idle.
    One such consequence would be to prevent the vblank flush from going
    idle, causing spurious wakeups every vrefresh when otherwise idle.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50078
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 470dd24..9a67c38 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1403,8 +1403,11 @@ bool kgem_retire(struct kgem *kgem)
 	retired |= kgem_retire__requests(kgem);
 	retired |= kgem_retire__partials(kgem);
 
-	DBG(("%s -- need_retire=%d\n", __FUNCTION__, kgem->need_retire));
-	kgem->need_retire = !list_is_empty(&kgem->requests);
+	kgem->need_retire =
+		!list_is_empty(&kgem->requests) ||
+		!list_is_empty(&kgem->flushing);
+	DBG(("%s -- retired=%d, need_retire=%d\n",
+	     __FUNCTION__, retired, kgem->need_retire));
 
 	kgem->retire(kgem);
 
commit 54fde5bf05d43f26e7f1893584af736a138d245f
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed May 16 15:34:08 2012 +0100

    intel: convert sna/uxa to using new glyph picture accessors.
    
    The compat-api.h takes care of old/new servers.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 2af7e80..0a2e042 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -221,7 +221,7 @@ glyph_cache_upload(ScreenPtr screen,
 	DBG(("%s: upload glyph %p to cache (%d, %d)x(%d, %d)\n",
 	     __FUNCTION__, glyph, x, y, glyph->info.width, glyph->info.height));
 	sna_composite(PictOpSrc,
-		      GlyphPicture(glyph)[screen->myNum], 0, cache->picture,
+		      GetGlyphPicture(glyph, screen), 0, cache->picture,
 		      0, 0,
 		      0, 0,
 		      x, y,
@@ -302,7 +302,7 @@ glyph_cache(ScreenPtr screen,
 	    struct sna_render *render,
 	    GlyphPtr glyph)
 {
-	PicturePtr glyph_picture = GlyphPicture(glyph)[screen->myNum];
+	PicturePtr glyph_picture = GetGlyphPicture(glyph, screen);
 	struct sna_glyph_cache *cache = &render->glyph[PICT_FORMAT_RGB(glyph_picture->format) != 0];
 	struct sna_glyph *priv;
 	int size, mask, pos, s;
@@ -460,7 +460,7 @@ glyphs_to_dst(struct sna *sna,
 				}
 				if (!glyph_cache(screen, &sna->render, glyph)) {
 					/* no cache for this glyph */
-					priv.atlas = GlyphPicture(glyph)[index];
+					priv.atlas = GetGlyphPicture(glyph, screen);
 					priv.coordinate.x = priv.coordinate.y = 0;
 				} else
 					priv = *sna_glyph(glyph);
@@ -600,7 +600,7 @@ glyphs_slow(struct sna *sna,
 			if (priv.atlas == NULL) {
 				if (!glyph_cache(screen, &sna->render, glyph)) {
 					/* no cache for this glyph */
-					priv.atlas = GlyphPicture(glyph)[index];
+					priv.atlas = GetGlyphPicture(glyph, screen);
 					priv.coordinate.x = priv.coordinate.y = 0;
 				} else
 					priv = *sna_glyph(glyph);
@@ -813,7 +813,7 @@ upload:
 				if (glyph_image == NULL) {
 					int dx, dy;
 
-					picture = GlyphPicture(g)[s];
+					picture = GetGlyphPicture(g, dst->pDrawable->pScreen);
 					if (picture == NULL)
 						goto next_image;
 
@@ -908,7 +908,7 @@ next_image:
 						r.src = priv->coordinate;
 					} else {
 						/* no cache for this glyph */
-						this_atlas = GlyphPicture(glyph)[index];
+						this_atlas = GetGlyphPicture(glyph, screen);
 						r.src.x = r.src.y = 0;
 					}
 				}
@@ -1046,7 +1046,6 @@ glyphs_fallback(CARD8 op,
 		GlyphListPtr list,
 		GlyphPtr *glyphs)
 {
-	int screen = dst->pDrawable->pScreen->myNum;
 	pixman_image_t *dst_image, *mask_image, *src_image;
 	int dx, dy, x, y;
 	BoxRec box;
@@ -1156,7 +1155,7 @@ glyphs_fallback(CARD8 op,
 				PicturePtr picture;
 				int gx, gy;
 
-				picture = GlyphPicture(g)[screen];
+				picture = GetGlyphPicture(g, dst->pDrawable->pScreen);
 				if (picture == NULL)
 					goto next_glyph;
 
diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 921b99c..7db094b 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -235,7 +235,7 @@ uxa_glyph_cache_upload_glyph(ScreenPtr screen,
 			     GlyphPtr glyph,
 			     int x, int y)
 {
-	PicturePtr pGlyphPicture = GlyphPicture(glyph)[screen->myNum];
+	PicturePtr pGlyphPicture = GetGlyphPicture(glyph, screen);
 	PixmapPtr pGlyphPixmap = (PixmapPtr) pGlyphPicture->pDrawable;
 	PixmapPtr pCachePixmap = (PixmapPtr) cache->picture->pDrawable;
 	PixmapPtr scratch;
@@ -449,7 +449,6 @@ uxa_check_glyphs(CARD8 op,
 		 INT16 xSrc,
 		 INT16 ySrc, int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 {
-	int screen = dst->pDrawable->pScreen->myNum;
 	pixman_image_t *image;
 	PixmapPtr scratch;
 	PicturePtr mask;
@@ -513,7 +512,7 @@ uxa_check_glyphs(CARD8 op,
 		n = list->len;
 		while (n--) {
 			GlyphPtr glyph = *glyphs++;
-			PicturePtr g = GlyphPicture(glyph)[screen];
+			PicturePtr g = GetGlyphPicture(glyph, dst->pDrawable->pScreen);
 			if (g) {
 				if (maskFormat) {
 					CompositePicture(PictOpAdd, g, NULL, mask,
@@ -579,7 +578,7 @@ static PicturePtr
 uxa_glyph_cache(ScreenPtr screen, GlyphPtr glyph, int *out_x, int *out_y)
 {
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
-	PicturePtr glyph_picture = GlyphPicture(glyph)[screen->myNum];
+	PicturePtr glyph_picture = GetGlyphPicture(glyph, screen);
 	uxa_glyph_cache_t *cache = &uxa_screen->glyphCaches[PICT_FORMAT_RGB(glyph_picture->format) != 0];
 	struct uxa_glyph *priv = NULL;
 	int size, mask, pos, s;
@@ -796,7 +795,7 @@ uxa_glyphs_via_mask(CARD8 op,
 				this_atlas = uxa_glyph_cache(screen, glyph, &src_x, &src_y);
 				if (this_atlas == NULL) {
 					/* no cache for this glyph */
-					this_atlas = GlyphPicture(glyph)[screen->myNum];
+					this_atlas = GetGlyphPicture(glyph, screen);
 					src_x = src_y = 0;
 				}
 			}
commit 43a34186d13d29c671431832469ca5301751b3cf
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed May 16 15:26:55 2012 +0100

    intel: convert to new screen conversion APIs
    
    The compat header takes care of the old server vs new server.
    
    this commit was autogenerated from util/modular/x-driver-screen-scrn-conv.sh
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/i830_render.c b/src/i830_render.c
index c12e87b..e169cc1 100644
--- a/src/i830_render.c
+++ b/src/i830_render.c
@@ -164,7 +164,7 @@ static Bool i830_get_dest_format(PicturePtr dest_picture, uint32_t * dst_format)
 		*dst_format = COLR_BUF_ARGB4444;
 		break;
 	default:
-		scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+		scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 		intel_debug_fallback(scrn, "Unsupported dest format 0x%x\n",
 				     (int)dest_picture->format);
 		return FALSE;
@@ -245,7 +245,7 @@ static uint32_t i8xx_get_card_format(intel_screen_private *intel,
 static void i830_texture_setup(PicturePtr picture, PixmapPtr pixmap, int unit)
 {
 
-	ScrnInfoPtr scrn = xf86Screens[picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(picture->pDrawable->pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	uint32_t format, tiling_bits, pitch, filter;
 	uint32_t wrap_mode;
@@ -346,7 +346,7 @@ i830_check_composite(int op,
 		     PicturePtr dest_picture,
 		     int width, int height)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 	uint32_t tmp1;
 
 	/* Check for unsupported compositing operations. */
@@ -399,7 +399,7 @@ i830_check_composite_target(PixmapPtr pixmap)
 Bool
 i830_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (picture->repeatType > RepeatReflect) {
@@ -446,7 +446,7 @@ i830_prepare_composite(int op, PicturePtr source_picture,
 		       PicturePtr mask_picture, PicturePtr dest_picture,
 		       PixmapPtr source, PixmapPtr mask, PixmapPtr dest)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drm_intel_bo *bo_table[] = {
 		NULL,		/* batch_bo */
@@ -665,7 +665,7 @@ i830_emit_composite_primitive(PixmapPtr dest,
 			      int maskX, int maskY,
 			      int dstX, int dstY, int w, int h)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	Bool is_affine_src, is_affine_mask = TRUE;
 	int per_vertex;
@@ -853,7 +853,7 @@ void
 i830_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
 	       int dstX, int dstY, int w, int h)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	intel_batch_start_atomic(scrn, 58 +	/* invarient */
diff --git a/src/i915_render.c b/src/i915_render.c
index c6d5ed7..5605edf 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -172,7 +172,7 @@ static Bool i915_get_dest_format(PicturePtr dest_picture, uint32_t * dst_format)
 		*dst_format = COLR_BUF_ARGB4444;
 		break;
 	default:
-		scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+		scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 		intel_debug_fallback(scrn,
 				     "Unsupported dest format 0x%x\n",
 				     (int)dest_picture->format);
@@ -189,7 +189,7 @@ i915_check_composite(int op,
 		     PicturePtr dest_picture,
 		     int width, int height)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 	uint32_t tmp1;
 
 	/* Check for unsupported compositing operations. */
@@ -243,7 +243,7 @@ Bool
 i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 {
 	if (picture->repeatType > RepeatReflect) {
-		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 		intel_debug_fallback(scrn, "Unsupported picture repeat %d\n",
 			     picture->repeatType);
 		return FALSE;
@@ -251,7 +251,7 @@ i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 
 	if (picture->filter != PictFilterNearest &&
 	    picture->filter != PictFilterBilinear) {
-		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 		intel_debug_fallback(scrn, "Unsupported filter 0x%x\n",
 				     picture->filter);
 		return FALSE;
@@ -266,7 +266,7 @@ i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 		w = picture->pDrawable->width;
 		h = picture->pDrawable->height;
 		if ((w > 2048) || (h > 2048)) {
-			ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+			ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 			intel_debug_fallback(scrn,
 					     "Picture w/h too large (%dx%d)\n",
 					     w, h);
@@ -281,7 +281,7 @@ i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 		}
 		if (i == sizeof(i915_tex_formats) / sizeof(i915_tex_formats[0]))
 		{
-			ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+			ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 			intel_debug_fallback(scrn, "Unsupported picture format "
 					     "0x%x\n",
 					     (int)picture->format);
@@ -296,7 +296,7 @@ i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 
 static Bool i915_texture_setup(PicturePtr picture, PixmapPtr pixmap, int unit)
 {
-	ScrnInfoPtr scrn = xf86Screens[picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(picture->pDrawable->pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	uint32_t format, pitch, filter;
 	uint32_t wrap_mode, tiling_bits;
@@ -660,7 +660,7 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 		       PicturePtr mask_picture, PicturePtr dest_picture,
 		       PixmapPtr source, PixmapPtr mask, PixmapPtr dest)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drm_intel_bo *bo_table[] = {
 		NULL,		/* batch_bo */
@@ -951,7 +951,7 @@ void
 i915_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
 	       int dstX, int dstY, int w, int h)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	/* 28 + 16 + 10 + 20 + 32 + 16 */
diff --git a/src/i965_render.c b/src/i965_render.c
index b981ecc..98231b8 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -181,7 +181,7 @@ i965_check_composite(int op,
 		     PicturePtr dest_picture,
 		     int width, int height)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 
 	/* Check for unsupported compositing operations. */
 	if (op >= sizeof(i965_blend_op) / sizeof(i965_blend_op[0])) {
@@ -219,7 +219,7 @@ Bool
 i965_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 {
 	if (picture->repeatType > RepeatReflect) {
-		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 		intel_debug_fallback(scrn,
 				     "extended repeat (%d) not supported\n",
 				     picture->repeatType);
@@ -228,7 +228,7 @@ i965_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 
 	if (picture->filter != PictFilterNearest &&
 	    picture->filter != PictFilterBilinear) {
-		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 		intel_debug_fallback(scrn, "Unsupported filter 0x%x\n",
 				     picture->filter);
 		return FALSE;
@@ -240,7 +240,7 @@ i965_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 		w = picture->pDrawable->width;
 		h = picture->pDrawable->height;
 		if ((w > 8192) || (h > 8192)) {
-			ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+			ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 			intel_debug_fallback(scrn,
 					     "Picture w/h too large (%dx%d)\n",
 					     w, h);
@@ -255,7 +255,7 @@ i965_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 		}
 		if (i == sizeof(i965_tex_formats) / sizeof(i965_tex_formats[0]))
 		{
-			ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+			ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 			intel_debug_fallback(scrn,
 					     "Unsupported picture format "
 					     "0x%x\n",
@@ -1978,7 +1978,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 		       PicturePtr mask_picture, PicturePtr dest_picture,
 		       PixmapPtr source, PixmapPtr mask, PixmapPtr dest)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest_picture->pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest_picture->pDrawable->pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct gen4_render_state *render_state = intel->gen4_render_state;
 	gen4_composite_op *composite_op = &render_state->composite_op;
@@ -2202,7 +2202,7 @@ void
 i965_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
 	       int dstX, int dstY, int w, int h)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	intel_batch_start_atomic(scrn, 200);
diff --git a/src/intel.h b/src/intel.h
index e274db1..253a6bf 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -560,7 +560,7 @@ intel_check_pitch_2d(PixmapPtr pixmap)
 {
 	uint32_t pitch = intel_pixmap_pitch(pixmap);
 	if (pitch > KB(32)) {
-		ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 		intel_debug_fallback(scrn, "pitch exceeds 2d limit 32K\n");
 		return FALSE;
 	}
@@ -573,7 +573,7 @@ intel_check_pitch_3d(PixmapPtr pixmap)
 {
 	uint32_t pitch = intel_pixmap_pitch(pixmap);
 	if (pitch > KB(8)) {
-		ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 		intel_debug_fallback(scrn, "pitch exceeds 3d limit 8K\n");
 		return FALSE;
 	}
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 2719c38..46f22bc 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -246,7 +246,7 @@ void intel_batch_submit(ScrnInfoPtr scrn)
 			if (!once) {
 				xf86DrvMsg(scrn->scrnIndex, X_ERROR, "Detected a hung GPU, disabling acceleration.\n");
 				xf86DrvMsg(scrn->scrnIndex, X_ERROR, "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
-				uxa_set_force_fallback(screenInfo.screens[scrn->scrnIndex], TRUE);
+				uxa_set_force_fallback(xf86ScrnToScreen(scrn), TRUE);
 				intel->force_fallback = TRUE;
 				once = 1;
 			}
diff --git a/src/intel_dri.c b/src/intel_dri.c
index a5ed545..36e96ff 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -97,7 +97,7 @@ static uint32_t pixmap_flink(PixmapPtr pixmap)
 static PixmapPtr get_front_buffer(DrawablePtr drawable)
 {
 	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	PixmapPtr pixmap;
 
@@ -130,7 +130,7 @@ static PixmapPtr get_front_buffer(DrawablePtr drawable)
 static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 {
 	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	PixmapPtr old = get_drawable_pixmap(drawable);
 	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);
 	GCPtr gc;
@@ -182,7 +182,7 @@ static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 				   priv->stride,
 				   NULL);
 
-	intel_get_screen_private(xf86Screens[screen->myNum])->needs_flush = TRUE;
+	intel_get_screen_private(xf86ScreenToScrn(screen))->needs_flush = TRUE;
 	return old;
 }
 
@@ -248,7 +248,7 @@ static PixmapPtr fixup_shadow(DrawablePtr drawable, PixmapPtr pixmap)
 	intel_set_pixmap_private(old, priv);
 	old->refcnt++;
 
-	intel_get_screen_private(xf86Screens[screen->myNum])->needs_flush = TRUE;
+	intel_get_screen_private(xf86ScreenToScrn(screen))->needs_flush = TRUE;
 	return old;
 }
 
@@ -258,7 +258,7 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 		      int count)
 {
 	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	DRI2BufferPtr buffers;
 	I830DRI2BufferPrivatePtr privates;
@@ -385,7 +385,7 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 		     unsigned int format)
 {
 	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	DRI2Buffer2Ptr buffer;
 	I830DRI2BufferPrivatePtr privates;
@@ -534,7 +534,7 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 	I830DRI2BufferPrivatePtr srcPrivate = sourceBuffer->driverPrivate;
 	I830DRI2BufferPrivatePtr dstPrivate = destBuffer->driverPrivate;
 	ScreenPtr screen = drawable->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	DrawablePtr src = (sourceBuffer->attachment == DRI2BufferFrontLeft)
 		? drawable : &srcPrivate->pixmap->drawable;
@@ -687,7 +687,7 @@ static int
 I830DRI2DrawablePipe(DrawablePtr pDraw)
 {
 	ScreenPtr pScreen = pDraw->pScreen;
-	ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+	ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
 	BoxRec box, crtcbox;
 	xf86CrtcPtr crtc;
 	int pipe = -1;
@@ -910,7 +910,7 @@ intel_glamor_create_back_pixmap(ScreenPtr screen,
 				   0);
 	intel_set_pixmap_bo(back_pixmap, back_bo);
 	if (!intel_glamor_create_textured_pixmap(back_pixmap)) {
-		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "Failed to create textured back pixmap.\n");
 		screen->DestroyPixmap(back_pixmap);
@@ -1026,7 +1026,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 static Bool
 can_exchange(DrawablePtr drawable, DRI2BufferPtr front, DRI2BufferPtr back)
 {
-	struct intel_screen_private *intel = intel_get_screen_private(xf86Screens[drawable->pScreen->myNum]);
+	struct intel_screen_private *intel = intel_get_screen_private(xf86ScreenToScrn(drawable->pScreen));
 	I830DRI2BufferPrivatePtr front_priv = front->driverPrivate;
 	I830DRI2BufferPrivatePtr back_priv = back->driverPrivate;
 	PixmapPtr front_pixmap = front_priv->pixmap;
@@ -1244,7 +1244,7 @@ I830DRI2ScheduleSwap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		     CARD64 remainder, DRI2SwapEventPtr func, void *data)
 {
 	ScreenPtr screen = draw->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drmVBlank vbl;
 	int ret, pipe = I830DRI2DrawablePipe(draw), flip = 0;
@@ -1431,7 +1431,7 @@ static int
 I830DRI2GetMSC(DrawablePtr draw, CARD64 *ust, CARD64 *msc)
 {
 	ScreenPtr screen = draw->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drmVBlank vbl;
 	int ret, pipe = I830DRI2DrawablePipe(draw);
@@ -1478,7 +1478,7 @@ I830DRI2ScheduleWaitMSC(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 			CARD64 divisor, CARD64 remainder)
 {
 	ScreenPtr screen = draw->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	DRI2FrameEventPtr wait_info;
 	drmVBlank vbl;
@@ -1616,7 +1616,7 @@ static int dri2_server_generation;
 
 Bool I830DRI2ScreenInit(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	DRI2InfoRec info;
 	int dri2_major = 1;
@@ -1701,7 +1701,7 @@ Bool I830DRI2ScreenInit(ScreenPtr screen)
 
 void I830DRI2CloseScreen(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	DRI2CloseScreen(screen);
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 4265de8..d67d8c8 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -217,7 +217,7 @@ I830LoadPalette(ScrnInfoPtr scrn, int numColors, int *indices,
  */
 static Bool i830CreateScreenResources(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	screen->CreateScreenResources = intel->CreateScreenResources;
@@ -840,7 +840,7 @@ I830HandleUEvents(int fd, void *closure)
 
 	if (memcmp(&s.st_rdev, &udev_devnum, sizeof (dev_t)) == 0 &&
 			hotplug && atoi(hotplug) == 1)
-		RRGetInfo(screenInfo.screens[scrn->scrnIndex], TRUE);
+		RRGetInfo(xf86ScrnToScreen(scrn), TRUE);
 
 	udev_device_unref(dev);
 }
@@ -919,7 +919,7 @@ I830UeventFini(ScrnInfoPtr scrn)
 static Bool
 I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	VisualPtr visual;
 #ifdef INTEL_XVMC
diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index a868157..4741d58 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -53,7 +53,7 @@ intel_glamor_exchange_buffers(struct intel_screen_private *intel,
 Bool
 intel_glamor_create_screen_resources(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (!(intel->uxa_flags & UXA_USE_GLAMOR))
@@ -104,7 +104,7 @@ PixmapPtr
 intel_glamor_create_pixmap(ScreenPtr screen, int w, int h,
 			   int depth, unsigned int usage)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (intel->uxa_flags & UXA_USE_GLAMOR)
@@ -116,7 +116,7 @@ intel_glamor_create_pixmap(ScreenPtr screen, int w, int h,
 Bool
 intel_glamor_create_textured_pixmap(PixmapPtr pixmap)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct intel_pixmap *priv;
 
@@ -136,7 +136,7 @@ intel_glamor_create_textured_pixmap(PixmapPtr pixmap)
 void
 intel_glamor_destroy_pixmap(PixmapPtr pixmap)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private * intel;
 
 	intel = intel_get_screen_private(scrn);
@@ -147,7 +147,7 @@ intel_glamor_destroy_pixmap(PixmapPtr pixmap)
 static void
 intel_glamor_need_flush(DrawablePtr pDrawable)
 {
-	ScrnInfoPtr scrn = xf86Screens[pDrawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pDrawable->pScreen);
 	intel_screen_private * intel;
 
 	intel = intel_get_screen_private(scrn);
@@ -175,7 +175,7 @@ intel_glamor_finish_access(PixmapPtr pixmap, uxa_access_t access)
 Bool
 intel_glamor_init(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if ((intel->uxa_flags & UXA_GLAMOR_EGL_INITIALIZED) == 0)
@@ -213,7 +213,7 @@ intel_glamor_flush(intel_screen_private * intel)
 {
 	ScreenPtr screen;
 
-	screen = screenInfo.screens[intel->scrn->scrnIndex];
+	screen = xf86ScrnToScreen(intel->scrn);
 	if (intel->uxa_flags & UXA_USE_GLAMOR)
 		glamor_block_handler(screen);
 }
@@ -221,7 +221,7 @@ intel_glamor_flush(intel_screen_private * intel)
 Bool
 intel_glamor_close_screen(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (intel->uxa_flags & UXA_USE_GLAMOR)
diff --git a/src/intel_hwmc.c b/src/intel_hwmc.c
index d626725..af8bd81 100644
--- a/src/intel_hwmc.c
+++ b/src/intel_hwmc.c
@@ -189,7 +189,7 @@ static XF86MCSurfaceInfoPtr surface_info_vld[] = {
 /* check chip type and load xvmc driver */
 Bool intel_xvmc_adaptor_init(ScreenPtr pScreen)
 {
-	ScrnInfoPtr scrn = xf86Screens[pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	static XF86MCAdaptorRec *pAdapt;
 	char *name;
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 0b1a369..383efc5 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -160,7 +160,7 @@ intel_uxa_pixmap_compute_size(PixmapPtr pixmap,
 			      int *stride,
 			      unsigned usage)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int pitch, size;
 
@@ -241,7 +241,7 @@ intel_uxa_pixmap_compute_size(PixmapPtr pixmap,
 static Bool
 intel_uxa_check_solid(DrawablePtr drawable, int alu, Pixel planemask)
 {
-	ScrnInfoPtr scrn = xf86Screens[drawable->pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(drawable->pScreen);
 
 	if (!UXA_PM_IS_SOLID(drawable, planemask)) {
 		intel_debug_fallback(scrn, "planemask is not solid\n");
@@ -266,7 +266,7 @@ intel_uxa_check_solid(DrawablePtr drawable, int alu, Pixel planemask)
 static Bool
 intel_uxa_prepare_solid(PixmapPtr pixmap, int alu, Pixel planemask, Pixel fg)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drm_intel_bo *bo_table[] = {
 		NULL,		/* batch_bo */
@@ -299,7 +299,7 @@ intel_uxa_prepare_solid(PixmapPtr pixmap, int alu, Pixel planemask, Pixel fg)
 
 static void intel_uxa_solid(PixmapPtr pixmap, int x1, int y1, int x2, int y2)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	unsigned long pitch;
 	uint32_t cmd;
@@ -353,7 +353,7 @@ static Bool
 intel_uxa_check_copy(PixmapPtr source, PixmapPtr dest,
 		    int alu, Pixel planemask)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 
 	if (!UXA_PM_IS_SOLID(&source->drawable, planemask)) {
 		intel_debug_fallback(scrn, "planemask is not solid");
@@ -385,7 +385,7 @@ static Bool
 intel_uxa_prepare_copy(PixmapPtr source, PixmapPtr dest, int xdir,
 		      int ydir, int alu, Pixel planemask)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	drm_intel_bo *bo_table[] = {
 		NULL,		/* batch_bo */
@@ -417,7 +417,7 @@ static void
 intel_uxa_copy(PixmapPtr dest, int src_x1, int src_y1, int dst_x1,
 	      int dst_y1, int w, int h)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	uint32_t cmd;
 	int dst_x2, dst_y2, src_x2, src_y2;
@@ -501,7 +501,7 @@ intel_uxa_copy(PixmapPtr dest, int src_x1, int src_y1, int dst_x1,
 
 static void intel_uxa_done(PixmapPtr pixmap)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (IS_GEN6(intel) || IS_GEN7(intel)) {
@@ -523,7 +523,7 @@ static void intel_uxa_done(PixmapPtr pixmap)
  */
 static void i830_done_composite(PixmapPtr dest)
 {
-	ScrnInfoPtr scrn = xf86Screens[dest->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(dest->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 	if (intel->vertex_flush)
@@ -682,7 +682,7 @@ static Bool intel_uxa_pixmap_is_offscreen(PixmapPtr pixmap)
 
 static Bool intel_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pixmap->drawable.pScreen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct intel_pixmap *priv = intel_get_pixmap_private(pixmap);
 	dri_bo *bo = priv->bo;
@@ -787,7 +787,7 @@ static Bool intel_uxa_put_image(PixmapPtr pixmap,
 		    w == pixmap->drawable.width &&
 		    h == pixmap->drawable.height)
 		{
-			intel_screen_private *intel = intel_get_screen_private(xf86Screens[screen->myNum]);
+			intel_screen_private *intel = intel_get_screen_private(xf86ScreenToScrn(screen));
 			uint32_t tiling = priv->tiling;
 			int size, stride;
 			dri_bo *bo;
@@ -932,7 +932,7 @@ static Bool intel_uxa_get_image(PixmapPtr pixmap,
 
 		FreeScratchGC(gc);
 
-		intel_batch_submit(xf86Screens[screen->myNum]);
+		intel_batch_submit(xf86ScreenToScrn(screen));
 
 		x = y = 0;
 		pixmap = scratch;
@@ -1008,7 +1008,7 @@ static PixmapPtr
 intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 			unsigned usage)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct intel_pixmap *priv;
 	PixmapPtr pixmap, new_pixmap = NULL;
@@ -1146,7 +1146,7 @@ static Bool intel_uxa_destroy_pixmap(PixmapPtr pixmap)
 
 Bool intel_uxa_create_screen_resources(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	dri_bo *bo = intel->front_buffer;
 
@@ -1230,7 +1230,7 @@ intel_limits_init(intel_screen_private *intel)
 
 Bool intel_uxa_init(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
 #if HAS_DIXREGISTERPRIVATEKEY
diff --git a/src/intel_video.c b/src/intel_video.c
index 0834bb2..83d1eab 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -327,7 +327,7 @@ intel_overlay_put_image(intel_screen_private *intel,
 
 void I830InitVideo(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
 	XF86VideoAdaptorPtr overlayAdaptor = NULL, texturedAdaptor = NULL;
@@ -407,7 +407,7 @@ void I830InitVideo(ScreenPtr screen)
 
 static XF86VideoAdaptorPtr I830SetupImageVideoOverlay(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	XF86VideoAdaptorPtr adapt;
 	intel_adaptor_private *adaptor_priv;
@@ -516,7 +516,7 @@ static XF86VideoAdaptorPtr I830SetupImageVideoOverlay(ScreenPtr screen)
 
 static XF86VideoAdaptorPtr I830SetupImageVideoTextured(ScreenPtr screen)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	XF86VideoAdaptorPtr adapt;
 	intel_adaptor_private *adaptor_privs;
diff --git a/src/legacy/i810/i810_accel.c b/src/legacy/i810/i810_accel.c
index 6b57dbb..7120b4b 100644
--- a/src/legacy/i810/i810_accel.c
+++ b/src/legacy/i810/i810_accel.c
@@ -69,7 +69,7 @@ Bool
 I810AccelInit(ScreenPtr pScreen)
 {
    XAAInfoRecPtr infoPtr;
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
 
    if (I810_DEBUG & DEBUG_VERBOSE_ACCEL)
@@ -215,8 +215,8 @@ I810WaitLpRing(ScrnInfoPtr pScrn, int n, int timeout_millis)
 	 ErrorF("space: %d wanted %d\n", ring->space, n);
 #ifdef HAVE_DRI1
 	 if (pI810->directRenderingEnabled) {
-	    DRIUnlock(screenInfo.screens[pScrn->scrnIndex]);
-	    DRICloseScreen(screenInfo.screens[pScrn->scrnIndex]);
+	    DRIUnlock(xf86ScrnToScreen(pScrn));
+	    DRICloseScreen(xf86ScrnToScreen(pScrn));
 	 }
 #endif
 	 pI810->AccelInfoRec = NULL;	/* Stops recursive behavior */
diff --git a/src/legacy/i810/i810_cursor.c b/src/legacy/i810/i810_cursor.c
index 88829cb..580fe4b 100644
--- a/src/legacy/i810/i810_cursor.c
+++ b/src/legacy/i810/i810_cursor.c
@@ -63,7 +63,7 @@ I810CursorInit(ScreenPtr pScreen)
    I810Ptr pI810;
    xf86CursorInfoPtr infoPtr;
 
-   pScrn = xf86Screens[pScreen->myNum];
+   pScrn = xf86ScreenToScrn(pScreen);
    pI810 = I810PTR(pScrn);
    pI810->CursorInfoRec = infoPtr = xf86CreateCursorInfoRec();
    if (!infoPtr)
@@ -101,7 +101,7 @@ I810CursorInit(ScreenPtr pScreen)
 
 static Bool I810UseHWCursorARGB (ScreenPtr pScreen, CursorPtr pCurs)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
 
    if (!pI810->CursorARGBPhysical)
@@ -144,7 +144,7 @@ static void I810LoadCursorARGB (ScrnInfoPtr pScrn, CursorPtr pCurs)
 static Bool
 I810UseHWCursor(ScreenPtr pScreen, CursorPtr pCurs)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
 
    if (!pI810->CursorPhysical)
diff --git a/src/legacy/i810/i810_dga.c b/src/legacy/i810/i810_dga.c
index 44181c6..baf0011 100644
--- a/src/legacy/i810/i810_dga.c
+++ b/src/legacy/i810/i810_dga.c
@@ -70,7 +70,7 @@ DGAFunctionRec I810DGAFuncs = {
 Bool
 I810DGAInit(ScreenPtr pScreen)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
    DGAModePtr modes = NULL, newmodes = NULL, currentMode;
    DisplayModePtr pMode, firstMode;
diff --git a/src/legacy/i810/i810_dri.c b/src/legacy/i810/i810_dri.c
index 9129069..2f02dd9 100644
--- a/src/legacy/i810/i810_dri.c
+++ b/src/legacy/i810/i810_dri.c
@@ -155,7 +155,7 @@ mylog2(unsigned int n)
 Bool
 I810DRIScreenInit(ScreenPtr pScreen)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
    DRIInfoPtr pDRIInfo;
    I810DRIPtr pI810DRI;
@@ -891,7 +891,7 @@ I810DRIScreenInit(ScreenPtr pScreen)
 void
 I810DRICloseScreen(ScreenPtr pScreen)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
    I810DRIPtr pI810DRI = (I810DRIPtr) pI810->pDRIInfo->devPrivate;
 
@@ -956,7 +956,7 @@ Bool
 I810DRIFinishScreenInit(ScreenPtr pScreen)
 {
    I810SAREARec *sPriv = (I810SAREARec *) DRIGetSAREAPrivate(pScreen);
-   ScrnInfoPtr        pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr        pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr info  = I810PTR(pScrn);
 
    memset(sPriv, 0, sizeof(sPriv));
@@ -976,7 +976,7 @@ I810DRISwapContext(ScreenPtr pScreen, DRISyncType syncType,
 		   DRIContextType oldContextType, void *oldContext,
 		   DRIContextType newContextType, void *newContext)
 {
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
 
    if (syncType == DRI_3D_SYNC &&
@@ -1002,7 +1002,7 @@ static void
 I810DRIInitBuffers(WindowPtr pWin, RegionPtr prgn, CARD32 index)
 {
    ScreenPtr pScreen = pWin->drawable.pScreen;
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
    BoxPtr pbox = REGION_RECTS(prgn);
    int nbox = REGION_NUM_RECTS(prgn);
@@ -1047,7 +1047,7 @@ I810DRIMoveBuffers(WindowPtr pParent, DDXPointRec ptOldOrg,
 		   RegionPtr prgnSrc, CARD32 index)
 {
    ScreenPtr pScreen = pParent->drawable.pScreen;
-   ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+   ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
    I810Ptr pI810 = I810PTR(pScrn);
    BoxPtr pboxTmp, pboxNext, pboxBase;
    DDXPointPtr pptTmp, pptNew2 = NULL;
@@ -1249,7 +1249,7 @@ static void I810DRIRefreshArea(ScrnInfoPtr pScrn, int num, BoxPtr pbox)
 
 static void I810EnablePageFlip(ScreenPtr pScreen)
 {
-    ScrnInfoPtr         pScrn      = xf86Screens[pScreen->myNum];
+    ScrnInfoPtr         pScrn      = xf86ScreenToScrn(pScreen);
     I810Ptr       pI810       = I810PTR(pScrn);
     I810SAREAPtr  pSAREAPriv = DRIGetSAREAPrivate(pScreen);
     int cpp=2;
@@ -1298,7 +1298,7 @@ static void I810DRITransitionMultiToSingle3d(ScreenPtr pScreen)
 
 static void I810DRITransitionTo3d(ScreenPtr pScreen)
 {
-    ScrnInfoPtr    pScrn = xf86Screens[pScreen->myNum];
+    ScrnInfoPtr    pScrn = xf86ScreenToScrn(pScreen);
     I810Ptr  pI810  = I810PTR(pScrn);
 
     I810EnablePageFlip(pScreen);
@@ -1307,7 +1307,7 @@ static void I810DRITransitionTo3d(ScreenPtr pScreen)
 
 static void I810DRITransitionTo2d(ScreenPtr pScreen)
 {
-    ScrnInfoPtr         pScrn      = xf86Screens[pScreen->myNum];
+    ScrnInfoPtr         pScrn      = xf86ScreenToScrn(pScreen);
     I810Ptr       pI810       = I810PTR(pScrn);
     I810SAREAPtr  pSAREAPriv = DRIGetSAREAPrivate(pScreen);
 
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 60053d8..09d52c5 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -1353,7 +1353,7 @@ I810ModeInit(ScrnInfoPtr scrn, DisplayModePtr mode)
 
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-      DRILock(screenInfo.screens[scrn->scrnIndex], 0);
+      DRILock(xf86ScrnToScreen(scrn), 0);
       pI810->LockHeld = 1;
    }
 #endif
@@ -1362,7 +1362,7 @@ I810ModeInit(ScrnInfoPtr scrn, DisplayModePtr mode)
 
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-      DRIUnlock(screenInfo.screens[scrn->scrnIndex]);
+      DRIUnlock(xf86ScrnToScreen(scrn));
       pI810->LockHeld = 0;
    }
 #endif
@@ -1577,7 +1577,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
    I810Ptr pI810;
    VisualPtr visual;
 
-   scrn = xf86Screens[screen->myNum];
+   scrn = xf86ScreenToScrn(screen);
    pI810 = I810PTR(scrn);
    hwp = VGAHWPTR(scrn);
 
diff --git a/src/legacy/i810/i810_hwmc.c b/src/legacy/i810/i810_hwmc.c
index ba50e1e..c7100e0 100644
--- a/src/legacy/i810/i810_hwmc.c
+++ b/src/legacy/i810/i810_hwmc.c
@@ -197,7 +197,7 @@ static XF86MCAdaptorPtr ppAdapt[1] =
  **************************************************************************/
 void I810InitMC(ScreenPtr pScreen)
 {
-  ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+  ScrnInfoPtr pScrn = xf86ScreenToScrn(pScreen);
   I810Ptr pI810 = I810PTR(pScrn);
   int i;
 
diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index 5a568a6..2999ee0 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -154,7 +154,7 @@ static Atom xvBrightness, xvContrast, xvColorKey;
 
 void I810InitVideo(ScreenPtr screen)
 {
-    ScrnInfoPtr pScrn = xf86Screens[screen->myNum];
+    ScrnInfoPtr pScrn = xf86ScreenToScrn(screen);
     XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
     XF86VideoAdaptorPtr newAdaptor = NULL;
     int num_adaptors;
@@ -377,7 +377,7 @@ static void I810ResetVideo(ScrnInfoPtr pScrn)
 static XF86VideoAdaptorPtr 
 I810SetupImageVideo(ScreenPtr screen)
 {
-    ScrnInfoPtr pScrn = xf86Screens[screen->myNum];
+    ScrnInfoPtr pScrn = xf86ScreenToScrn(screen);
     I810Ptr pI810 = I810PTR(pScrn);
     XF86VideoAdaptorPtr adapt;
     I810PortPrivPtr pPriv;
@@ -941,7 +941,7 @@ I810AllocateMemory(
 	xf86FreeOffscreenLinear(linear);
    }
 
-   screen = screenInfo.screens[pScrn->scrnIndex];
+   screen = xf86ScrnToScreen(pScrn);
 
    new_linear = xf86AllocateOffscreenLinear(screen, size, 4,
                                             NULL, NULL, NULL);
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6c6650b..7a01ee7 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -332,7 +332,7 @@ to_sna(ScrnInfoPtr scrn)
 constant static inline struct sna *
 to_sna_from_screen(ScreenPtr screen)
 {
-	return to_sna(xf86Screens[screen->myNum]);
+	return to_sna(xf86ScreenToScrn(screen));
 }
 
 constant static inline struct sna *
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 517eaad..4fa4320 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -686,7 +686,7 @@ sna_dri_copy_region(DrawablePtr draw,
 static int
 sna_dri_get_pipe(DrawablePtr pDraw)
 {
-	ScrnInfoPtr pScrn = xf86Screens[pDraw->pScreen->myNum];
+	ScrnInfoPtr pScrn = xf86ScreenToScrn(pDraw->pScreen);
 	BoxRec box, crtcbox;
 	xf86CrtcPtr crtc;
 	int pipe;
@@ -1496,7 +1496,7 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		       CARD64 remainder, DRI2SwapEventPtr func, void *data)
 {
 	ScreenPtr screen = draw->pScreen;
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
 	drmVBlank vbl;
 	int pipe;
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 9ec3ecf..2e81640 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -641,7 +641,7 @@ sna_handle_uevents(int fd, void *closure)
 
 	if (memcmp(&s.st_rdev, &udev_devnum, sizeof (dev_t)) == 0 &&
 			hotplug && atoi(hotplug) == 1)
-		RRGetInfo(screenInfo.screens[scrn->scrnIndex], TRUE);
+		RRGetInfo(xf86ScrnToScreen(scrn), TRUE);
 
 	udev_device_unref(dev);
 }
@@ -826,7 +826,7 @@ agp_aperture_size(struct pci_device *dev, int gen)
 static Bool
 sna_screen_init(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 {
-	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(screen);
 	struct sna *sna = to_sna(scrn);
 	VisualPtr visual;
 
diff --git a/uxa/uxa.c b/uxa/uxa.c
index b4a1da6..0ba6869 100644
--- a/uxa/uxa.c
+++ b/uxa/uxa.c
@@ -366,7 +366,7 @@ void uxa_set_force_fallback(ScreenPtr screen, Bool value)
 static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 {
 	uxa_screen_t *uxa_screen = uxa_get_screen(pScreen);
-	ScrnInfoPtr scrn = xf86Screens[pScreen->myNum];
+	ScrnInfoPtr scrn = xf86ScreenToScrn(pScreen);
 #ifdef RENDER
 	PictureScreenPtr ps = GetPictureScreenIfSet(pScreen);
 #endif
commit 9aabc04d76fa976abb4a74f4e9676c7556e88df3
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed May 16 15:14:32 2012 +0100

    intel: add compat header file for conversion fns
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/Makefile.am b/src/Makefile.am
index 448a354..fd139ee 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -43,6 +43,7 @@ NULL:=#
 intel_drv_la_SOURCES = \
 	intel_list.h \
 	intel_module.c \
+	compat-api.h \
 	$(NULL)
 
 if UXA
diff --git a/src/compat-api.h b/src/compat-api.h
new file mode 100644
index 0000000..1bb7724
--- /dev/null
+++ b/src/compat-api.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Dave Airlie <airlied at redhat.com>
+ */
+
+/* this file provides API compat between server post 1.13 and pre it,
+   it should be reused inside as many drivers as possible */
+#ifndef COMPAT_API_H
+#define COMPAT_API_H
+
+#ifndef GLYPH_HAS_GLYPH_PICTURE_ACCESSOR
+#define GetGlyphPicture(g, s) GlyphPicture((g))[(s)->myNum]
+#define SetGlyphPicture(g, s, p) GlyphPicture((g))[(s)->myNum] = p
+#endif
+
+#ifndef XF86_HAS_SCRN_CONV
+#define xf86ScreenToScrn(s) xf86Screens[(s)->myNum]
+#define xf86ScrnToScreen(s) screenInfo.screens[(s)->scrnIndex]
+#endif
+
+#endif
diff --git a/src/intel.h b/src/intel.h
index f806aea..e274db1 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -69,6 +69,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "intel_driver.h"
 #include "intel_list.h"
+#include "compat-api.h"
 
 #if HAVE_UDEV
 #include <libudev.h>
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index 2c0b53e..f4caf56 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -51,6 +51,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xorg-server.h"
 #include <pciaccess.h>
 
+#include "compat-api.h"
 #ifdef HAVE_DRI1
 #include "xf86drm.h"
 #include "sarea.h"
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 790f5ff..6c6650b 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -59,6 +59,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <xf86drm.h>
 #include <xf86drmMode.h>
 
+#include "../compat-api.h"
 #define _XF86DRI_SERVER_
 #include <dri2.h>
 #include <i915_drm.h>
diff --git a/uxa/uxa-priv.h b/uxa/uxa-priv.h
index b24ec4f..b74a625 100644
--- a/uxa/uxa-priv.h
+++ b/uxa/uxa-priv.h
@@ -59,6 +59,8 @@
 #endif
 #include "damage.h"
 
+#include "../src/compat-api.h"
+
 /* Provide substitutes for gcc's __FUNCTION__ on other compilers */
 #if !defined(__GNUC__) && !defined(__FUNCTION__)
 # if defined(__STDC__) && (__STDC_VERSION__>=199901L)	/* C99 */
commit c9ce6ae8a6fc7b1a91aef1e59640c39024d0426d
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed May 23 08:33:20 2012 +0100

    legacy/i810: rename pScreen->screen
    
    For consistency before moving to new APIs.
    
    This just changes the files where the API changes will touch.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 76f36f2..60053d8 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -78,12 +78,12 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "../legacy.h"
 
 static Bool I810PreInit(ScrnInfoPtr scrn, int flags);
-static Bool I810ScreenInit(int Index, ScreenPtr pScreen, int argc,
+static Bool I810ScreenInit(int Index, ScreenPtr screen, int argc,
 			   char **argv);
 static Bool I810EnterVT(int scrnIndex, int flags);
 static void I810LeaveVT(int scrnIndex, int flags);
-static Bool I810CloseScreen(int scrnIndex, ScreenPtr pScreen);
-static Bool I810SaveScreen(ScreenPtr pScreen, Bool unblank);
+static Bool I810CloseScreen(int scrnIndex, ScreenPtr screen);
+static Bool I810SaveScreen(ScreenPtr screen, Bool unblank);
 static void I810FreeScreen(int scrnIndex, int flags);
 static void I810DisplayPowerManagementSet(ScrnInfoPtr scrn,
 					  int PowerManagermentMode,
@@ -1570,14 +1570,14 @@ I810AllocateFront(ScrnInfoPtr scrn)
 }
 
 static Bool
-I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
+I810ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 {
    ScrnInfoPtr scrn;
    vgaHWPtr hwp;
    I810Ptr pI810;
    VisualPtr visual;
 
-   scrn = xf86Screens[pScreen->myNum];
+   scrn = xf86Screens[screen->myNum];
    pI810 = I810PTR(scrn);
    hwp = VGAHWPTR(scrn);
 
@@ -1623,7 +1623,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
    pI810->directRenderingEnabled = !pI810->directRenderingDisabled;
    
    if (pI810->directRenderingEnabled==TRUE)
-     pI810->directRenderingEnabled = I810DRIScreenInit(pScreen);
+     pI810->directRenderingEnabled = I810DRIScreenInit(screen);
 
 #else
    pI810->directRenderingEnabled = FALSE;
@@ -1648,10 +1648,10 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
    if (!I810ModeInit(scrn, scrn->currentMode))
       return FALSE;
 
-   I810SaveScreen(pScreen, FALSE);
+   I810SaveScreen(screen, FALSE);
    I810AdjustFrame(scrnIndex, scrn->frameX0, scrn->frameY0, 0);
 
-   if (!fbScreenInit(pScreen, pI810->FbBase + scrn->fbOffset,
+   if (!fbScreenInit(screen, pI810->FbBase + scrn->fbOffset,
 		     scrn->virtualX, scrn->virtualY,
 		     scrn->xDpi, scrn->yDpi,
 		     scrn->displayWidth, scrn->bitsPerPixel))
@@ -1659,8 +1659,8 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
    if (scrn->bitsPerPixel > 8) {
       /* Fixup RGB ordering */
-      visual = pScreen->visuals + pScreen->numVisuals;
-      while (--visual >= pScreen->visuals) {
+      visual = screen->visuals + screen->numVisuals;
+      while (--visual >= screen->visuals) {
 	 if ((visual->class | DynamicClass) == DirectColor) {
 	    visual->offsetRed = scrn->offset.red;
 	    visual->offsetGreen = scrn->offset.green;
@@ -1672,14 +1672,14 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
       }
    }
 
-   fbPictureInit(pScreen, NULL, 0);
+   fbPictureInit(screen, NULL, 0);
 
-   xf86SetBlackWhitePixels(pScreen);
+   xf86SetBlackWhitePixels(screen);
 
 #ifdef HAVE_DRI1
    if (pI810->LpRing->mem.Start == 0 && pI810->directRenderingEnabled) {
       pI810->directRenderingEnabled = FALSE;
-      I810DRICloseScreen(pScreen);
+      I810DRICloseScreen(screen);
    }
 
    if (!pI810->directRenderingEnabled) {
@@ -1692,10 +1692,10 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 #endif
 
 #ifdef XFreeXDGA
-   I810DGAInit(pScreen);
+   I810DGAInit(screen);
 #endif
 
-   if (!xf86InitFBManager(pScreen, &(pI810->FbMemBox))) {
+   if (!xf86InitFBManager(screen, &(pI810->FbMemBox))) {
       xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "Failed to init memory manager\n");
       return FALSE;
@@ -1705,7 +1705,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
       if (pI810->LpRing->mem.Size != 0) {
 	 I810SetRingRegs(scrn);
 
-	 if (!I810AccelInit(pScreen)) {
+	 if (!I810AccelInit(screen)) {
 	    xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		       "Hardware acceleration initialization failed\n");
 	 }  else /* PK added 16.02.2004 */
@@ -1713,57 +1713,57 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
       }
    }
 
-   miInitializeBackingStore(pScreen);
-   xf86SetBackingStore(pScreen);
-   xf86SetSilkenMouse(pScreen);
+   miInitializeBackingStore(screen);
+   xf86SetBackingStore(screen);
+   xf86SetSilkenMouse(screen);
 
-   miDCInitialize(pScreen, xf86GetPointerScreenFuncs());
+   miDCInitialize(screen, xf86GetPointerScreenFuncs());
 
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_SW_CURSOR, FALSE)) {
-      if (!I810CursorInit(pScreen)) {
+      if (!I810CursorInit(screen)) {
 	 xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		    "Hardware cursor initialization failed\n");
       }
    }
 
-   if (!miCreateDefColormap(pScreen))
+   if (!miCreateDefColormap(screen))
       return FALSE;
 
    /* Use driver specific palette load routines for Direct Color support. -jens */
    if (scrn->bitsPerPixel == 16) {
       if (scrn->depth == 15) {
-	 if (!xf86HandleColormaps(pScreen, 256, 8, I810LoadPalette15, NULL,
+	 if (!xf86HandleColormaps(screen, 256, 8, I810LoadPalette15, NULL,
 				  CMAP_PALETTED_TRUECOLOR |
 				  CMAP_RELOAD_ON_MODE_SWITCH))
 	    return FALSE;
       } else {
-	 if (!xf86HandleColormaps(pScreen, 256, 8, I810LoadPalette16, NULL,
+	 if (!xf86HandleColormaps(screen, 256, 8, I810LoadPalette16, NULL,
 				  CMAP_PALETTED_TRUECOLOR |
 				  CMAP_RELOAD_ON_MODE_SWITCH))
 	    return FALSE;
       }
    } else {
-      if (!xf86HandleColormaps(pScreen, 256, 8, I810LoadPalette24, NULL,
+      if (!xf86HandleColormaps(screen, 256, 8, I810LoadPalette24, NULL,
 			       CMAP_PALETTED_TRUECOLOR |
 			       CMAP_RELOAD_ON_MODE_SWITCH))
 	 return FALSE;
    }
 
-   xf86DPMSInit(pScreen, I810DisplayPowerManagementSet, 0);
+   xf86DPMSInit(screen, I810DisplayPowerManagementSet, 0);
 
-   I810InitVideo(pScreen);
+   I810InitVideo(screen);
 
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       /* Now that mi, fb, drm and others have done their thing,
        * complete the DRI setup.
        */
-      pI810->directRenderingEnabled = I810DRIFinishScreenInit(pScreen);
+      pI810->directRenderingEnabled = I810DRIFinishScreenInit(screen);
    }
 #ifdef XvMCExtension
    if ((pI810->directRenderingEnabled) && (pI810->numSurfaces)) {
       /* Initialize the hardware motion compensation code */
-      I810InitMC(pScreen);
+      I810InitMC(screen);
    }
 #endif
 #endif
@@ -1774,9 +1774,9 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
       xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Direct rendering disabled\n");
    }
 
-   pScreen->SaveScreen = I810SaveScreen;
-   pI810->CloseScreen = pScreen->CloseScreen;
-   pScreen->CloseScreen = I810CloseScreen;
+   screen->SaveScreen = I810SaveScreen;
+   pI810->CloseScreen = screen->CloseScreen;
+   screen->CloseScreen = I810CloseScreen;
 
    if (serverGeneration == 1)
       xf86ShowUnusedOptions(scrn->scrnIndex, scrn->options);
@@ -1957,7 +1957,7 @@ I810LeaveVT(int scrnIndex, int flags)
 }
 
 static Bool
-I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
+I810CloseScreen(int scrnIndex, ScreenPtr screen)
 {
    ScrnInfoPtr scrn = xf86Screens[scrnIndex];
    vgaHWPtr hwp = VGAHWPTR(scrn);
@@ -1975,7 +1975,7 @@ I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
    }
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-      I810DRICloseScreen(pScreen);
+      I810DRICloseScreen(screen);
       pI810->directRenderingEnabled = FALSE;
    }
 #endif
@@ -2021,8 +2021,8 @@ I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
    pI810->LpRing = NULL;
 
    scrn->vtSema = FALSE;
-   pScreen->CloseScreen = pI810->CloseScreen;
-   return (*pScreen->CloseScreen) (scrnIndex, pScreen);
+   screen->CloseScreen = pI810->CloseScreen;
+   return (*screen->CloseScreen) (scrnIndex, screen);
 }
 
 static void
@@ -2047,9 +2047,9 @@ I810ValidMode(int scrnIndex, DisplayModePtr mode, Bool verbose, int flags)
 }
 
 static Bool
-I810SaveScreen(ScreenPtr pScreen, Bool unblack)
+I810SaveScreen(ScreenPtr screen, Bool unblack)
 {
-   return vgaHWSaveScreen(pScreen, unblack);
+   return vgaHWSaveScreen(screen, unblack);
 }
 
 static void
diff --git a/src/legacy/i810/i810_video.c b/src/legacy/i810/i810_video.c
index a0e6acd..5a568a6 100644
--- a/src/legacy/i810/i810_video.c
+++ b/src/legacy/i810/i810_video.c
@@ -152,17 +152,17 @@ static Atom xvBrightness, xvContrast, xvColorKey;
 #define RGB15ToColorKey(c) \
         (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
 
-void I810InitVideo(ScreenPtr pScreen)
+void I810InitVideo(ScreenPtr screen)
 {
-    ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+    ScrnInfoPtr pScrn = xf86Screens[screen->myNum];
     XF86VideoAdaptorPtr *adaptors, *newAdaptors = NULL;
     XF86VideoAdaptorPtr newAdaptor = NULL;
     int num_adaptors;
 	
     if (pScrn->bitsPerPixel != 8) 
     {
-	newAdaptor = I810SetupImageVideo(pScreen);
-	I810InitOffscreenImages(pScreen);
+	newAdaptor = I810SetupImageVideo(screen);
+	I810InitOffscreenImages(screen);
     }
 
     num_adaptors = xf86XVListGenericAdaptors(pScrn, &adaptors);
@@ -185,7 +185,7 @@ void I810InitVideo(ScreenPtr pScreen)
     }
 
     if(num_adaptors)
-        xf86XVScreenInit(pScreen, adaptors, num_adaptors);
+        xf86XVScreenInit(screen, adaptors, num_adaptors);
 
     if(newAdaptors)
 	free(newAdaptors);
@@ -375,9 +375,9 @@ static void I810ResetVideo(ScrnInfoPtr pScrn)
 
 
 static XF86VideoAdaptorPtr 
-I810SetupImageVideo(ScreenPtr pScreen)
+I810SetupImageVideo(ScreenPtr screen)
 {
-    ScrnInfoPtr pScrn = xf86Screens[pScreen->myNum];
+    ScrnInfoPtr pScrn = xf86Screens[screen->myNum];
     I810Ptr pI810 = I810PTR(pScrn);
     XF86VideoAdaptorPtr adapt;
     I810PortPrivPtr pPriv;
@@ -423,12 +423,12 @@ I810SetupImageVideo(ScreenPtr pScreen)
     pPriv->currentBuf = 0;
 
     /* gotta uninit this someplace */
-    REGION_NULL(pScreen, &pPriv->clip);
+    REGION_NULL(screen, &pPriv->clip);
 
     pI810->adaptor = adapt;
 
-    pI810->BlockHandler = pScreen->BlockHandler;
-    pScreen->BlockHandler = I810BlockHandler;
+    pI810->BlockHandler = screen->BlockHandler;
+    screen->BlockHandler = I810BlockHandler;
 
     xvBrightness = MAKE_ATOM("XV_BRIGHTNESS");
     xvContrast   = MAKE_ATOM("XV_CONTRAST");
@@ -522,7 +522,7 @@ I810StopVideo(ScrnInfoPtr pScrn, pointer data, Bool shutdown)
 
   I810OverlayRegPtr overlay = (I810OverlayRegPtr) (pI810->FbBase + pI810->OverlayStart); 
 
-  REGION_EMPTY(pScrn->pScreen, &pPriv->clip);   
+  REGION_EMPTY(pScrn->screen, &pPriv->clip);   
 
   if(shutdown) {
      if(pPriv->videoStatus & CLIENT_VIDEO_ON) {
@@ -579,7 +579,7 @@ I810SetPortAttribute(
                  break;
 	}
 	OVERLAY_UPDATE(pI810->OverlayPhysical);
-	REGION_EMPTY(pScrn->pScreen, &pPriv->clip);   
+	REGION_EMPTY(pScrn->screen, &pPriv->clip);   
   } else return BadMatch;
 
   return Success;
@@ -928,7 +928,7 @@ I810AllocateMemory(
   FBLinearPtr linear,
   int size
 ){
-   ScreenPtr pScreen;
+   ScreenPtr screen;
    FBLinearPtr new_linear;
 
    if(linear) {
@@ -941,21 +941,21 @@ I810AllocateMemory(
 	xf86FreeOffscreenLinear(linear);
    }
 
-   pScreen = screenInfo.screens[pScrn->scrnIndex];
+   screen = screenInfo.screens[pScrn->scrnIndex];
 
-   new_linear = xf86AllocateOffscreenLinear(pScreen, size, 4,
+   new_linear = xf86AllocateOffscreenLinear(screen, size, 4,
                                             NULL, NULL, NULL);
 
    if(!new_linear) {
         int max_size;
 
-        xf86QueryLargestOffscreenLinear(pScreen, &max_size, 4, 
+        xf86QueryLargestOffscreenLinear(screen, &max_size, 4, 
 				       PRIORITY_EXTREME);
 
         if(max_size < size) return NULL;
 
-        xf86PurgeUnlockedOffscreenAreas(pScreen);
-        new_linear = xf86AllocateOffscreenLinear(pScreen, size, 4, 
+        xf86PurgeUnlockedOffscreenAreas(screen);
+        new_linear = xf86AllocateOffscreenLinear(screen, size, 4, 
                                                  NULL, NULL, NULL);
    } 
 
@@ -995,7 +995,7 @@ I810PutImage(
     dstBox.y2 = drw_y + drw_h;
 
     I810ClipVideo(&dstBox, &x1, &x2, &y1, &y2, 
-		  REGION_EXTENTS(pScrn->pScreen, clipBoxes), width, height);
+		  REGION_EXTENTS(pScrn->screen, clipBoxes), width, height);
     
     if((x1 >= x2) || (y1 >= y2))
        return Success;
@@ -1081,8 +1081,8 @@ I810PutImage(
     }
 
     /* update cliplist */
-    if(!REGION_EQUAL(pScrn->pScreen, &pPriv->clip, clipBoxes)) {
-	REGION_COPY(pScrn->pScreen, &pPriv->clip, clipBoxes);
+    if(!REGION_EQUAL(pScrn->screen, &pPriv->clip, clipBoxes)) {
+	REGION_COPY(pScrn->screen, &pPriv->clip, clipBoxes);
 	/* draw these */
 	xf86XVFillKeyHelperDrawable(pDraw, pPriv->colorKey, clipBoxes);
     }
@@ -1151,17 +1151,17 @@ I810BlockHandler (
     pointer     pTimeout,
     pointer     pReadmask
 ){
-    ScreenPtr   pScreen = screenInfo.screens[i];
+    ScreenPtr   screen = screenInfo.screens[i];
     ScrnInfoPtr pScrn = xf86Screens[i];
     I810Ptr      pI810 = I810PTR(pScrn);
     I810PortPrivPtr pPriv = GET_PORT_PRIVATE(pScrn);
     I810OverlayRegPtr overlay = (I810OverlayRegPtr) (pI810->FbBase + pI810->OverlayStart); 
 
-    pScreen->BlockHandler = pI810->BlockHandler;
+    screen->BlockHandler = pI810->BlockHandler;
     
-    (*pScreen->BlockHandler) (i, blockData, pTimeout, pReadmask);
+    (*screen->BlockHandler) (i, blockData, pTimeout, pReadmask);
 
-    pScreen->BlockHandler = I810BlockHandler;
+    screen->BlockHandler = I810BlockHandler;
 
     if(pPriv->videoStatus & TIMER_MASK) {
 	UpdateCurrentTime();
@@ -1382,7 +1382,7 @@ I810DisplaySurface(
     pPriv->isOn = TRUE;
     /* we've prempted the XvImage stream so set its free timer */
     if(pI810Priv->videoStatus & CLIENT_VIDEO_ON) {
-      REGION_EMPTY(pScrn->pScreen, & pI810Priv->clip);   
+      REGION_EMPTY(pScrn->screen, & pI810Priv->clip);   
       UpdateCurrentTime();
       pI810Priv->videoStatus = FREE_TIMER;
       pI810Priv->freeTime = currentTime.milliseconds + FREE_DELAY;
@@ -1394,7 +1394,7 @@ I810DisplaySurface(
 
 
 static void 
-I810InitOffscreenImages(ScreenPtr pScreen)
+I810InitOffscreenImages(ScreenPtr screen)
 {
     XF86OffscreenImagePtr offscreenImages;
 
@@ -1417,6 +1417,6 @@ I810InitOffscreenImages(ScreenPtr pScreen)
     offscreenImages[0].num_attributes = 1;
     offscreenImages[0].attributes = Attributes;
 
-    xf86XVRegisterOffscreenImages(pScreen, offscreenImages, 1);
+    xf86XVRegisterOffscreenImages(screen, offscreenImages, 1);
 }
 
commit 70ee0f84f5b9c6c13a0338f8b2757e587c849c14
Author: Dave Airlie <airlied at redhat.com>
Date:   Wed May 23 08:31:52 2012 +0100

    legacy/i810: rename pScrn->scrn
    
    This is need to make the compat api stuff easier to implement.
    
    Signed-off-by: Dave Airlie <airlied at redhat.com>

diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index 02da574..76f36f2 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -77,7 +77,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "../legacy.h"
 
-static Bool I810PreInit(ScrnInfoPtr pScrn, int flags);
+static Bool I810PreInit(ScrnInfoPtr scrn, int flags);
 static Bool I810ScreenInit(int Index, ScreenPtr pScreen, int argc,
 			   char **argv);
 static Bool I810EnterVT(int scrnIndex, int flags);
@@ -85,7 +85,7 @@ static void I810LeaveVT(int scrnIndex, int flags);
 static Bool I810CloseScreen(int scrnIndex, ScreenPtr pScreen);
 static Bool I810SaveScreen(ScreenPtr pScreen, Bool unblank);
 static void I810FreeScreen(int scrnIndex, int flags);
-static void I810DisplayPowerManagementSet(ScrnInfoPtr pScrn,
+static void I810DisplayPowerManagementSet(ScrnInfoPtr scrn,
 					  int PowerManagermentMode,
 					  int flags);
 static ModeStatus I810ValidMode(int scrnIndex, DisplayModePtr mode,
@@ -152,24 +152,24 @@ int I830EntityIndex = -1;
  *
  */
 static Bool
-I810GetRec(ScrnInfoPtr pScrn)
+I810GetRec(ScrnInfoPtr scrn)
 {
-   if (pScrn->driverPrivate)
+   if (scrn->driverPrivate)
       return TRUE;
 
-   pScrn->driverPrivate = xnfcalloc(sizeof(I810Rec), 1);
+   scrn->driverPrivate = xnfcalloc(sizeof(I810Rec), 1);
    return TRUE;
 }
 
 static void
-I810FreeRec(ScrnInfoPtr pScrn)
+I810FreeRec(ScrnInfoPtr scrn)
 {
-   if (!pScrn)
+   if (!scrn)
       return;
-   if (!pScrn->driverPrivate)
+   if (!scrn->driverPrivate)
       return;
-   free(pScrn->driverPrivate);
-   pScrn->driverPrivate = NULL;
+   free(scrn->driverPrivate);
+   scrn->driverPrivate = NULL;
 }
 
 struct pci_device *
@@ -188,11 +188,11 @@ intel_host_bridge (void)
 }
 
 static void
-I810ProbeDDC(ScrnInfoPtr pScrn, int index)
+I810ProbeDDC(ScrnInfoPtr scrn, int index)
 {
    vbeInfoPtr pVbe;
 
-   if (xf86LoadSubModule(pScrn, "vbe")) {
+   if (xf86LoadSubModule(scrn, "vbe")) {
       pVbe = VBEInit(NULL, index);
       ConfiguredMonitor = vbeDoEDID(pVbe, NULL);
       vbeFree(pVbe);
@@ -200,24 +200,24 @@ I810ProbeDDC(ScrnInfoPtr pScrn, int index)
 }
 
 static xf86MonPtr
-I810DoDDC(ScrnInfoPtr pScrn, int index)
+I810DoDDC(ScrnInfoPtr scrn, int index)
 {
    vbeInfoPtr pVbe;
    xf86MonPtr MonInfo = NULL;
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    /* Honour Option "noDDC" */
    if (xf86ReturnOptValBool(pI810->Options, OPTION_NO_DDC, FALSE)) {
       return MonInfo;
    }
 
-   if (xf86LoadSubModule(pScrn, "vbe") && (pVbe = VBEInit(NULL, index))) {
+   if (xf86LoadSubModule(scrn, "vbe") && (pVbe = VBEInit(NULL, index))) {
       MonInfo = vbeDoEDID(pVbe, NULL);
       xf86PrintEDID(MonInfo);
-      xf86SetDDCproperties(pScrn, MonInfo);
+      xf86SetDDCproperties(scrn, MonInfo);
       vbeFree(pVbe);
    } else {
-      xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+      xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		 "this driver cannot do DDC without VBE\n");
    }
 
@@ -232,7 +232,7 @@ I810DoDDC(ScrnInfoPtr pScrn, int index)
  *
  */
 static Bool
-I810PreInit(ScrnInfoPtr pScrn, int flags)
+I810PreInit(ScrnInfoPtr scrn, int flags)
 {
    I810Ptr pI810;
    ClockRangePtr clockRanges;
@@ -244,89 +244,89 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
    Bool enable;
    struct intel_chipset chipset;
 
-   if (pScrn->numEntities != 1)
+   if (scrn->numEntities != 1)
       return FALSE;
 
    /* Allocate driverPrivate */
-   if (!I810GetRec(pScrn))
+   if (!I810GetRec(scrn))
       return FALSE;
 
-   pI810 = I810PTR(pScrn);
+   pI810 = I810PTR(scrn);
 
-   pI810->pEnt = xf86GetEntityInfo(pScrn->entityList[0]);
+   pI810->pEnt = xf86GetEntityInfo(scrn->entityList[0]);
    if (pI810->pEnt->location.type != BUS_PCI)
       return FALSE;
 
    if (flags & PROBE_DETECT) {
-      I810ProbeDDC(pScrn, pI810->pEnt->index);
+      I810ProbeDDC(scrn, pI810->pEnt->index);
       return TRUE;
    }
 
    /* The vgahw module should be loaded here when needed */
-   if (!xf86LoadSubModule(pScrn, "vgahw"))
+   if (!xf86LoadSubModule(scrn, "vgahw"))
       return FALSE;
 
    /* Allocate a vgaHWRec */
-   if (!vgaHWGetHWRec(pScrn))
+   if (!vgaHWGetHWRec(scrn))
       return FALSE;
 
    pI810->PciInfo = xf86GetPciInfoForEntity(pI810->pEnt->index);
 
-   /* Set pScrn->monitor */
-   pScrn->monitor = pScrn->confScreen->monitor;
+   /* Set scrn->monitor */
+   scrn->monitor = scrn->confScreen->monitor;
 
    flags24 = Support24bppFb | PreferConvert32to24 | SupportConvert32to24;
-   if (!xf86SetDepthBpp(pScrn, 16, 0, 16, flags24)) {
+   if (!xf86SetDepthBpp(scrn, 16, 0, 16, flags24)) {
       return FALSE;
    } else {
-      switch (pScrn->depth) {
+      switch (scrn->depth) {
       case 8:
       case 15:
       case 16:
       case 24:
 	 break;
       default:
-	 xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+	 xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		    "Given depth (%d) is not supported by i810 driver\n",
-		    pScrn->depth);
+		    scrn->depth);
 	 return FALSE;
       }
    }
-   xf86PrintDepthBpp(pScrn);
+   xf86PrintDepthBpp(scrn);
 
-   switch (pScrn->bitsPerPixel) {
+   switch (scrn->bitsPerPixel) {
    case 8:
    case 16:
    case 24:
       break;
    default:
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "Given bpp (%d) is not supported by i810 driver\n",
-		 pScrn->bitsPerPixel);
+		 scrn->bitsPerPixel);
       return FALSE;
    }
 
-   if (!xf86SetWeight(pScrn, defaultWeight, defaultWeight))
+   if (!xf86SetWeight(scrn, defaultWeight, defaultWeight))
       return FALSE;
 
-   if (!xf86SetDefaultVisual(pScrn, -1))
+   if (!xf86SetDefaultVisual(scrn, -1))
       return FALSE;
 
    /* We use a programmable clock */
-   pScrn->progClock = TRUE;
+   scrn->progClock = TRUE;
 
-   pI810->cpp = pScrn->bitsPerPixel / 8;
+   pI810->cpp = scrn->bitsPerPixel / 8;
 
    /* Process the options */
-   xf86CollectOptions(pScrn, NULL);
+   xf86CollectOptions(scrn, NULL);
    if (!(pI810->Options = malloc(sizeof(I810Options))))
       return FALSE;
    memcpy(pI810->Options, I810Options, sizeof(I810Options));
-   xf86ProcessOptions(pScrn->scrnIndex, pScrn->options, pI810->Options);
+   xf86ProcessOptions(scrn->scrnIndex, scrn->options, pI810->Options);
 
-   pScrn->rgbBits = 8;
+   scrn->rgbBits = 8;
    if (xf86ReturnOptValBool(pI810->Options, OPTION_DAC_6BIT, FALSE))
-      pScrn->rgbBits = 6;
+      scrn->rgbBits = 6;
 
    if (xf86ReturnOptValBool(pI810->Options, OPTION_SHOW_CACHE, FALSE))
      pI810->showCache = TRUE;
@@ -335,7 +335,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 
    /* 6-BIT dac isn't reasonable for modes with > 8bpp */
    if (xf86ReturnOptValBool(pI810->Options, OPTION_DAC_6BIT, FALSE) &&
-       pScrn->bitsPerPixel > 8) {
+       scrn->bitsPerPixel > 8) {
       OptionInfoPtr ptr;
 
       ptr = xf86TokenToOptinfo(pI810->Options, OPTION_DAC_6BIT);
@@ -346,8 +346,8 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
       pI810->noAccel = TRUE;
 
    if (!pI810->noAccel) {
-      if (!xf86LoadSubModule(pScrn, "xaa")) {
-	 I810FreeRec(pScrn);
+      if (!xf86LoadSubModule(scrn, "xaa")) {
+	 I810FreeRec(scrn);
 	 return FALSE;
       }
    }
@@ -358,11 +358,11 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 
    if (!pI810->directRenderingDisabled) {
      if (pI810->noAccel) {
-       xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "DRI is disabled because it "
+       xf86DrvMsg(scrn->scrnIndex, X_WARNING, "DRI is disabled because it "
 		  "needs 2D acceleration.\n");
        pI810->directRenderingDisabled=TRUE;
-     } else if (pScrn->depth!=16) {
-       xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "DRI is disabled because it "
+     } else if (scrn->depth!=16) {
+       xf86DrvMsg(scrn->scrnIndex, X_WARNING, "DRI is disabled because it "
 		  "runs only at 16-bit depth.\n");
        pI810->directRenderingDisabled=TRUE;
      }
@@ -373,49 +373,49 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
    /* after xf86ProcessOptions,
     * because it is controlled by options [no]vbe and [no]ddc
     */
-   I810DoDDC(pScrn, pI810->pEnt->index);
+   I810DoDDC(scrn, pI810->pEnt->index);
 
-   intel_detect_chipset(pScrn, pI810->PciInfo, &chipset);
+   intel_detect_chipset(scrn, pI810->PciInfo, &chipset);
 
    /*
     * Set the Chipset and ChipRev, allowing config file entries to
     * override.
     */
    if (pI810->pEnt->device->chipset && *pI810->pEnt->device->chipset) {
-      pScrn->chipset = pI810->pEnt->device->chipset;
+      scrn->chipset = pI810->pEnt->device->chipset;
       from = X_CONFIG;
    } else if (pI810->pEnt->device->chipID >= 0) {
-      pScrn->chipset = (char *)xf86TokenToString(intel_chipsets,
+      scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
 						 pI810->pEnt->device->chipID);
       from = X_CONFIG;
-      xf86DrvMsg(pScrn->scrnIndex, X_CONFIG, "ChipID override: 0x%04X\n",
+      xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipID override: 0x%04X\n",
 		 pI810->pEnt->device->chipID);
    } else {
       from = X_PROBED;
-      pScrn->chipset = (char *)xf86TokenToString(intel_chipsets,
+      scrn->chipset = (char *)xf86TokenToString(intel_chipsets,
 						 DEVICE_ID(pI810->PciInfo));
    }
    if (pI810->pEnt->device->chipRev >= 0) {
-      xf86DrvMsg(pScrn->scrnIndex, X_CONFIG, "ChipRev override: %d\n",
+      xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "ChipRev override: %d\n",
 		 pI810->pEnt->device->chipRev);
    }
 
-   xf86DrvMsg(pScrn->scrnIndex, from, "Chipset: \"%s\"\n",
-	      (pScrn->chipset != NULL) ? pScrn->chipset : "Unknown i810");
+   xf86DrvMsg(scrn->scrnIndex, from, "Chipset: \"%s\"\n",
+	      (scrn->chipset != NULL) ? scrn->chipset : "Unknown i810");
 
    pI810->LinearAddr = pI810->PciInfo->regions[0].base_addr;
-   xf86DrvMsg(pScrn->scrnIndex, from, "Linear framebuffer at 0x%lX\n",
+   xf86DrvMsg(scrn->scrnIndex, from, "Linear framebuffer at 0x%lX\n",
 	      (unsigned long)pI810->LinearAddr);
 
    pI810->MMIOAddr = pI810->PciInfo->regions[1].base_addr;
-   xf86DrvMsg(pScrn->scrnIndex, from, "IO registers at addr 0x%lX\n",
+   xf86DrvMsg(scrn->scrnIndex, from, "IO registers at addr 0x%lX\n",
 	      (unsigned long)pI810->MMIOAddr);
 
    /* AGP GART support is required.  Don't proceed any further if it isn't
     * present.
     */
    if (!xf86AgpGARTSupported()) {
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "AGP GART support is not available.  Make sure your kernel has\n"
 		 "\tagpgart support or that the agpgart kernel module is loaded.\n");
       return FALSE;
@@ -442,40 +442,40 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
     *
     *  Changed to 8 Meg so we can have acceleration by default (Mark).
     */
-   mem = I810CheckAvailableMemory(pScrn);
+   mem = I810CheckAvailableMemory(scrn);
    if (pI810->directRenderingDisabled || mem < 131072)  /* < 128 MB */
-       pScrn->videoRam = 8192;
+       scrn->videoRam = 8192;
    else if (mem < 196608)
-       pScrn->videoRam = 16384;  /* < 192 MB */
+       scrn->videoRam = 16384;  /* < 192 MB */
    else
-       pScrn->videoRam = 24576;
+       scrn->videoRam = 24576;
    
    from = X_DEFAULT;
    
    if (pI810->pEnt->device->videoRam) {
-      pScrn->videoRam = pI810->pEnt->device->videoRam;
+      scrn->videoRam = pI810->pEnt->device->videoRam;
       from = X_CONFIG;
    }
 
-   if (mem > 0 && mem < pScrn->videoRam) {
-      xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "%dk of memory was requested,"
+   if (mem > 0 && mem < scrn->videoRam) {
+      xf86DrvMsg(scrn->scrnIndex, X_WARNING, "%dk of memory was requested,"
 		 " but the\n\t maximum AGP memory available is %dk.\n",
-		 pScrn->videoRam, mem);
+		 scrn->videoRam, mem);
       from = X_PROBED;
       if (mem > (6 * 1024)) {
-	 xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+	 xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		    "Reducing video memory to 4MB\n");
-	 pScrn->videoRam = 4096;
+	 scrn->videoRam = 4096;
       } else {
-	 xf86DrvMsg(pScrn->scrnIndex, X_ERROR, "Less than 6MB of AGP memory"
+	 xf86DrvMsg(scrn->scrnIndex, X_ERROR, "Less than 6MB of AGP memory"
 		    " is available. Cannot proceed.\n");
-	 I810FreeRec(pScrn);
+	 I810FreeRec(scrn);
 	 return FALSE;
       }
    }
 
-   xf86DrvMsg(pScrn->scrnIndex, from,
-	      "Will alloc AGP framebuffer: %d kByte\n", pScrn->videoRam);
+   xf86DrvMsg(scrn->scrnIndex, from,
+	      "Will alloc AGP framebuffer: %d kByte\n", scrn->videoRam);
 
    /* Calculate Fixed Offsets depending on graphics aperture size */
    {
@@ -502,14 +502,14 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
    {
       Gamma zeros = { 0.0, 0.0, 0.0 };
 
-      if (!xf86SetGamma(pScrn, zeros)) {
+      if (!xf86SetGamma(scrn, zeros)) {
 	 return FALSE;
       }
    }
 
    pI810->MaxClock = 0;
    if (pI810->pEnt->device->dacSpeeds[0]) {
-      switch (pScrn->bitsPerPixel) {
+      switch (scrn->bitsPerPixel) {
       case 8:
 	 pI810->MaxClock = pI810->pEnt->device->dacSpeeds[DAC_BPP8];
 	 break;
@@ -526,7 +526,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
       if (!pI810->MaxClock)
 	 pI810->MaxClock = pI810->pEnt->device->dacSpeeds[0];
    } else {
-      switch (pScrn->bitsPerPixel) {
+      switch (scrn->bitsPerPixel) {
       case 8:
 	 pI810->MaxClock = 203000;
 	 break;
@@ -549,59 +549,59 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
    clockRanges->interlaceAllowed = TRUE;
    clockRanges->doubleScanAllowed = FALSE;
 
-   i = xf86ValidateModes(pScrn, pScrn->monitor->Modes,
-			 pScrn->display->modes, clockRanges,
+   i = xf86ValidateModes(scrn, scrn->monitor->Modes,
+			 scrn->display->modes, clockRanges,
 #ifndef HAVE_DRI1
-			 0, 320, 1600, 64 * pScrn->bitsPerPixel,
+			 0, 320, 1600, 64 * scrn->bitsPerPixel,
 #else
-			 i810_pitches, 0, 0, 64 * pScrn->bitsPerPixel,
+			 i810_pitches, 0, 0, 64 * scrn->bitsPerPixel,
 #endif
 			 200, 1200,
-			 pScrn->display->virtualX, pScrn->display->virtualY,
-			 pScrn->videoRam * 1024, LOOKUP_BEST_REFRESH);
+			 scrn->display->virtualX, scrn->display->virtualY,
+			 scrn->videoRam * 1024, LOOKUP_BEST_REFRESH);
 
    if (i == -1) {
-      I810FreeRec(pScrn);
+      I810FreeRec(scrn);
       return FALSE;
    }
 
-   xf86PruneDriverModes(pScrn);
+   xf86PruneDriverModes(scrn);
 
-   if (!i || !pScrn->modes) {
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR, "No valid modes found\n");
-      I810FreeRec(pScrn);
+   if (!i || !scrn->modes) {
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR, "No valid modes found\n");
+      I810FreeRec(scrn);
       return FALSE;
    }
 
-   xf86SetCrtcForModes(pScrn, INTERLACE_HALVE_V);
+   xf86SetCrtcForModes(scrn, INTERLACE_HALVE_V);
 
-   pScrn->currentMode = pScrn->modes;
+   scrn->currentMode = scrn->modes;
 
-   xf86PrintModes(pScrn);
+   xf86PrintModes(scrn);
 
-   xf86SetDpi(pScrn, 0, 0);
+   xf86SetDpi(scrn, 0, 0);
 
-   if (!xf86LoadSubModule(pScrn, "fb")) {
-      I810FreeRec(pScrn);
+   if (!xf86LoadSubModule(scrn, "fb")) {
+      I810FreeRec(scrn);
       return FALSE;
    }
 
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_SW_CURSOR, FALSE)) {
-      if (!xf86LoadSubModule(pScrn, "ramdac")) {
-	 I810FreeRec(pScrn);
+      if (!xf86LoadSubModule(scrn, "ramdac")) {
+	 I810FreeRec(scrn);
 	 return FALSE;
       }
    }
 
    if (xf86GetOptValInteger
        (pI810->Options, OPTION_COLOR_KEY, &(pI810->colorKey))) {
-      xf86DrvMsg(pScrn->scrnIndex, X_CONFIG,
+      xf86DrvMsg(scrn->scrnIndex, X_CONFIG,
 		 "video overlay key set to 0x%x\n", pI810->colorKey);
    } else {
-      pI810->colorKey = (1 << pScrn->offset.red) |
-	    (1 << pScrn->offset.green) |
-	    (((pScrn->mask.blue >> pScrn->offset.blue) -
-	      1) << pScrn->offset.blue);
+      pI810->colorKey = (1 << scrn->offset.red) |
+	    (1 << scrn->offset.green) |
+	    (((scrn->mask.blue >> scrn->offset.blue) -
+	      1) << scrn->offset.blue);
    }
 
    pI810->allowPageFlip=FALSE;
@@ -612,14 +612,14 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
      pI810->allowPageFlip = enable;
      if (pI810->allowPageFlip == TRUE)
      {
-       if (!xf86LoadSubModule(pScrn, "shadowfb")) {
+       if (!xf86LoadSubModule(scrn, "shadowfb")) {
 	 pI810->allowPageFlip = 0;
-	 xf86DrvMsg(pScrn->scrnIndex, X_ERROR, 
+	 xf86DrvMsg(scrn->scrnIndex, X_ERROR, 
 		    "Couldn't load shadowfb module:\n");
        }
      }
      
-     xf86DrvMsg(pScrn->scrnIndex, X_CONFIG, "page flipping %s\n",
+     xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "page flipping %s\n",
 		enable ? "enabled" : "disabled");
      
    }
@@ -627,20 +627,20 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 
    if (xf86GetOptValInteger(pI810->Options, OPTION_XVMC_SURFACES,
 			    &(pI810->numSurfaces))) {
-      xf86DrvMsg(pScrn->scrnIndex, X_CONFIG, "%d XvMC Surfaces Requested.\n",
+      xf86DrvMsg(scrn->scrnIndex, X_CONFIG, "%d XvMC Surfaces Requested.\n",
 		 pI810->numSurfaces);
       if (pI810->numSurfaces > 7) {
-	 xf86DrvMsg(pScrn->scrnIndex, X_PROBED,
+	 xf86DrvMsg(scrn->scrnIndex, X_PROBED,
 		    "Using 7 XvMC Surfaces (Maximum Allowed).\n");
 	 pI810->numSurfaces = 7;
       }
       if (pI810->numSurfaces < 6) {
-	 xf86DrvMsg(pScrn->scrnIndex, X_PROBED,
+	 xf86DrvMsg(scrn->scrnIndex, X_PROBED,
 		    "Using 6 XvMC Surfaces (Minimum Allowed).\n");
 	 pI810->numSurfaces = 6;
       }
    } else {
-      xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+      xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		 "XvMC is Disabled: use XvMCSurfaces config option to enable.\n");
       pI810->numSurfaces = 0;
    }
@@ -648,7 +648,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 #ifdef HAVE_DRI1
    /* Load the dri module if requested. */
    if (xf86ReturnOptValBool(pI810->Options, OPTION_DRI, FALSE)) {
-      xf86LoadSubModule(pScrn, "dri");
+      xf86LoadSubModule(scrn, "dri");
    }
 #endif
 
@@ -656,9 +656,9 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 }
 
 static Bool
-I810MapMMIO(ScrnInfoPtr pScrn)
+I810MapMMIO(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
    struct pci_device *const device = pI810->PciInfo;
    int err;
 
@@ -669,7 +669,7 @@ I810MapMMIO(ScrnInfoPtr pScrn)
 			       (void **) &pI810->MMIOBase);
    if (err) 
    {
-      xf86DrvMsg (pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg (scrn->scrnIndex, X_ERROR,
 		  "Unable to map mmio BAR. %s (%d)\n",
 		  strerror (err), err);
       return FALSE;
@@ -678,13 +678,13 @@ I810MapMMIO(ScrnInfoPtr pScrn)
 }
 
 static Bool
-I810MapMem(ScrnInfoPtr pScrn)
+I810MapMem(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
    struct pci_device *const device = pI810->PciInfo;
    int err;
 
-   if (!I810MapMMIO(pScrn))
+   if (!I810MapMMIO(scrn))
       return FALSE;
 
    err = pci_device_map_range (device,
@@ -694,7 +694,7 @@ I810MapMem(ScrnInfoPtr pScrn)
 			       (void **) &pI810->FbBase);
    if (err) 
    {
-      xf86DrvMsg (pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg (scrn->scrnIndex, X_ERROR,
 		  "Unable to map frame buffer BAR. %s (%d)\n",
 		  strerror (err), err);
       return FALSE;
@@ -706,31 +706,31 @@ I810MapMem(ScrnInfoPtr pScrn)
 }
 
 static void
-I810UnmapMMIO(ScrnInfoPtr pScrn)
+I810UnmapMMIO(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    pci_device_unmap_range (pI810->PciInfo, pI810->MMIOBase, I810_REG_SIZE);
    pI810->MMIOBase = NULL;
 }
 
 static Bool
-I810UnmapMem(ScrnInfoPtr pScrn)
+I810UnmapMem(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    pci_device_unmap_range (pI810->PciInfo, pI810->FbBase, pI810->FbMapSize);
    pI810->FbBase = NULL;
-   I810UnmapMMIO(pScrn);
+   I810UnmapMMIO(scrn);
    return TRUE;
 }
 
 /* Famous last words
  */
 void
-I810PrintErrorState(ScrnInfoPtr pScrn)
+I810PrintErrorState(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    ErrorF("pgetbl_ctl: 0x%lx pgetbl_err: 0x%lx\n",
 	  (unsigned long) INREG(PGETBL_CTL), (unsigned long) INREG(PGE_ERR));
@@ -764,24 +764,24 @@ I810PrintErrorState(ScrnInfoPtr pScrn)
  * mask out bits here - just read the registers.
  */
 static void
-DoSave(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
+DoSave(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
        Bool saveFonts)
 {
    I810Ptr pI810;
    vgaHWPtr hwp;
    int i;
 
-   pI810 = I810PTR(pScrn);
-   hwp = VGAHWPTR(pScrn);
+   pI810 = I810PTR(scrn);
+   hwp = VGAHWPTR(scrn);
 
    /*
     * This function will handle creating the data structure and filling
     * in the generic VGA portion.
     */
    if (saveFonts)
-      vgaHWSave(pScrn, vgaReg, VGA_SR_MODE | VGA_SR_FONTS | VGA_SR_CMAP);
+      vgaHWSave(scrn, vgaReg, VGA_SR_MODE | VGA_SR_FONTS | VGA_SR_CMAP);
    else
-      vgaHWSave(pScrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
+      vgaHWSave(scrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
 
    /*
     * The port I/O code necessary to read in the extended registers
@@ -819,21 +819,21 @@ DoSave(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
 
    if ((i810Reg->LprbTail & TAIL_ADDR) != (i810Reg->LprbHead & HEAD_ADDR) &&
        i810Reg->LprbLen & RING_VALID) {
-      I810PrintErrorState(pScrn);
+      I810PrintErrorState(scrn);
       FatalError("Active ring not flushed\n");
    }
 }
 
 static void
-I810Save(ScrnInfoPtr pScrn)
+I810Save(ScrnInfoPtr scrn)
 {
    vgaHWPtr hwp;
    I810Ptr pI810;
    uint32_t temp;
 
-   hwp = VGAHWPTR(pScrn);
-   pI810 = I810PTR(pScrn);
-   DoSave(pScrn, &hwp->SavedReg, &pI810->SavedReg, TRUE);
+   hwp = VGAHWPTR(scrn);
+   pI810 = I810PTR(scrn);
+   DoSave(scrn, &hwp->SavedReg, &pI810->SavedReg, TRUE);
 
    temp = INREG(MEMMODE);
    temp |= 4;
@@ -905,7 +905,7 @@ i810PrintMode(vgaRegPtr vgaReg, I810RegPtr mode)
 }
 
 static void
-DoRestore(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
+DoRestore(ScrnInfoPtr scrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
 	  Bool restoreFonts)
 {
    I810Ptr pI810;
@@ -914,15 +914,15 @@ DoRestore(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
    unsigned int itemp;
    int i;
 
-   pI810 = I810PTR(pScrn);
-   hwp = VGAHWPTR(pScrn);
+   pI810 = I810PTR(scrn);
+   hwp = VGAHWPTR(scrn);
 
    if (I810_DEBUG & DEBUG_VERBOSE_VGA) {
       ErrorF("Setting mode in I810Restore:\n");
       i810PrintMode(vgaReg, i810Reg);
    }
 
-   vgaHWProtect(pScrn, TRUE);
+   vgaHWProtect(scrn, TRUE);
 
    usleep(50000);
 
@@ -966,9 +966,9 @@ DoRestore(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
     *           restore clock-select bits.
     */
    if (restoreFonts)
-      vgaHWRestore(pScrn, vgaReg, VGA_SR_FONTS | VGA_SR_MODE | VGA_SR_CMAP);
+      vgaHWRestore(scrn, vgaReg, VGA_SR_FONTS | VGA_SR_MODE | VGA_SR_CMAP);
    else
-      vgaHWRestore(pScrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
+      vgaHWRestore(scrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
 
    hwp->writeCrtc(hwp, EXT_VERT_TOTAL, i810Reg->ExtVertTotal);
    hwp->writeCrtc(hwp, EXT_VERT_DISPLAY, i810Reg->ExtVertDispEnd);
@@ -1081,13 +1081,13 @@ DoRestore(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
    if (!(vgaReg->Attribute[0x10] & 0x1)) {
       usleep(50000);
       if (restoreFonts)
-	 vgaHWRestore(pScrn, vgaReg,
+	 vgaHWRestore(scrn, vgaReg,
 		      VGA_SR_FONTS | VGA_SR_MODE | VGA_SR_CMAP);
       else
-	 vgaHWRestore(pScrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
+	 vgaHWRestore(scrn, vgaReg, VGA_SR_MODE | VGA_SR_CMAP);
    }
 
-   vgaHWProtect(pScrn, FALSE);
+   vgaHWProtect(scrn, FALSE);
 
    temp = hwp->readCrtc(hwp, IO_CTNL);
    temp &= ~(EXTENDED_ATTR_CNTL | EXTENDED_CRTC_CNTL);
@@ -1096,10 +1096,10 @@ DoRestore(ScrnInfoPtr pScrn, vgaRegPtr vgaReg, I810RegPtr i810Reg,
 }
 
 static void
-I810SetRingRegs(ScrnInfoPtr pScrn)
+I810SetRingRegs(ScrnInfoPtr scrn)
 {
    unsigned int itemp;
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    OUTREG(LP_RING + RING_TAIL, 0);
    OUTREG(LP_RING + RING_HEAD, 0);
@@ -1116,15 +1116,15 @@ I810SetRingRegs(ScrnInfoPtr pScrn)
 }
 
 static void
-I810Restore(ScrnInfoPtr pScrn)
+I810Restore(ScrnInfoPtr scrn)
 {
    vgaHWPtr hwp;
    I810Ptr pI810;
 
-   hwp = VGAHWPTR(pScrn);
-   pI810 = I810PTR(pScrn);
+   hwp = VGAHWPTR(scrn);
+   pI810 = I810PTR(scrn);
 
-   DoRestore(pScrn, &hwp->SavedReg, &pI810->SavedReg, TRUE);
+   DoRestore(scrn, &hwp->SavedReg, &pI810->SavedReg, TRUE);
 }
 
 /*
@@ -1141,9 +1141,9 @@ I810Restore(ScrnInfoPtr pScrn)
     (double)m / ((double)n * (1 << p)) * 4 * REF_FREQ
 
 static void
-I810CalcVCLK(ScrnInfoPtr pScrn, double freq)
+I810CalcVCLK(ScrnInfoPtr scrn, double freq)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
    I810RegPtr i810Reg = &pI810->ModeReg;
    int m, n, p;
    double f_out;
@@ -1188,7 +1188,7 @@ I810CalcVCLK(ScrnInfoPtr pScrn, double freq)
    i810Reg->VideoClk2_N = (n_best - 2) & 0x3FF;
    i810Reg->VideoClk2_DivisorSel = (p_best << 4);
 
-   xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
+   xf86DrvMsgVerb(scrn->scrnIndex, X_INFO, 3,
 		  "Setting dot clock to %.1f MHz " "[ 0x%x 0x%x 0x%x ] "
 		  "[ %d %d %d ]\n", CALC_VCLK(m_best, n_best, p_best),
 		  i810Reg->VideoClk2_M, i810Reg->VideoClk2_N,
@@ -1196,36 +1196,36 @@ I810CalcVCLK(ScrnInfoPtr pScrn, double freq)
 }
 
 static Bool
-I810SetMode(ScrnInfoPtr pScrn, DisplayModePtr mode)
+I810SetMode(ScrnInfoPtr scrn, DisplayModePtr mode)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
    I810RegPtr i810Reg = &pI810->ModeReg;
-   vgaRegPtr pVga = &VGAHWPTR(pScrn)->ModeReg;
+   vgaRegPtr pVga = &VGAHWPTR(scrn)->ModeReg;
    double dclk = mode->Clock / 1000.0;
 
-   switch (pScrn->bitsPerPixel) {
+   switch (scrn->bitsPerPixel) {
    case 8:
-      pVga->CRTC[0x13] = pScrn->displayWidth >> 3;
-      i810Reg->ExtOffset = pScrn->displayWidth >> 11;
+      pVga->CRTC[0x13] = scrn->displayWidth >> 3;
+      i810Reg->ExtOffset = scrn->displayWidth >> 11;
       i810Reg->PixelPipeCfg1 = DISPLAY_8BPP_MODE;
       i810Reg->BitBLTControl = COLEXP_8BPP;
       break;
    case 16:
-      if (pScrn->weight.green == 5) {
+      if (scrn->weight.green == 5) {
 	 i810Reg->PixelPipeCfg1 = DISPLAY_15BPP_MODE;
       } else {
 	 i810Reg->PixelPipeCfg1 = DISPLAY_16BPP_MODE;
       }
-      pVga->CRTC[0x13] = pScrn->displayWidth >> 2;
-      i810Reg->ExtOffset = pScrn->displayWidth >> 10;
+      pVga->CRTC[0x13] = scrn->displayWidth >> 2;
+      i810Reg->ExtOffset = scrn->displayWidth >> 10;
       i810Reg->BitBLTControl = COLEXP_16BPP;
 
       /* Enable Palette Programming for Direct Color visuals. -jens */
       i810Reg->PixelPipeCfg2 = DISPLAY_GAMMA_ENABLE;
       break;
    case 24:
-      pVga->CRTC[0x13] = (pScrn->displayWidth * 3) >> 3;
-      i810Reg->ExtOffset = (pScrn->displayWidth * 3) >> 11;
+      pVga->CRTC[0x13] = (scrn->displayWidth * 3) >> 3;
+      i810Reg->ExtOffset = (scrn->displayWidth * 3) >> 11;
 
       i810Reg->PixelPipeCfg1 = DISPLAY_24BPP_MODE;
       i810Reg->BitBLTControl = COLEXP_24BPP;
@@ -1310,13 +1310,13 @@ I810SetMode(ScrnInfoPtr pScrn, DisplayModePtr mode)
     * Calculate the VCLK that most closely matches the requested dot
     * clock.
     */
-   I810CalcVCLK(pScrn, dclk);
+   I810CalcVCLK(scrn, dclk);
 
    /* Since we program the clocks ourselves, always use VCLK2. */
    pVga->MiscOutReg |= 0x0C;
 
    /* Calculate the FIFO Watermark and Burst Length. */
-   i810Reg->LMI_FIFO_Watermark = I810CalcWatermark(pScrn, dclk, FALSE);
+   i810Reg->LMI_FIFO_Watermark = I810CalcWatermark(scrn, dclk, FALSE);
 
    /* Setup the ring buffer */
    i810Reg->LprbTail = 0;
@@ -1333,36 +1333,36 @@ I810SetMode(ScrnInfoPtr pScrn, DisplayModePtr mode)
 }
 
 static Bool
-I810ModeInit(ScrnInfoPtr pScrn, DisplayModePtr mode)
+I810ModeInit(ScrnInfoPtr scrn, DisplayModePtr mode)
 {
    vgaHWPtr hwp;
    I810Ptr pI810;
 
-   hwp = VGAHWPTR(pScrn);
-   pI810 = I810PTR(pScrn);
+   hwp = VGAHWPTR(scrn);
+   pI810 = I810PTR(scrn);
 
    vgaHWUnlock(hwp);
 
-   if (!vgaHWInit(pScrn, mode))
+   if (!vgaHWInit(scrn, mode))
       return FALSE;
 
-   pScrn->vtSema = TRUE;
+   scrn->vtSema = TRUE;
 
-   if (!I810SetMode(pScrn, mode))
+   if (!I810SetMode(scrn, mode))
       return FALSE;
 
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-      DRILock(screenInfo.screens[pScrn->scrnIndex], 0);
+      DRILock(screenInfo.screens[scrn->scrnIndex], 0);
       pI810->LockHeld = 1;
    }
 #endif
 
-   DoRestore(pScrn, &hwp->ModeReg, &pI810->ModeReg, FALSE);
+   DoRestore(scrn, &hwp->ModeReg, &pI810->ModeReg, FALSE);
 
 #ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-      DRIUnlock(screenInfo.screens[pScrn->scrnIndex]);
+      DRIUnlock(screenInfo.screens[scrn->scrnIndex]);
       pI810->LockHeld = 0;
    }
 #endif
@@ -1371,14 +1371,14 @@ I810ModeInit(ScrnInfoPtr pScrn, DisplayModePtr mode)
 }
 
 static void
-I810LoadPalette15(ScrnInfoPtr pScrn, int numColors, int *indices,
+I810LoadPalette15(ScrnInfoPtr scrn, int numColors, int *indices,
 		  LOCO * colors, VisualPtr pVisual)
 {
    vgaHWPtr hwp;
    int i, j, index;
    unsigned char r, g, b;
 
-   hwp = VGAHWPTR(pScrn);
+   hwp = VGAHWPTR(scrn);
 
    for (i = 0; i < numColors; i++) {
       index = indices[i];
@@ -1395,14 +1395,14 @@ I810LoadPalette15(ScrnInfoPtr pScrn, int numColors, int *indices,
 }
 
 static void
-I810LoadPalette16(ScrnInfoPtr pScrn, int numColors, int *indices,
+I810LoadPalette16(ScrnInfoPtr scrn, int numColors, int *indices,
 		  LOCO * colors, VisualPtr pVisual)
 {
    vgaHWPtr hwp;
    int i, index;
    unsigned char r, g, b;
 
-   hwp = VGAHWPTR(pScrn);
+   hwp = VGAHWPTR(scrn);
 
    /* Load all four entries in each of the 64 color ranges.  -jens */
    for (i = 0; i < numColors; i++) {
@@ -1459,14 +1459,14 @@ I810LoadPalette16(ScrnInfoPtr pScrn, int numColors, int *indices,
 }
 
 static void
-I810LoadPalette24(ScrnInfoPtr pScrn, int numColors, int *indices,
+I810LoadPalette24(ScrnInfoPtr scrn, int numColors, int *indices,
 		  LOCO * colors, VisualPtr pVisual)
 {
    vgaHWPtr hwp;
    int i, index;
    unsigned char r, g, b;
 
-   hwp = VGAHWPTR(pScrn);
+   hwp = VGAHWPTR(scrn);
 
    for (i = 0; i < numColors; i++) {
       index = indices[i];
@@ -1481,9 +1481,9 @@ I810LoadPalette24(ScrnInfoPtr pScrn, int numColors, int *indices,
 }
 
 Bool
-I810AllocateFront(ScrnInfoPtr pScrn)
+I810AllocateFront(ScrnInfoPtr scrn)
 {
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
    int cache_lines = -1;
 
    if (pI810->DoneFrontAlloc)
@@ -1492,16 +1492,16 @@ I810AllocateFront(ScrnInfoPtr pScrn)
    memset(&(pI810->FbMemBox), 0, sizeof(BoxRec));
    /* Alloc FrontBuffer/Ring/Accel memory */
    pI810->FbMemBox.x1 = 0;
-   pI810->FbMemBox.x2 = pScrn->displayWidth;
+   pI810->FbMemBox.x2 = scrn->displayWidth;
    pI810->FbMemBox.y1 = 0;
-   pI810->FbMemBox.y2 = pScrn->virtualY;
+   pI810->FbMemBox.y2 = scrn->virtualY;
 
    xf86GetOptValInteger(pI810->Options, OPTION_CACHE_LINES, &cache_lines);
 
    if (cache_lines < 0) {
       /* make sure there is enough for two DVD sized YUV buffers */
-      cache_lines = (pScrn->depth == 24) ? 256 : 384;
-      if (pScrn->displayWidth <= 1024)
+      cache_lines = (scrn->depth == 24) ? 256 : 384;
+      if (scrn->displayWidth <= 1024)
 	 cache_lines *= 2;
    }
    /* Make sure there's enough space for cache_lines.
@@ -1517,9 +1517,9 @@ I810AllocateFront(ScrnInfoPtr pScrn)
    {
       int maxCacheLines;
 
-      maxCacheLines = (pScrn->videoRam * 1024 /
-		       (pScrn->bitsPerPixel / 8) /
-		       pScrn->displayWidth) - pScrn->virtualY;
+      maxCacheLines = (scrn->videoRam * 1024 /
+		       (scrn->bitsPerPixel / 8) /
+		       scrn->displayWidth) - scrn->virtualY;
       if (maxCacheLines < 0)
 	 maxCacheLines = 0;
       if (cache_lines > maxCacheLines)
@@ -1527,7 +1527,7 @@ I810AllocateFront(ScrnInfoPtr pScrn)
    }
    pI810->FbMemBox.y2 += cache_lines;
 
-   xf86DrvMsg(pScrn->scrnIndex, X_INFO,
+   xf86DrvMsg(scrn->scrnIndex, X_INFO,
 	      "Adding %i scanlines for pixmap caching\n", cache_lines);
 
    /* Reserve room for the framebuffer and pixcache.  Put at the top
@@ -1538,7 +1538,7 @@ I810AllocateFront(ScrnInfoPtr pScrn)
    if (!I810AllocLow(&(pI810->FrontBuffer),
 		     &(pI810->SysMem),
 		     ALIGN((pI810->FbMemBox.x2 * pI810->FbMemBox.y2 * pI810->cpp), 4096))) {
-      xf86DrvMsg(pScrn->scrnIndex,
+      xf86DrvMsg(scrn->scrnIndex,
 		 X_WARNING, "Framebuffer allocation failed\n");
       return FALSE;
    }
@@ -1551,16 +1551,16 @@ I810AllocateFront(ScrnInfoPtr pScrn)
       pI810->LpRing->tail = 0;
       pI810->LpRing->space = 0;
    } else {
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "Ring buffer allocation failed\n");
       return (FALSE);
    }
 
    if (I810AllocLow(&pI810->Scratch, &(pI810->SysMem), 64 * 1024) ||
        I810AllocLow(&pI810->Scratch, &(pI810->SysMem), 16 * 1024)) {
-      xf86DrvMsg(pScrn->scrnIndex, X_INFO, "Allocated Scratch Memory\n");
+      xf86DrvMsg(scrn->scrnIndex, X_INFO, "Allocated Scratch Memory\n");
    } else {
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "Scratch memory allocation failed\n");
       return (FALSE);
    }
@@ -1572,18 +1572,18 @@ I810AllocateFront(ScrnInfoPtr pScrn)
 static Bool
 I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 {
-   ScrnInfoPtr pScrn;
+   ScrnInfoPtr scrn;
    vgaHWPtr hwp;
    I810Ptr pI810;
    VisualPtr visual;
 
-   pScrn = xf86Screens[pScreen->myNum];
-   pI810 = I810PTR(pScrn);
-   hwp = VGAHWPTR(pScrn);
+   scrn = xf86Screens[pScreen->myNum];
+   pI810 = I810PTR(scrn);
+   hwp = VGAHWPTR(scrn);
 
    pI810->LpRing = calloc(sizeof(I810RingBuffer),1);
    if (!pI810->LpRing) {
-     xf86DrvMsg(pScrn->scrnIndex, X_ERROR, 
+     xf86DrvMsg(scrn->scrnIndex, X_ERROR, 
 		"Could not allocate lpring data structure.\n");
      return FALSE;
    }
@@ -1591,8 +1591,8 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
    miClearVisualTypes();
 
    /* Re-implemented Direct Color support, -jens */
-   if (!miSetVisualTypes(pScrn->depth, miGetDefaultVisualMask(pScrn->depth),
-			 pScrn->rgbBits, pScrn->defaultVisual))
+   if (!miSetVisualTypes(scrn->depth, miGetDefaultVisualMask(scrn->depth),
+			 scrn->rgbBits, scrn->defaultVisual))
       return FALSE;
 
    if (!miSetPixmapDepths())
@@ -1627,47 +1627,47 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
 #else
    pI810->directRenderingEnabled = FALSE;
-   if (!I810AllocateGARTMemory(pScrn))
+   if (!I810AllocateGARTMemory(scrn))
       return FALSE;
-   if (!I810AllocateFront(pScrn))
+   if (!I810AllocateFront(scrn))
       return FALSE;
 #endif
 
-   if (!I810MapMem(pScrn))
+   if (!I810MapMem(scrn))
       return FALSE;
 
-   pScrn->memPhysBase = (unsigned long)pI810->LinearAddr;
-   pScrn->fbOffset = 0;
+   scrn->memPhysBase = (unsigned long)pI810->LinearAddr;
+   scrn->fbOffset = 0;
 
    vgaHWSetMmioFuncs(hwp, pI810->MMIOBase, 0);
    vgaHWGetIOBase(hwp);
-   if (!vgaHWMapMem(pScrn))
+   if (!vgaHWMapMem(scrn))
       return FALSE;
 
-   I810Save(pScrn);
-   if (!I810ModeInit(pScrn, pScrn->currentMode))
+   I810Save(scrn);
+   if (!I810ModeInit(scrn, scrn->currentMode))
       return FALSE;
 
    I810SaveScreen(pScreen, FALSE);
-   I810AdjustFrame(scrnIndex, pScrn->frameX0, pScrn->frameY0, 0);
+   I810AdjustFrame(scrnIndex, scrn->frameX0, scrn->frameY0, 0);
 
-   if (!fbScreenInit(pScreen, pI810->FbBase + pScrn->fbOffset,
-		     pScrn->virtualX, pScrn->virtualY,
-		     pScrn->xDpi, pScrn->yDpi,
-		     pScrn->displayWidth, pScrn->bitsPerPixel))
+   if (!fbScreenInit(pScreen, pI810->FbBase + scrn->fbOffset,
+		     scrn->virtualX, scrn->virtualY,
+		     scrn->xDpi, scrn->yDpi,
+		     scrn->displayWidth, scrn->bitsPerPixel))
       return FALSE;
 
-   if (pScrn->bitsPerPixel > 8) {
+   if (scrn->bitsPerPixel > 8) {
       /* Fixup RGB ordering */
       visual = pScreen->visuals + pScreen->numVisuals;
       while (--visual >= pScreen->visuals) {
 	 if ((visual->class | DynamicClass) == DirectColor) {
-	    visual->offsetRed = pScrn->offset.red;
-	    visual->offsetGreen = pScrn->offset.green;
-	    visual->offsetBlue = pScrn->offset.blue;
-	    visual->redMask = pScrn->mask.red;
-	    visual->greenMask = pScrn->mask.green;
-	    visual->blueMask = pScrn->mask.blue;
+	    visual->offsetRed = scrn->offset.red;
+	    visual->offsetGreen = scrn->offset.green;
+	    visual->offsetBlue = scrn->offset.blue;
+	    visual->redMask = scrn->mask.red;
+	    visual->greenMask = scrn->mask.green;
+	    visual->blueMask = scrn->mask.blue;
 	 }
       }
    }
@@ -1684,9 +1684,9 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
    if (!pI810->directRenderingEnabled) {
       pI810->DoneFrontAlloc = FALSE;
-      if (!I810AllocateGARTMemory(pScrn))
+      if (!I810AllocateGARTMemory(scrn))
 	 return FALSE;
-      if (!I810AllocateFront(pScrn))
+      if (!I810AllocateFront(scrn))
 	 return FALSE;
    }
 #endif
@@ -1696,20 +1696,20 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 #endif
 
    if (!xf86InitFBManager(pScreen, &(pI810->FbMemBox))) {
-      xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+      xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		 "Failed to init memory manager\n");
       return FALSE;
    }
 
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_NOACCEL, FALSE)) {
       if (pI810->LpRing->mem.Size != 0) {
-	 I810SetRingRegs(pScrn);
+	 I810SetRingRegs(scrn);
 
 	 if (!I810AccelInit(pScreen)) {
-	    xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+	    xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		       "Hardware acceleration initialization failed\n");
 	 }  else /* PK added 16.02.2004 */
-	     I810EmitFlush(pScrn);
+	     I810EmitFlush(scrn);
       }
    }
 
@@ -1721,7 +1721,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
    if (!xf86ReturnOptValBool(pI810->Options, OPTION_SW_CURSOR, FALSE)) {
       if (!I810CursorInit(pScreen)) {
-	 xf86DrvMsg(pScrn->scrnIndex, X_ERROR,
+	 xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 		    "Hardware cursor initialization failed\n");
       }
    }
@@ -1730,8 +1730,8 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
       return FALSE;
 
    /* Use driver specific palette load routines for Direct Color support. -jens */
-   if (pScrn->bitsPerPixel == 16) {
-      if (pScrn->depth == 15) {
+   if (scrn->bitsPerPixel == 16) {
+      if (scrn->depth == 15) {
 	 if (!xf86HandleColormaps(pScreen, 256, 8, I810LoadPalette15, NULL,
 				  CMAP_PALETTED_TRUECOLOR |
 				  CMAP_RELOAD_ON_MODE_SWITCH))
@@ -1769,9 +1769,9 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 #endif
 
    if (pI810->directRenderingEnabled) {
-      xf86DrvMsg(pScrn->scrnIndex, X_INFO, "Direct rendering enabled\n");
+      xf86DrvMsg(scrn->scrnIndex, X_INFO, "Direct rendering enabled\n");
    } else {
-      xf86DrvMsg(pScrn->scrnIndex, X_WARNING, "Direct rendering disabled\n");
+      xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Direct rendering disabled\n");
    }
 
    pScreen->SaveScreen = I810SaveScreen;
@@ -1779,7 +1779,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
    pScreen->CloseScreen = I810CloseScreen;
 
    if (serverGeneration == 1)
-      xf86ShowUnusedOptions(pScrn->scrnIndex, pScrn->options);
+      xf86ShowUnusedOptions(scrn->scrnIndex, scrn->options);
 
    return TRUE;
 }
@@ -1787,9 +1787,9 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 Bool
 I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
 {
-   ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
 #if 0
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 #endif
    if (I810_DEBUG & DEBUG_VERBOSE_CURSOR)
       ErrorF("I810SwitchMode %p %x\n", (void *)mode, flags);
@@ -1812,17 +1812,17 @@ I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
    }
 # endif
    if (pI810->AccelInfoRec != NULL) {
-      I810RefreshRing(pScrn);
-      I810Sync(pScrn);
+      I810RefreshRing(scrn);
+      I810Sync(scrn);
       pI810->AccelInfoRec->NeedToSync = FALSE;
    }
-   I810Restore(pScrn);
+   I810Restore(scrn);
 
 # ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
-       if (!I810DRILeave(pScrn))
+       if (!I810DRILeave(scrn))
 	   return FALSE;
-       if (!I810DRIEnter(pScrn))
+       if (!I810DRIEnter(scrn))
 	   return FALSE;
 
        if (I810_DEBUG & DEBUG_VERBOSE_DRI)
@@ -1832,33 +1832,33 @@ I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
    }
 # endif
 #endif
-   return I810ModeInit(pScrn, mode);
+   return I810ModeInit(scrn, mode);
 }
 
 void
 I810AdjustFrame(int scrnIndex, int x, int y, int flags)
 {
-   ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
-   I810Ptr pI810 = I810PTR(pScrn);
-   vgaHWPtr hwp = VGAHWPTR(pScrn);
+   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   I810Ptr pI810 = I810PTR(scrn);
+   vgaHWPtr hwp = VGAHWPTR(scrn);
    int Base;
 
 #if 1
    if (pI810->showCache) {
      int lastline = pI810->FbMapSize / 
-       ((pScrn->displayWidth * pScrn->bitsPerPixel) / 8);
-     lastline -= pScrn->currentMode->VDisplay;
+       ((scrn->displayWidth * scrn->bitsPerPixel) / 8);
+     lastline -= scrn->currentMode->VDisplay;
      if (y > 0)
-       y += pScrn->currentMode->VDisplay;
+       y += scrn->currentMode->VDisplay;
      if (y > lastline) y = lastline;
    }
 #endif
-   Base = (y * pScrn->displayWidth + x) >> 2;
+   Base = (y * scrn->displayWidth + x) >> 2;
 
    if (I810_DEBUG & DEBUG_VERBOSE_CURSOR)
       ErrorF("I810AdjustFrame %d,%d %x\n", x, y, flags);
 
-   switch (pScrn->bitsPerPixel) {
+   switch (scrn->bitsPerPixel) {
    case 8:
       break;
    case 16:
@@ -1890,20 +1890,20 @@ I810AdjustFrame(int scrnIndex, int x, int y, int flags)
 static Bool
 I810EnterVT(int scrnIndex, int flags)
 {
-   ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
+   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
 
 #ifdef HAVE_DRI1
-   I810Ptr pI810 = I810PTR(pScrn);
+   I810Ptr pI810 = I810PTR(scrn);
 #endif
 
    if (I810_DEBUG & DEBUG_VERBOSE_DRI)
       ErrorF("\n\nENTER VT\n");
 
-   if (!I810BindGARTMemory(pScrn)) {
+   if (!I810BindGARTMemory(scrn)) {
       return FALSE;
    }
 #ifdef HAVE_DRI1
-   if (!I810DRIEnter(pScrn)) {
+   if (!I810DRIEnter(scrn)) {
       return FALSE;
    }
    if (pI810->directRenderingEnabled) {
@@ -1914,18 +1914,18 @@ I810EnterVT(int scrnIndex, int flags)
    }
 #endif
 
-   if (!I810ModeInit(pScrn, pScrn->currentMode))
+   if (!I810ModeInit(scrn, scrn->currentMode))
       return FALSE;
-   I810AdjustFrame(scrnIndex, pScrn->frameX0, pScrn->frameY0, 0);
+   I810AdjustFrame(scrnIndex, scrn->frameX0, scrn->frameY0, 0);
    return TRUE;
 }
 
 static void
 I810LeaveVT(int scrnIndex, int flags)
 {
-   ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
-   vgaHWPtr hwp = VGAHWPTR(pScrn);
-   I810Ptr pI810 = I810PTR(pScrn);
+   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   vgaHWPtr hwp = VGAHWPTR(scrn);
+   I810Ptr pI810 = I810PTR(scrn);
 
    if (I810_DEBUG & DEBUG_VERBOSE_DRI)
       ErrorF("\n\n\nLeave VT\n");
@@ -1940,16 +1940,16 @@ I810LeaveVT(int scrnIndex, int flags)
 #endif
 
    if (pI810->AccelInfoRec != NULL) {
-      I810RefreshRing(pScrn);
-      I810Sync(pScrn);
+      I810RefreshRing(scrn);
+      I810Sync(scrn);
       pI810->AccelInfoRec->NeedToSync = FALSE;
    }
-   I810Restore(pScrn);
+   I810Restore(scrn);
 
-   if (!I810UnbindGARTMemory(pScrn))
+   if (!I810UnbindGARTMemory(scrn))
       return;
 #ifdef HAVE_DRI1
-   if (!I810DRILeave(pScrn))
+   if (!I810DRILeave(scrn))
       return;
 #endif
 
@@ -1959,18 +1959,18 @@ I810LeaveVT(int scrnIndex, int flags)
 static Bool
 I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
 {
-   ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
-   vgaHWPtr hwp = VGAHWPTR(pScrn);
-   I810Ptr pI810 = I810PTR(pScrn);
+   ScrnInfoPtr scrn = xf86Screens[scrnIndex];
+   vgaHWPtr hwp = VGAHWPTR(scrn);
+   I810Ptr pI810 = I810PTR(scrn);
    XAAInfoRecPtr infoPtr = pI810->AccelInfoRec;
 
-   if (pScrn->vtSema == TRUE) {
+   if (scrn->vtSema == TRUE) {
       if (pI810->AccelInfoRec != NULL) {
-	 I810RefreshRing(pScrn);
-	 I810Sync(pScrn);
+	 I810RefreshRing(scrn);
+	 I810Sync(scrn);
 	 pI810->AccelInfoRec->NeedToSync = FALSE;
       }
-      I810Restore(pScrn);
+      I810Restore(scrn);
       vgaHWLock(hwp);
    }
 #ifdef HAVE_DRI1
@@ -1980,14 +1980,14 @@ I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
    }
 #endif
 
-   if (pScrn->vtSema == TRUE) {
-      I810UnbindGARTMemory(pScrn);
-      I810Restore(pScrn);
+   if (scrn->vtSema == TRUE) {
+      I810UnbindGARTMemory(scrn);
+      I810Restore(scrn);
       vgaHWLock(hwp);
    }
 
-   I810UnmapMem(pScrn);
-   vgaHWUnmapMem(pScrn);
+   I810UnmapMem(scrn);
+   vgaHWUnmapMem(scrn);
 
    if (pI810->ScanlineColorExpandBuffers) {
       free(pI810->ScanlineColorExpandBuffers);
@@ -2020,7 +2020,7 @@ I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
    free(pI810->LpRing);
    pI810->LpRing = NULL;
 
-   pScrn->vtSema = FALSE;
+   scrn->vtSema = FALSE;
    pScreen->CloseScreen = pI810->CloseScreen;
    return (*pScreen->CloseScreen) (scrnIndex, pScreen);
 }
@@ -2053,7 +2053,7 @@ I810SaveScreen(ScreenPtr pScreen, Bool unblack)
 }
 
 static void
-I810DisplayPowerManagementSet(ScrnInfoPtr pScrn, int PowerManagementMode,
+I810DisplayPowerManagementSet(ScrnInfoPtr scrn, int PowerManagementMode,
 			      int flags)
 {
    I810Ptr pI810;
@@ -2061,7 +2061,7 @@ I810DisplayPowerManagementSet(ScrnInfoPtr pScrn, int PowerManagementMode,
    int DPMSSyncSelect = 0;
    vgaHWPtr hwp;
 
-   pI810 = I810PTR(pScrn);
+   pI810 = I810PTR(scrn);
    switch (PowerManagementMode) {
    case DPMSModeOn:
       /* Screen: On; HSync: On, VSync: On */
@@ -2085,7 +2085,7 @@ I810DisplayPowerManagementSet(ScrnInfoPtr pScrn, int PowerManagementMode,
       break;
    }
 
-   hwp = VGAHWPTR(pScrn);
+   hwp = VGAHWPTR(scrn);
 
    /* Turn the screen on/off */
    SEQ01 |= hwp->readSeq(hwp, 0x01) & ~0x20;
commit 3410db008c6c20dc6590a89c05b40a0c6de6744a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 22 15:54:52 2012 +0100

    sna/dri: Avoid confusion of attempting to exchange pixmaps for DPMS-off flips
    
    Not only do we confuse ourselves, but we end up confusing the damage
    tracking on the root window whenever we update the Screen Pixmap. So for
    the time being, don't.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 2b97e68..517eaad 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -937,11 +937,13 @@ can_flip(struct sna * sna,
 	WindowPtr win = (WindowPtr)draw;
 	PixmapPtr pixmap;
 
-	if (!sna->scrn->vtSema)
+	if (draw->type == DRAWABLE_PIXMAP)
 		return FALSE;
 
-	if (draw->type == DRAWABLE_PIXMAP)
+	if (!sna->scrn->vtSema) {
+		DBG(("%s: no, not attached to VT\n", __FUNCTION__));
 		return FALSE;
+	}
 
 	if (front->format != back->format) {
 		DBG(("%s: no, format mismatch, front = %d, back = %d\n",
@@ -1184,23 +1186,25 @@ static void sna_dri_flip_event(struct sna *sna,
 				      serverClient,
 				      M_ANY, DixWriteAccess) == Success) {
 			if (can_flip(sna, drawable, flip->front, flip->back) &&
-			    !sna_dri_flip_continue(sna, drawable, flip)) {
+			    sna_dri_flip_continue(sna, drawable, flip)) {
 				DRI2SwapComplete(flip->client, drawable,
-						 0, 0, 0,
-						 DRI2_BLIT_COMPLETE,
-						 flip->client ? flip->event_complete : NULL,
-						 flip->event_data);
-				sna_dri_frame_event_info_free(flip);
+						0, 0, 0,
+						DRI2_FLIP_COMPLETE,
+						flip->client ? flip->event_complete : NULL,
+						flip->event_data);
 			} else {
+				DBG(("%s: no longer able to flip\n",
+				     __FUNCTION__));
+
 				DRI2SwapComplete(flip->client, drawable,
-						 0, 0, 0,
-						 DRI2_FLIP_COMPLETE,
-						 flip->client ? flip->event_complete : NULL,
-						 flip->event_data);
+						0, 0, 0,
+						DRI2_EXCHANGE_COMPLETE,
+						flip->client ? flip->event_complete : NULL,
+						flip->event_data);
+				sna_dri_frame_event_info_free(flip);
 			}
-		} else {
+		} else
 			sna_dri_frame_event_info_free(flip);
-		}
 		break;
 
 #if DRI2INFOREC_VERSION >= 7
@@ -1296,31 +1300,10 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	     (long long)divisor,
 	     (long long)remainder));
 
-	/* Drawable not displayed... just complete the swap */
+	/* XXX In theory we can just exchange pixmaps.... */
 	pipe = sna_dri_get_pipe(draw);
-	if (pipe == -1) {
-		RegionRec region;
-
-		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
-
-		sna_dri_exchange_attachment(front, back);
-		get_private(back)->pixmap = get_private(front)->pixmap;
-		get_private(front)->pixmap = NULL;
-		set_bo(get_private(back)->pixmap, get_private(back)->bo);
-
-		/* XXX can we query whether we need to process damage? */
-		region.extents.x1 = draw->x;
-		region.extents.y1 = draw->y;
-		region.extents.x2 = draw->x + draw->width;
-		region.extents.y2 = draw->y + draw->height;
-		region.data = NULL;
-		DamageRegionAppend(draw, &region);
-		DamageRegionProcessPending(draw);
-
-		DRI2SwapComplete(client, draw, 0, 0, 0,
-				 DRI2_EXCHANGE_COMPLETE, func, data);
-		return TRUE;
-	}
+	if (pipe == -1)
+		return FALSE;
 
 	/* Truncate to match kernel interfaces; means occasional overflow
 	 * misses, but that's generally not a big deal */
commit 28c4fb0b70e098972af972d30a660d167a8973ad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 22 09:49:53 2012 +0100

    sna: Swallow disconnection event upon mode restoration
    
    So that we can VT switch back to X even though the output configuration
    has now changed.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index cacacdf..0c73c36 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -447,8 +447,10 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(-ret));
 		ret = FALSE;
-	} else
-		ret = sna_crtc->active = sna_crtc_is_bound(sna, crtc);
+	} else {
+		crtc->enabled = sna_crtc->active = sna_crtc_is_bound(sna, crtc);
+		ret = TRUE;
+	}
 
 	if (crtc->scrn->pScreen)
 		xf86_reload_cursors(crtc->scrn->pScreen);
commit b17c44c8d234cb2c651e88c7892bcd5b33139c1c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 22 09:43:51 2012 +0100

    sna: Remove incorrect assertion for mode restoration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index e302d70..cacacdf 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -683,8 +683,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		if (!bo)
 			return FALSE;
 
-		/* recreate the fb in case the size has changed */
-		assert(bo->delta == 0);
+		/* XXX recreate the fb in case the size has changed? */
 		sna_mode->fb_id = get_fb(sna, bo,
 					 scrn->virtualX, scrn->virtualY);
 		if (sna_mode->fb_id == 0)
commit 49b7742999ee11b0c36754ea10bc5609ebe8c609
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 22 09:40:25 2012 +0100

    sna: Force config restoration after VT switch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index c213ff4..9ec3ecf 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -728,8 +728,8 @@ static void sna_leave_vt(int scrnIndex, int flags)
 	DBG(("%s\n", __FUNCTION__));
 
 	xf86RotateFreeShadow(scrn);
-
 	xf86_hide_cursors(scrn);
+	sna_mode_remove_fb(sna);
 
 	ret = drmDropMaster(sna->kgem.fd);
 	if (ret)
commit 952e3dcba3a651d22d44dbb039ad97e111d9bc63
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 21 19:48:52 2012 +0100

    sna: Only override active on DPMSModeOff
    
    Along the enable path we then only want to change the value if we
    actually call sna_crtc_apply.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 002eba3..e302d70 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -511,9 +511,10 @@ sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 	DBG(("%s(pipe %d, dpms mode -> %d):= active=%d\n",
 	     __FUNCTION__, sna_crtc->pipe, mode, mode == DPMSModeOn));
 
-	sna_crtc->active = false;
 	if (mode != DPMSModeOff)
 		sna_crtc_restore(sna_crtc->sna);
+	else
+		sna_crtc->active = false;
 }
 
 static struct kgem_bo *sna_create_bo_for_fbcon(struct sna *sna,
commit afdaf184594bfe3633305969eb1166c28e1006bf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 21 14:32:39 2012 +0100

    sna: Add a log message for the change of CRTC mode
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index d850025..002eba3 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -385,11 +385,10 @@ mode_to_kmode(drmModeModeInfoPtr kmode, DisplayModePtr mode)
 static Bool
 sna_crtc_apply(xf86CrtcPtr crtc)
 {
-	ScrnInfoPtr scrn = crtc->scrn;
-	struct sna *sna = to_sna(scrn);
+	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
-	struct sna_mode *mode = &sna->mode;
 	xf86CrtcConfigPtr xf86_config = XF86_CRTC_CONFIG_PTR(crtc->scrn);
+	struct sna_mode *mode = &sna->mode;
 	uint32_t output_ids[16];
 	int output_count = 0;
 	int fb_id, x, y;
@@ -427,6 +426,12 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		y = 0;
 	}
 
+	xf86DrvMsg(crtc->scrn->scrnIndex, X_INFO,
+		   "switch to mode %dx%d on crtc %d (pipe %d)\n",
+		   sna_crtc->kmode.hdisplay,
+		   sna_crtc->kmode.vdisplay,
+		   crtc_id(sna_crtc), sna_crtc->pipe);
+
 	DBG(("%s: applying crtc [%d] mode=%dx%d@%d, fb=%d%s update to %d outputs\n",
 	     __FUNCTION__, crtc_id(sna_crtc),
 	     sna_crtc->kmode.hdisplay,
@@ -445,8 +450,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 	} else
 		ret = sna_crtc->active = sna_crtc_is_bound(sna, crtc);
 
-	if (scrn->pScreen)
-		xf86_reload_cursors(scrn->pScreen);
+	if (crtc->scrn->pScreen)
+		xf86_reload_cursors(crtc->scrn->pScreen);
 
 	return ret;
 }
@@ -2210,5 +2215,7 @@ bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
 	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
 		return false;
 
+	DBG(("%s: mode valid?=%d, fb attached?=%d\n", __FUNCTION__,
+	     mode.mode_valid, sna->mode.fb_id == mode.fb_id));
 	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
 }
commit 9fb18462ec1428a486f998585e7b4caf4dd08f53
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 21 14:12:23 2012 +0100

    sna: Confirm that the modeset takes
    
    If we attempt to change to a mode with a disabled connector the kernel
    will silently switch off that connector (and crtc) and report that the
    modeswitch is successful.
    
    Reported-by: Kyle Hill <kyle.hill at tacomafia.net>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=50078
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 3f8beea..d850025 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -442,10 +442,8 @@ sna_crtc_apply(xf86CrtcPtr crtc)
 		xf86DrvMsg(crtc->scrn->scrnIndex, X_ERROR,
 			   "failed to set mode: %s\n", strerror(-ret));
 		ret = FALSE;
-	} else {
-		sna_crtc->active = 1;
-		ret = TRUE;
-	}
+	} else
+		ret = sna_crtc->active = sna_crtc_is_bound(sna, crtc);
 
 	if (scrn->pScreen)
 		xf86_reload_cursors(scrn->pScreen);
@@ -508,8 +506,8 @@ sna_crtc_dpms(xf86CrtcPtr crtc, int mode)
 	DBG(("%s(pipe %d, dpms mode -> %d):= active=%d\n",
 	     __FUNCTION__, sna_crtc->pipe, mode, mode == DPMSModeOn));
 
-	sna_crtc->active = mode == DPMSModeOn;
-	if (mode == DPMSModeOn)
+	sna_crtc->active = false;
+	if (mode != DPMSModeOff)
 		sna_crtc_restore(sna_crtc->sna);
 }
 
commit cd3b48854403ee63723ef570c1a7f9f8447e7105
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 19 12:56:45 2012 +0100

    sna/gen7: Trim a dead assignment
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 362ddff..9eed660 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1663,7 +1663,7 @@ inline static int gen7_get_rectangles(struct sna *sna,
 				      int want,
 				      void (*emit_state)(struct sna *sna, const struct sna_composite_op *op))
 {
-	int rem = vertex_space(sna);
+	int rem;
 
 start:
 	rem = vertex_space(sna);
commit cf79cd3e27b7077f67f202b16ed39eb1cc70ac65
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat May 19 12:44:11 2012 +0100

    sna: Emit a new batch if we need to clip after PolyText without space
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8b1ab65..02c11cf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11266,7 +11266,7 @@ skip:
 		if (++extents == last_extents)
 			break;
 
-		if (kgem_check_batch(&sna->kgem, 3)) {
+		if (kgem_check_batch(&sna->kgem, 3 + 5)) {
 			b = sna->kgem.batch + sna->kgem.nbatch;
 			sna->kgem.nbatch += 3;
 
commit f91dcc44dcc15850f82666b1bcdd27182400e7dc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 18 20:09:41 2012 +0100

    sna: Give the proxy a unique name
    
    So that if we cache the current destination bo (for example, gen3) then
    a new proxy (or even just a new batchbuffer) will indeed cause the
    destination buffer to be updated.
    
    Reported-and-tested-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48636
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index df69b90..470dd24 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3499,7 +3499,8 @@ void kgem_clear_dirty(struct kgem *kgem)
 	}
 }
 
-struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
+struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
+				  struct kgem_bo *target,
 				  int offset, int length)
 {
 	struct kgem_bo *bo;
@@ -3512,6 +3513,7 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	if (bo == NULL)
 		return NULL;
 
+	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->reusable = false;
 	bo->size.bytes = length;
 
@@ -3903,7 +3905,7 @@ done:
 	bo->used = ALIGN(bo->used, 64);
 	assert(bo->mem);
 	*ret = (char *)bo->mem + offset;
-	return kgem_create_proxy(&bo->base, offset, size);
+	return kgem_create_proxy(kgem, &bo->base, offset, size);
 }
 
 bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0c26630..0a95da7 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -195,7 +195,8 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
 
 struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
-struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
+struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
+				  struct kgem_bo *target,
 				  int offset, int length);
 
 struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 32d26c8..a52cfb5 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -258,7 +258,8 @@ sna_render_finish_solid(struct sna *sna, bool force)
 	DBG(("sna_render_finish_solid reset\n"));
 
 	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
-	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
+	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
+					 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	if (force)
 		cache->size = 1;
@@ -308,7 +309,7 @@ sna_render_get_solid(struct sna *sna, uint32_t color)
 	DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
 
 create:
-	cache->bo[i] = kgem_create_proxy(cache->cache_bo,
+	cache->bo[i] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
 					 i*sizeof(uint32_t), sizeof(uint32_t));
 	cache->bo[i]->pitch = 4;
 
@@ -331,7 +332,8 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 
 	for (i = 0; i < 256; i++) {
 		color[i] = i << 24;
-		cache->bo[i] = kgem_create_proxy(cache->cache_bo,
+		cache->bo[i] = kgem_create_proxy(&sna->kgem,
+						 cache->cache_bo,
 						 sizeof(uint32_t)*i,
 						 sizeof(uint32_t));
 		cache->bo[i]->pitch = 4;
@@ -356,7 +358,8 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	 * zeroth slot simplifies some of the checks.
 	 */
 	cache->color[0] = 0xffffffff;
-	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
+	cache->bo[0] = kgem_create_proxy(&sna->kgem, cache->cache_bo,
+					 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	cache->dirty = 1;
 	cache->size = 1;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 880e173..7feaa24 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -870,7 +870,7 @@ sna_render_pixmap_partial(struct sna *sna,
 	}
 
 	/* How many tiles across are we? */
-	channel->bo = kgem_create_proxy(bo,
+	channel->bo = kgem_create_proxy(&sna->kgem, bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
 	if (channel->bo == NULL)
@@ -989,7 +989,7 @@ sna_render_picture_partial(struct sna *sna,
 		return 0;
 
 	/* How many tiles across are we? */
-	channel->bo = kgem_create_proxy(bo,
+	channel->bo = kgem_create_proxy(&sna->kgem, bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
 	if (channel->bo == NULL)
@@ -1821,7 +1821,7 @@ sna_render_composite_redirect(struct sna *sna,
 			}
 
 			/* How many tiles across are we? */
-			op->dst.bo = kgem_create_proxy(op->dst.bo,
+			op->dst.bo = kgem_create_proxy(&sna->kgem, op->dst.bo,
 						       box.y1 * op->dst.bo->pitch + offset,
 						       h * op->dst.bo->pitch);
 			if (!op->dst.bo) {
commit ee073d613bba38f90951405d5ecddfcf3ac5e043
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 18 15:19:00 2012 +0100

    sna/traps: Fix processing of full-steps
    
    A missing factor of Y-height caused the computation of coverage for the
    spans to be completely wrong. This affects the vertical segments of
    rounded rectangles, for instance.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 357c4c4..c0565fa 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -607,14 +607,14 @@ cell_list_add_span(struct cell_list *cells,
 
 	cell = cell_list_find(cells, ix1);
 	if (ix1 != ix2) {
-		cell->uncovered_area += 2*fx1;
+		cell->uncovered_area += 2*fx1*FAST_SAMPLES_Y;
 		cell->covered_height += FAST_SAMPLES_Y;
 
 		cell = cell_list_find(cells, ix2);
-		cell->uncovered_area -= 2*fx2;
+		cell->uncovered_area -= 2*fx2*FAST_SAMPLES_Y;
 		cell->covered_height -= FAST_SAMPLES_Y;
 	} else
-		cell->uncovered_area += 2*(fx1-fx2);
+		cell->uncovered_area += 2*(fx1-fx2)*FAST_SAMPLES_Y;
 }
 
 static void
@@ -1025,6 +1025,7 @@ nonzero_subrow(struct active_list *active, struct cell_list *coverages)
 		} else {
 			edge->prev->next = next;
 			next->prev = edge->prev;
+			active->min_height = -1;
 		}
 
 		edge = next;
@@ -1718,7 +1719,7 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 	int stride = scratch->devKind;
 	int width = scratch->drawable.width;
 
-	__DBG(("%s: mono=%d, buf=%d\n", __FUNCTION__, mono, buf));
+	__DBG(("%s: mono=%d, buf?=%d\n", __FUNCTION__, mono, buf != NULL));
 	assert(!mono);
 	assert(converter->ymin == 0);
 	assert(converter->xmin == 0);
@@ -1750,9 +1751,9 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 			do_full_step = can_full_step(active);
 		}
 
-		__DBG(("%s: y=%d [%d], do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
+		__DBG(("%s: y=%d, do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
 		       __FUNCTION__,
-		       i, i+ymin, do_full_step,
+		       i, do_full_step,
 		       polygon->y_buckets[i] != NULL,
 		       active->min_height,
 		       active->is_vertical));
commit 8ba800c63906fb29d34f40b9437092a665bffb14
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 22:46:18 2012 +0100

    sna: Don't consider uploading inplace if the dst bo is unmappable
    
    Handle (and take advantage of) the fallback at the high level rather
    than masquerading an inplace write.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b671b0..8b1ab65 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2726,6 +2726,9 @@ static bool upload_inplace(struct sna *sna,
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
+		if (!kgem_bo_can_map(&sna->kgem, priv->gpu_bo))
+			return false;
+
 		if (!kgem_bo_is_busy(priv->gpu_bo))
 			return true;
 
commit dad24721a13ce3a357e8ddae3c2dea61045f6fc2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 21:44:22 2012 +0100

    Revert "sna: Always try to operate inplace if we an LLC gpu bo"
    
    This reverts commit 10b4a9bb5f46ab9d9c8b165084ce4174b54a8d39 as it
    causes a regression for pixel data uploads to active buffers.

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8915724..1b671b0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -904,9 +904,6 @@ static inline bool pixmap_inplace(struct sna *sna,
 	if (priv->mapped)
 		return true;
 
-	if (sna->kgem.has_llc && pixmap != sna->front)
-		return !priv->cpu_bo;
-
 	return (pixmap->devKind * pixmap->drawable.height >> 12) >
 		sna->kgem.half_cpu_cache_pages;
 }
@@ -1266,9 +1263,6 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 	}
 
-	if (sna->kgem.has_llc && pixmap != sna->front)
-		return !priv->cpu_bo;
-
 	DBG(("%s: (%dx%d), inplace? %d\n",
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
commit 681c6e72412fff96b203a09be6ac8d393f3489a5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 19:43:02 2012 +0100

    sna: Track flushing status of live bo
    
    Currently we only move a bo with an outstanding kernel flush onto the
    flushing list if it is no longer in use. This leaves us potentially
    stalling on a flush if we try then to write to the object believing it
    to be retired and idle.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 759860c..df69b90 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -303,9 +303,9 @@ void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 		assert(list_is_empty(&bo->vma));
 		bo->rq = NULL;
 		list_del(&bo->request);
-		bo->needs_flush = bo->flush;
 	}
 
+	bo->needs_flush = false;
 	bo->domain = DOMAIN_NONE;
 }
 
@@ -1280,7 +1280,6 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 	bool retired = false;
 
 	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
-		assert(bo->refcnt == 0);
 		assert(bo->rq == &_kgem_static_request);
 		assert(bo->exec == NULL);
 
@@ -1289,16 +1288,19 @@ static bool kgem_retire__flushing(struct kgem *kgem)
 
 		DBG(("%s: moving %d from flush to inactive\n",
 		     __FUNCTION__, bo->handle));
-		if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
-			bo->needs_flush = false;
-			bo->domain = DOMAIN_NONE;
-			bo->rq = NULL;
-			list_del(&bo->request);
-			kgem_bo_move_to_inactive(kgem, bo);
-		} else
-			kgem_bo_free(kgem, bo);
+		bo->needs_flush = false;
+		bo->domain = DOMAIN_NONE;
+		bo->rq = NULL;
+		list_del(&bo->request);
 
-		retired = true;
+		if (!bo->refcnt) {
+			assert(bo->reusable);
+			if (kgem_bo_set_purgeable(kgem, bo)) {
+				kgem_bo_move_to_inactive(kgem, bo);
+				retired = true;
+			} else
+				kgem_bo_free(kgem, bo);
+		}
 	}
 
 	return retired;
@@ -1331,12 +1333,18 @@ static bool kgem_retire__requests(struct kgem *kgem)
 			assert(bo->domain == DOMAIN_GPU);
 
 			list_del(&bo->request);
-			bo->rq = NULL;
 
 			if (bo->needs_flush)
 				bo->needs_flush = kgem_busy(kgem, bo->handle);
-			if (!bo->needs_flush)
+			if (bo->needs_flush) {
+				DBG(("%s: moving %d to flushing\n",
+				     __FUNCTION__, bo->handle));
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			} else {
 				bo->domain = DOMAIN_NONE;
+				bo->rq = NULL;
+			}
 
 			if (bo->refcnt)
 				continue;
@@ -1348,20 +1356,17 @@ static bool kgem_retire__requests(struct kgem *kgem)
 				continue;
 			}
 
-			if (bo->needs_flush) {
-				DBG(("%s: moving %d to flushing\n",
-				     __FUNCTION__, bo->handle));
-				list_add(&bo->request, &kgem->flushing);
-				bo->rq = &_kgem_static_request;
-			} else if (kgem_bo_set_purgeable(kgem, bo)) {
-				DBG(("%s: moving %d to inactive\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_move_to_inactive(kgem, bo);
-				retired = true;
-			} else {
-				DBG(("%s: closing %d\n",
-				     __FUNCTION__, bo->handle));
-				kgem_bo_free(kgem, bo);
+			if (!bo->needs_flush) {
+				if (kgem_bo_set_purgeable(kgem, bo)) {
+					DBG(("%s: moving %d to inactive\n",
+					     __FUNCTION__, bo->handle));
+					kgem_bo_move_to_inactive(kgem, bo);
+					retired = true;
+				} else {
+					DBG(("%s: closing %d\n",
+					     __FUNCTION__, bo->handle));
+					kgem_bo_free(kgem, bo);
+				}
 			}
 		}
 
commit d99502a33d5bdbad010b7a036c1aee989fe29947
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 17:56:43 2012 +0100

    sna/glyphs: Pass the extents to the backend for preparing to composite glyphs
    
    This information should not be required, but it might come in handy, so
    pass it along.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 07b2a94..2af7e80 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -920,7 +920,7 @@ next_image:
 					if (!sna->render.composite(sna, PictOpAdd,
 								   this_atlas, NULL, mask,
 								   0, 0, 0, 0, 0, 0,
-								   0, 0,
+								   width, height,
 								   &tmp)) {
 						FreePicture(mask, 0);
 						return FALSE;
commit fb21c2df1a10d66f115a5978b0db508058fdc412
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 17 16:30:31 2012 +0100

    sna/io: Assert that we do not replace a bo->flush object
    
    These should be pinned by the higher layers and so we should never be
    attempting to replace them. If we do replace a bo->flush, then we will
    end up miscounting outstanding flush bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index b4e59d9..3f39de5 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -1147,6 +1147,7 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.height,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
+	assert(!bo->flush);
 
 	if ((!kgem_bo_can_map(kgem, bo) || kgem_bo_is_busy(bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
@@ -1222,6 +1223,7 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
 	     pixmap->drawable.height,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
+	assert(!bo->flush);
 
 	if (kgem_bo_is_busy(bo)) {
 		struct kgem_bo *new_bo;
commit 596c0a68709a93bb376647c1b566e9df4f23b35d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 15 18:53:39 2012 +0100

    sna: Assign GCops after checking for fallback to pass sanity checks
    
    We assert that prior to installing the fallback GCops the current ops
    are the default set. This is broken if we point GCops to our GPU ops,
    but then fallback. So check for the fallback first.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ab26dd0..8915724 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6751,13 +6751,13 @@ spans_fallback:
 				}
 				assert(gc->miTranslate);
 
-				gc->ops = &sna_gc_ops__tmp;
 				DBG(("%s: miZeroLine (solid dash)\n", __FUNCTION__));
 				if (!sna_fill_init_blt(&fill,
 						       data.sna, data.pixmap,
 						       data.bo, gc->alu, color))
 					goto fallback;
 
+				gc->ops = &sna_gc_ops__tmp;
 				miZeroDashLine(drawable, gc, mode, n, pt);
 				fill.done(data.sna, &fill);
 
commit 3c9759ef2ad755bbe720d4aa031ec67dbc3b7734
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 14 14:12:06 2012 +0100

    sna/gen3: Fix pre-multiplication of opacity masks
    
    The return type for the multiplication was only 8-bits wide dating back
    to the time when the function just computed a single channel and didn't
    try to blend the output back into the argb pixel value. Inlining the
    shift into the function means that we need the output to be 32-bits wide
    instead.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49887
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index a0de1ee..b8f0970 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2543,7 +2543,7 @@ mul_8_8(uint8_t a, uint8_t b)
     return ((t >> 8) + t) >> 8;
 }
 
-static inline uint8_t multa(uint32_t s, uint32_t m, int shift)
+static inline uint32_t multa(uint32_t s, uint32_t m, int shift)
 {
 	return mul_8_8((s >> shift) & 0xff, m >> 24) << shift;
 }
@@ -2944,6 +2944,7 @@ gen3_render_composite(struct sna *sna,
 
 					tmp->src.u.gen3.type = SHADER_CONSTANT;
 					tmp->src.u.gen3.mode = v;
+					tmp->src.is_opaque = false;
 
 					tmp->mask.u.gen3.type = SHADER_NONE;
 				}
commit 053bd5bf2425aed44e1c2bb981d98ee5171211a0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 14 12:08:09 2012 +0100

    sna: Use the correct storage for box_from_seg
    
    Fixes regression from 3aa98289e3a2 with clipped segments. As we clipped
    the empty box rather than the segment, we never drew any outlines.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 776cba3..ab26dd0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6918,7 +6918,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 					nbox = ARRAY_SIZE(boxes);
 				n -= nbox;
 				do {
-					box_from_seg(b, seg, gc);
+					box_from_seg(b, seg++, gc);
 					if (b->y2 > b->y1 && b->x2 > b->x1) {
 						b->x1 += dx;
 						b->x2 += dx;
@@ -6926,7 +6926,6 @@ sna_poly_segment_blt(DrawablePtr drawable,
 						b->y2 += dy;
 						b++;
 					}
-					seg++;
 				} while (--nbox);
 
 				if (b != boxes) {
@@ -6943,10 +6942,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 					nbox = ARRAY_SIZE(boxes);
 				n -= nbox;
 				do {
-					box_from_seg(b, seg, gc);
-					if (b->y2 > b->y1 && b->x2 > b->x1)
-						b++;
-					seg++;
+					box_from_seg(b++, seg++, gc);
 				} while (--nbox);
 
 				if (b != boxes) {
@@ -6972,7 +6968,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 			do {
 				BoxRec box;
 
-				box_from_seg(b, seg, gc);
+				box_from_seg(&box, seg++, gc);
 				box.x1 += drawable->x;
 				box.x2 += drawable->x;
 				box.y1 += drawable->y;
@@ -6998,12 +6994,10 @@ sna_poly_segment_blt(DrawablePtr drawable,
 						}
 					}
 				}
-
-				seg++;
 			} while (--n);
 		} else {
 			do {
-				box_from_seg(b, seg, gc);
+				box_from_seg(b, seg++, gc);
 				b->x1 += drawable->x;
 				b->x2 += drawable->x;
 				b->y1 += drawable->y;
@@ -7020,8 +7014,6 @@ sna_poly_segment_blt(DrawablePtr drawable,
 						b = boxes;
 					}
 				}
-
-				seg++;
 			} while (--n);
 		}
 		RegionUninit(&clip);
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 8d5594d..357c4c4 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2475,7 +2475,7 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		depth = maskFormat->depth;
 		if (depth == 1) {
 			format = PIXMAN_a1;
-		} else if (depth < 8) {
+		} else if (depth <= 4) {
 			format = PIXMAN_a4;
 			depth = 4;
 		} else
commit b654b8794db7b00666ce5c59535a9302932c483b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 14 11:36:15 2012 +0100

    sna/trapezoids: Fix picture creation for fallback trapezoids
    
    Being a little lax in not updating the format after upconversion to
    PICT_a8, meant we were trying to composite with a depth 1, 8 bpp a8
    image and thoroughly confusing everybody when creating the upload
    trapezoid mask.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 3bc3284..8d5594d 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2473,7 +2473,13 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		bounds.x1 -= dst->pDrawable->x;
 		bounds.y1 -= dst->pDrawable->y;
 		depth = maskFormat->depth;
-		format = maskFormat->format | (BitsPerPixel(depth) << 24);
+		if (depth == 1) {
+			format = PIXMAN_a1;
+		} else if (depth < 8) {
+			format = PIXMAN_a4;
+			depth = 4;
+		} else
+			format = PIXMAN_a8;
 
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
@@ -2511,15 +2517,22 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 								       0, 0,
 								       0, 0,
 								       width, height);
+						format = PIXMAN_a8;
+						depth = 8;
 						pixman_image_unref (a8);
 					}
 				}
 
 				pixman_image_unref(image);
 			}
+			if (format != PIXMAN_a8) {
+				screen->DestroyPixmap(scratch);
+				return;
+			}
 		} else {
 			scratch = sna_pixmap_create_unattached(screen,
-							       width, height, depth);
+							       width, height,
+							       depth);
 			if (!scratch)
 				return;
 
@@ -2532,6 +2545,7 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 					pixman_rasterize_trapezoid(image,
 								   (pixman_trapezoid_t *)traps,
 								   -bounds.x1, -bounds.y1);
+				pixman_image_unref(image);
 			}
 		}
 
commit 80567f61afe77a003e663b17c1fc6b6c3ed04042
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 14 10:40:38 2012 +0100

    sna/trapezoids: Do not reduce SRC to a clear pixmap to unbounded
    
    As we instruct the migration code to drop the clear when copying from
    the GPU to the CPU, we then need to emit the zeros during the span
    writing.
    
    Fixes some occasional corruption behind complex clip masks.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c06c29d..3bc3284 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4176,12 +4176,10 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 			return true;
 		if (priv->clear && priv->clear_color == 0xff)
 			op = PictOpSrc;
-		if ((color >> 24) == 0)
-			return true;
 		unbounded = true;
 		break;
 	case PictOpSrc:
-		unbounded = !(priv->clear && priv->clear_color == 0);
+		unbounded = true;
 		break;
 	default:
 		DBG(("%s: fallback -- can not perform op [%d] in place\n",
@@ -4270,7 +4268,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	DBG(("%s: move-to-cpu\n", __FUNCTION__));
 	region.data = NULL;
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region,
-					     op == PictOpSrc ? MOVE_WRITE : MOVE_WRITE | MOVE_READ))
+					     op == PictOpSrc ? MOVE_WRITE | MOVE_INPLACE_HINT : MOVE_WRITE | MOVE_READ))
 		return true;
 
 	get_drawable_deltas(dst->pDrawable, pixmap, &dst_x, &dst_y);
commit 067e9375809ea6cfa0c0b5e2159b359535c3b362
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 14 10:00:00 2012 +0100

    sna: Trim unused partial buffer uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8eee2b8..759860c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1091,6 +1091,17 @@ static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 	bo->reusable = true;
 }
 
+static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
+{
+	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+
+	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
+	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
+
+	if (ALIGN(bo->delta + bo->size.bytes, 64) == io->used)
+		io->used = bo->delta;
+}
+
 static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
@@ -2882,6 +2893,8 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->proxy) {
 		_list_del(&bo->vma);
 		_list_del(&bo->request);
+		if (bo->io && bo->exec == NULL)
+			_kgem_bo_delete_partial(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
 		free(bo);
commit effb0b9ec579624e38f0007cd7096e7da04048ca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 13 19:29:44 2012 +0100

    sna: Tweak usage of kgem_bo_can_map vs kgem_bo_mapped
    
    More often than not we only want to consider whether we can map the bo
    and decide whether doing so is the better option. Whether the bo is
    already mapped is not such an issue any more with the throttling.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 2931f8a..0c26630 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -445,7 +445,8 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 
 static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));
+	DBG_HDR(("%s: map=%p, tiling=%d, domain=%d\n",
+		 __FUNCTION__, bo->map, bo->tiling, bo->domain));
 
 	if (bo->map == NULL)
 		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index c39b1f1..b4e59d9 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -531,7 +531,7 @@ static bool upload_inplace(struct kgem *kgem,
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) {
+	if (kgem_bo_is_busy(bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
@@ -1148,7 +1148,7 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
 
-	if ((!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) &&
+	if ((!kgem_bo_can_map(kgem, bo) || kgem_bo_is_busy(bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
 		return true;
 
commit 37aa1e1291127ff4d27407fac936af64d0e353a8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 13 16:59:30 2012 +0100

    sna: Simplify partial buffer tracking
    
    As we only want to track partial buffers with asynchronous reuse,
    exclude all others from the active buffers list. Secondly, keep the list
    in most-recently-used order rather than by size.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 04adbf4..8eee2b8 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -114,60 +114,6 @@ static inline int bytes(struct kgem_bo *bo)
 #define bucket(B) (B)->size.pages.bucket
 #define num_pages(B) (B)->size.pages.count
 
-#ifndef NDEBUG
-static bool validate_partials(struct kgem *kgem)
-{
-	struct kgem_partial_bo *bo, *next;
-
-	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
-		assert(next->base.list.prev == &bo->base.list);
-		assert(bo->base.refcnt >= 1);
-		assert(bo->base.io);
-
-		if (&next->base.list == &kgem->active_partials)
-			break;
-
-		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
-			ErrorF("active error: this rem: %d, next rem: %d\n",
-			       bytes(&bo->base) - bo->used,
-			       bytes(&next->base) - next->used);
-			goto err;
-		}
-	}
-
-	list_for_each_entry_safe(bo, next, &kgem->inactive_partials, base.list) {
-		assert(next->base.list.prev == &bo->base.list);
-		assert(bo->base.io);
-		assert(bo->base.refcnt == 1);
-
-		if (&next->base.list == &kgem->inactive_partials)
-			break;
-
-		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
-			ErrorF("inactive error: this rem: %d, next rem: %d\n",
-			       bytes(&bo->base) - bo->used,
-			       bytes(&next->base) - next->used);
-			goto err;
-		}
-	}
-
-	return true;
-
-err:
-	ErrorF("active partials:\n");
-	list_for_each_entry(bo, &kgem->active_partials, base.list)
-		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
-		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
-	ErrorF("inactive partials:\n");
-	list_for_each_entry(bo, &kgem->inactive_partials, base.list)
-		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
-		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
-	return false;
-}
-#else
-#define validate_partials(kgem) 1
-#endif
-
 static void kgem_sna_reset(struct kgem *kgem)
 {
 	struct sna *sna = container_of(kgem, struct sna, kgem);
@@ -643,8 +589,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 
+	list_init(&kgem->batch_partials);
 	list_init(&kgem->active_partials);
-	list_init(&kgem->inactive_partials);
+	list_init(&kgem->cached_partials);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -1269,33 +1216,6 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
-static void bubble_sort_partial(struct list *head, struct kgem_partial_bo *bo)
-{
-	int remain = bytes(&bo->base) - bo->used;
-
-	while (bo->base.list.prev != head) {
-		struct kgem_partial_bo *p;
-
-		p = list_entry(bo->base.list.prev,
-			       struct kgem_partial_bo,
-			       base.list);
-		if (remain <= bytes(&p->base) - p->used)
-			break;
-
-		assert(p->base.list.next == &bo->base.list);
-		bo->base.list.prev = p->base.list.prev;
-		p->base.list.prev->next = &bo->base.list;
-		p->base.list.prev = &bo->base.list;
-
-		p->base.list.next = bo->base.list.next;
-		bo->base.list.next->prev = &p->base.list;
-		bo->base.list.next = &p->base.list;
-
-		assert(p->base.list.next->prev == &p->base.list);
-		assert(bo->base.list.prev->next == &bo->base.list);
-	}
-}
-
 static void kgem_partial_buffer_release(struct kgem *kgem,
 					struct kgem_partial_bo *bo)
 {
@@ -1314,55 +1234,40 @@ static void kgem_partial_buffer_release(struct kgem *kgem,
 	}
 }
 
-static void kgem_retire_partials(struct kgem *kgem)
+static bool kgem_retire__partials(struct kgem *kgem)
 {
-	struct kgem_partial_bo *bo, *next;
+	struct list *list[] = {
+		&kgem->active_partials,
+		&kgem->cached_partials,
+		NULL
+	}, **head = list;
+	bool retired = false;
 
-	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
-		assert(next->base.list.prev == &bo->base.list);
-		assert(bo->base.io);
+	do while (!list_is_empty(*head)) {
+		struct kgem_partial_bo *bo =
+			list_last_entry(*head,
+					struct kgem_partial_bo,
+					base.list);
 
 		if (bo->base.rq)
-			continue;
+			break;
 
 		DBG(("%s: releasing upload cache for handle=%d? %d\n",
 		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
+		list_del(&bo->base.list);
 		kgem_partial_buffer_release(kgem, bo);
+		kgem_bo_unref(kgem, &bo->base);
+		retired = true;
+	} while (*++head);
 
-		assert(bo->base.refcnt > 0);
-		if (bo->base.refcnt != 1)
-			continue;
-
-		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
-		     bo->base.handle, bo->used, bytes(&bo->base)));
-
-		assert(bo->base.refcnt == 1);
-		assert(bo->base.exec == NULL);
-		if (!bo->mmapped || bo->base.presumed_offset == 0) {
-			list_del(&bo->base.list);
-			kgem_bo_unref(kgem, &bo->base);
-			continue;
-		}
-
-		bo->base.dirty = false;
-		bo->base.needs_flush = false;
-		bo->used = 0;
-
-		DBG(("%s: transferring partial handle=%d to inactive\n",
-		     __FUNCTION__, bo->base.handle));
-		list_move_tail(&bo->base.list, &kgem->inactive_partials);
-		bubble_sort_partial(&kgem->inactive_partials, bo);
-	}
-	assert(validate_partials(kgem));
+	return retired;
 }
 
-bool kgem_retire(struct kgem *kgem)
+static bool kgem_retire__flushing(struct kgem *kgem)
 {
 	struct kgem_bo *bo, *next;
 	bool retired = false;
 
-	DBG(("%s\n", __FUNCTION__));
-
 	list_for_each_entry_safe(bo, next, &kgem->flushing, request) {
 		assert(bo->refcnt == 0);
 		assert(bo->rq == &_kgem_static_request);
@@ -1385,6 +1290,14 @@ bool kgem_retire(struct kgem *kgem)
 		retired = true;
 	}
 
+	return retired;
+}
+
+static bool kgem_retire__requests(struct kgem *kgem)
+{
+	struct kgem_bo *bo;
+	bool retired = false;
+
 	while (!list_is_empty(&kgem->requests)) {
 		struct kgem_request *rq;
 
@@ -1461,10 +1374,21 @@ bool kgem_retire(struct kgem *kgem)
 		free(rq);
 	}
 
-	kgem_retire_partials(kgem);
+	return retired;
+}
+
+bool kgem_retire(struct kgem *kgem)
+{
+	bool retired = false;
+
+	DBG(("%s\n", __FUNCTION__));
+
+	retired |= kgem_retire__flushing(kgem);
+	retired |= kgem_retire__requests(kgem);
+	retired |= kgem_retire__partials(kgem);
 
-	kgem->need_retire = !list_is_empty(&kgem->requests);
 	DBG(("%s -- need_retire=%d\n", __FUNCTION__, kgem->need_retire));
+	kgem->need_retire = !list_is_empty(&kgem->requests);
 
 	kgem->retire(kgem);
 
@@ -1548,7 +1472,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+	list_for_each_entry_safe(bo, next, &kgem->batch_partials, base.list) {
 		DBG(("%s: partial handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
 		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
 		     bo->write, bo->mmapped));
@@ -1570,9 +1494,12 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 		if (bo->mmapped) {
 			assert(!bo->need_io);
-			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
+			if (bo->used + PAGE_SIZE <= bytes(&bo->base) &&
+			    (kgem->has_llc || !IS_CPU_MAP(bo->base.map))) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
+				list_move(&bo->base.list,
+					  &kgem->active_partials);
 				continue;
 			}
 			goto decouple;
@@ -1650,11 +1577,13 @@ static void kgem_finish_partials(struct kgem *kgem)
 decouple:
 		DBG(("%s: releasing handle=%d\n",
 		     __FUNCTION__, bo->base.handle));
-		list_del(&bo->base.list);
-		kgem_bo_unref(kgem, &bo->base);
+		if (!list_is_empty(&bo->base.vma)) {
+			list_move(&bo->base.list, &kgem->cached_partials);
+		} else {
+			list_del(&bo->base.list);
+			kgem_bo_unref(kgem, &bo->base);
+		}
 	}
-
-	assert(validate_partials(kgem));
 }
 
 static void kgem_cleanup(struct kgem *kgem)
@@ -1987,25 +1916,6 @@ void kgem_throttle(struct kgem *kgem)
 	kgem->need_throttle = 0;
 }
 
-static void kgem_expire_partial(struct kgem *kgem)
-{
-	kgem_retire_partials(kgem);
-	while (!list_is_empty(&kgem->inactive_partials)) {
-		struct kgem_partial_bo *bo =
-			list_first_entry(&kgem->inactive_partials,
-					 struct kgem_partial_bo,
-					 base.list);
-
-		DBG(("%s: discarding unused partial buffer: %d, last write? %d\n",
-		     __FUNCTION__, bytes(&bo->base), bo->write));
-		assert(bo->base.list.prev == &kgem->inactive_partials);
-		assert(bo->base.io);
-		assert(bo->base.refcnt == 1);
-		list_del(&bo->base.list);
-		kgem_bo_unref(kgem, &bo->base);
-	}
-}
-
 void kgem_purge_cache(struct kgem *kgem)
 {
 	struct kgem_bo *bo, *next;
@@ -2042,8 +1952,6 @@ bool kgem_expire_cache(struct kgem *kgem)
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
 
-	kgem_expire_partial(kgem);
-
 	if (kgem->need_purge)
 		kgem_purge_cache(kgem);
 
@@ -2136,7 +2044,6 @@ void kgem_cleanup_cache(struct kgem *kgem)
 
 	kgem_retire(kgem);
 	kgem_cleanup(kgem);
-	kgem_expire_partial(kgem);
 
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
 		while (!list_is_empty(&kgem->inactive[i]))
@@ -2967,22 +2874,6 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 	return NULL;
 }
 
-static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
-{
-	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
-
-	if (list_is_empty(&io->base.list))
-		return;
-
-	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
-	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
-
-	if (ALIGN(bo->delta + bo->size.bytes, 64) == io->used) {
-		io->used = bo->delta;
-		bubble_sort_partial(&kgem->active_partials, io);
-	}
-}
-
 void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, proxy? %d\n",
@@ -2991,8 +2882,6 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->proxy) {
 		_list_del(&bo->vma);
 		_list_del(&bo->request);
-		if (bo->io && (bo->exec == NULL || bo->proxy->rq == NULL))
-			_kgem_bo_delete_partial(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
 		free(bo);
@@ -3651,7 +3540,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (kgem->has_llc)
 		flags &= ~KGEM_BUFFER_INPLACE;
 
-	list_for_each_entry(bo, &kgem->active_partials, base.list) {
+	list_for_each_entry(bo, &kgem->batch_partials, base.list) {
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
 
@@ -3668,7 +3557,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->write = 0;
 			offset = 0;
 			bo->used = size;
-			bubble_sort_partial(&kgem->active_partials, bo);
 			goto done;
 		}
 
@@ -3689,11 +3577,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 
-		if (bo->used && bo->base.rq == NULL && bo->base.refcnt == 1) {
-			bo->used = 0;
-			bubble_sort_partial(&kgem->active_partials, bo);
-		}
-
 		if (bo->used + size <= bytes(&bo->base)) {
 			DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
 			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
@@ -3701,42 +3584,30 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->used += size;
 			goto done;
 		}
-
-		DBG(("%s: too small (%d < %d)\n",
-		     __FUNCTION__, bytes(&bo->base) - bo->used, size));
-		break;
 	}
 
 	if (flags & KGEM_BUFFER_WRITE) {
-		do list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
+		list_for_each_entry(bo, &kgem->active_partials, base.list) {
 			assert(bo->base.io);
-			assert(bo->base.refcnt == 1);
+			assert(bo->base.refcnt >= 1);
+			assert(bo->mmapped);
+			assert(!bo->base.vmap);
 
-			if (size > bytes(&bo->base))
-				continue;
-
-			if (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
-			    !bo->base.vmap) {
+			if ((bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
 				     __FUNCTION__, bo->write, flags));
 				continue;
 			}
 
-			DBG(("%s: reusing inactive partial buffer? size=%d, total=%d\n",
-			     __FUNCTION__, size, bytes(&bo->base)));
-			offset = 0;
-			bo->used = size;
-			list_move(&bo->base.list, &kgem->active_partials);
-
-			if (bo->mmapped) {
-				if (IS_CPU_MAP(bo->base.map))
-					kgem_bo_sync__cpu(kgem, &bo->base);
-				else
-					kgem_bo_sync__gtt(kgem, &bo->base);
+			if (bo->used + size <= bytes(&bo->base)) {
+				DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
+				     __FUNCTION__, bo->used, size, bytes(&bo->base)));
+				offset = bo->used;
+				bo->used += size;
+				list_move(&bo->base.list, &kgem->batch_partials);
+				goto done;
 			}
-
-			goto done;
-		} while (__kgem_throttle_retire(kgem, 0));
+		}
 	}
 
 #if !DBG_NO_MAP_UPLOAD
@@ -4005,32 +3876,13 @@ init:
 	offset = 0;
 
 	assert(list_is_empty(&bo->base.list));
-	list_add(&bo->base.list, &kgem->active_partials);
+	list_add(&bo->base.list, &kgem->batch_partials);
+
 	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
 
 done:
 	bo->used = ALIGN(bo->used, 64);
-	/* adjust the position within the list to maintain decreasing order */
-	alloc = bytes(&bo->base) - bo->used;
-	{
-		struct kgem_partial_bo *p, *first;
-
-		first = p = list_first_entry(&bo->base.list,
-					     struct kgem_partial_bo,
-					     base.list);
-		while (&p->base.list != &kgem->active_partials &&
-		       alloc < bytes(&p->base) - p->used) {
-			DBG(("%s: this=%d, right=%d\n",
-			     __FUNCTION__, alloc, bytes(&p->base) -p->used));
-			p = list_first_entry(&p->base.list,
-					     struct kgem_partial_bo,
-					     base.list);
-		}
-		if (p != first)
-			list_move_tail(&bo->base.list, &p->base.list);
-		assert(validate_partials(kgem));
-	}
 	assert(bo->mem);
 	*ret = (char *)bo->mem + offset;
 	return kgem_create_proxy(&bo->base, offset, size);
@@ -4082,7 +3934,6 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 			DBG(("%s: trimming partial buffer from %d to %d\n",
 			     __FUNCTION__, io->used, min));
 			io->used = min;
-			bubble_sort_partial(&kgem->active_partials, io);
 		}
 		bo->size.bytes -= stride;
 	}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 4958eff..2931f8a 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list active_partials, inactive_partials;
+	struct list batch_partials, active_partials, cached_partials;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
commit 7048a71fb6ff5e7c37e21a794f7b42d9a1da3473
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 13 15:42:28 2012 +0100

    sna: Flush batch if GPU is idle upon wakeup
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1467fa3..776cba3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12334,10 +12334,7 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
-	if (!sna->kgem.need_retire) {
-		kgem_submit(&sna->kgem);
-		sna->kgem.flush_now = 0;
-	}
+	sna_accel_wakeup_handler(sna, NULL);
 
 	if (sna_accel_do_flush(sna))
 		sna_accel_flush(sna);
@@ -12383,6 +12380,10 @@ void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 {
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
+	if (!sna->kgem.need_retire) {
+		kgem_submit(&sna->kgem);
+		sna->kgem.flush_now = 0;
+	}
 	if (sna->kgem.need_purge)
 		kgem_purge_cache(&sna->kgem);
 }
commit 12e340c4b8efa70f3305f489af30461b7c0e5edd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 13 11:01:35 2012 +0100

    sna: Optimise kgem_clear_dirty() by keeping dirty buffers at the front
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2abf9d5..04adbf4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -974,7 +974,7 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 	bo->exec = kgem_add_handle(kgem, bo);
 	bo->rq = kgem->next_request;
 
-	list_move(&bo->request, &kgem->next_request->buffers);
+	list_move_tail(&bo->request, &kgem->next_request->buffers);
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
@@ -3164,7 +3164,8 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			delta += bo->delta;
 			assert(bo->handle == bo->proxy->handle);
 			/* need to release the cache upon batch submit */
-			list_move(&bo->request, &kgem->next_request->buffers);
+			list_move_tail(&bo->request,
+				       &kgem->next_request->buffers);
 			bo->exec = &_kgem_dummy_exec;
 			bo = bo->proxy;
 		}
@@ -3193,6 +3194,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			DBG(("%s: marking handle=%d dirty\n",
 			     __FUNCTION__, bo->handle));
 			bo->needs_flush = bo->dirty = true;
+			list_move(&bo->request, &kgem->next_request->buffers);
 		}
 
 		delta += bo->presumed_offset;
@@ -3582,8 +3584,12 @@ void kgem_clear_dirty(struct kgem *kgem)
 	struct kgem_request *rq = kgem->next_request;
 	struct kgem_bo *bo;
 
-	list_for_each_entry(bo, &rq->buffers, request)
+	list_for_each_entry(bo, &rq->buffers, request) {
+		if (!bo->dirty)
+			break;
+
 		bo->dirty = false;
+	}
 }
 
 struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
commit b0dfd8223adee87e28b391a019effa7bb3630533
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 13 08:33:23 2012 +0100

    sna: Flush the batch before blocking if the GPU is idle
    
    If we have work pending for the GPU and we believe it is idle, just
    submit the batch in order to improve GPU/CPU concurrency and reduce
    latency.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b8ca214..1467fa3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12334,6 +12334,11 @@ void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
+	if (!sna->kgem.need_retire) {
+		kgem_submit(&sna->kgem);
+		sna->kgem.flush_now = 0;
+	}
+
 	if (sna_accel_do_flush(sna))
 		sna_accel_flush(sna);
 
commit 6924fc525d6bc82901cfed769c176b44c0bce024
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 11 14:04:09 2012 +0100

    sna: Fix off-by-one in computation of width for inplace trapezoids
    
    This led to the dropping of the last pixel for small area trapezoids,
    such as the right hand outline of buttons under ClearLooks.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48320
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 120e755..c06c29d 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1581,11 +1581,11 @@ inplace_subrow(struct active_list *active, int8_t *row,
 					if (xstart < FAST_SAMPLES_X * width) {
 						FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
 						row[ix] -= FAST_SAMPLES_X - fx;
-						if (fx && ix + 1< width)
+						if (fx && ix + 1 < width)
 							row[++ix] -= fx;
 
-						if (ix > *max)
-							*max = ix;
+						if (ix >= *max)
+							*max = ix + 1;
 
 						xstart = INT_MIN;
 					} else
@@ -1622,6 +1622,7 @@ inplace_subrow(struct active_list *active, int8_t *row,
 		} else {
 			edge->prev->next = next;
 			next->prev = edge->prev;
+			active->min_height = -1;
 		}
 
 		edge = next;
commit a58a543e84ed15c41f4fa7644be3ba7865d31b92
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 10 12:59:55 2012 +0100

    sna: handle vmap creation failures gracefully
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f6a34ba..2abf9d5 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3451,8 +3451,11 @@ static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 	if (read_only)
 		vmap.flags |= I915_VMAP_READ_ONLY;
 
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap))
+	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VMAP, &vmap)) {
+		DBG(("%s: failed to map %p + %d bytes: %d\n",
+		     __FUNCTION__, ptr, size, errno));
 		return 0;
+	}
 
 	return vmap.handle;
 }
@@ -3764,9 +3767,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			free(old);
 			bo->base.refcnt = 1;
 		} else {
-			if (!__kgem_bo_init(&bo->base,
-					    gem_create(kgem->fd, alloc),
-					    alloc)) {
+			uint32_t handle = gem_create(kgem->fd, alloc);
+			if (handle == 0 ||
+			    !__kgem_bo_init(&bo->base, handle, alloc)) {
 				free(bo);
 				return NULL;
 			}
@@ -3874,23 +3877,23 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (kgem->has_vmap) {
 		bo = partial_bo_alloc(alloc);
 		if (bo) {
-			if (!__kgem_bo_init(&bo->base,
-					    gem_vmap(kgem->fd, bo->mem,
-						     alloc * PAGE_SIZE, false),
-					    alloc)) {
+			uint32_t handle = gem_vmap(kgem->fd, bo->mem,
+						   alloc * PAGE_SIZE, false);
+			if (handle == 0 ||
+			    !__kgem_bo_init(&bo->base, handle, alloc)) {
 				free(bo);
-				return NULL;
-			}
-
-			DBG(("%s: created vmap handle=%d for buffer\n",
-			     __FUNCTION__, bo->base.handle));
+				bo = NULL;
+			} else {
+				DBG(("%s: created vmap handle=%d for buffer\n",
+				     __FUNCTION__, bo->base.handle));
 
-			bo->need_io = false;
-			bo->base.io = true;
-			bo->base.vmap = true;
-			bo->mmapped = true;
+				bo->need_io = false;
+				bo->base.io = true;
+				bo->base.vmap = true;
+				bo->mmapped = true;
 
-			goto init;
+				goto init;
+			}
 		}
 	}
 
@@ -3942,9 +3945,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			free(old);
 			bo->base.refcnt = 1;
 		} else {
-			if (!__kgem_bo_init(&bo->base,
-					    gem_create(kgem->fd, alloc),
-					    alloc)) {
+			uint32_t handle = gem_create(kgem->fd, alloc);
+			if (handle == 0 ||
+			    !__kgem_bo_init(&bo->base, handle, alloc)) {
 				free(bo);
 				return NULL;
 			}
commit 9af651fdf93f6ccdbcb147cb16989f84c2d0ebdc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 10 10:57:56 2012 +0100

    sna: Include some DBG for segment drawing
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 636efdd..b8ca214 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7188,8 +7188,8 @@ rectangle_continue:
 				b->y1 = y1;
 				dirty = false;
 				while (length--) {
-					e += e1;
 					dirty = true;
+					e += e1;
 					if (e >= 0) {
 						e += e3;
 
@@ -7200,6 +7200,10 @@ rectangle_continue:
 							b->x2 = x1 + 1;
 						b->y2 = b->y1 + 1;
 
+						DBG(("%s: horizontal step: (%d, %d), box: (%d, %d), (%d, %d)\n",
+						     __FUNCTION__, x1, y1,
+						     b->x1, b->y1, b->x2, b->y2));
+
 						if (++b == last_box) {
 							ret = &&X_continue;
 							goto *jump;
@@ -7215,6 +7219,8 @@ X_continue:
 				}
 				if (dirty) {
 					x1 -= sdx;
+					DBG(("%s: horizontal tail: (%d, %d)\n",
+					     __FUNCTION__, x1, y1));
 					if (sdx < 0) {
 						b->x2 = b->x1 + 1;
 						b->x1 = x1;
commit 3aa98289e3a2f59633ac5f1a5f64db4ee55609d9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 10 10:31:03 2012 +0100

    sna: Correct adjustment of LineCap for rectilinear segments
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3233f8a..636efdd 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6848,6 +6848,44 @@ sna_poly_line__cpu(DrawablePtr drawable, GCPtr gc,
 	fbPolyLine(drawable, gc, mode, n, pt);
 }
 
+static inline void box_from_seg(BoxPtr b, xSegment *seg, GCPtr gc)
+{
+	if (seg->x1 == seg->x2) {
+		if (seg->y1 > seg->y2) {
+			b->y2 = seg->y1 + 1;
+			b->y1 = seg->y2 + 1;
+			if (gc->capStyle != CapNotLast)
+				b->y1--;
+		} else {
+			b->y1 = seg->y1;
+			b->y2 = seg->y2;
+			if (gc->capStyle != CapNotLast)
+				b->y2++;
+		}
+		b->x1 = seg->x1;
+		b->x2 = seg->x1 + 1;
+	} else {
+		if (seg->x1 > seg->x2) {
+			b->x2 = seg->x1 + 1;
+			b->x1 = seg->x2 + 1;
+			if (gc->capStyle != CapNotLast)
+				b->x1--;
+		} else {
+			b->x1 = seg->x1;
+			b->x2 = seg->x2;
+			if (gc->capStyle != CapNotLast)
+				b->x2++;
+		}
+		b->y1 = seg->y1;
+		b->y2 = seg->y1 + 1;
+	}
+
+	DBG(("%s: seg=(%d,%d),(%d,%d); box=(%d,%d),(%d,%d)\n",
+	     __FUNCTION__,
+	     seg->x1, seg->y1, seg->x2, seg->y2,
+	     b->x1, b->y1, b->x2, b->y2));
+}
+
 static Bool
 sna_poly_segment_blt(DrawablePtr drawable,
 		     struct kgem_bo *bo,
@@ -6880,35 +6918,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 					nbox = ARRAY_SIZE(boxes);
 				n -= nbox;
 				do {
-					if (seg->x1 <= seg->x2) {
-						b->x1 = seg->x1;
-						b->x2 = seg->x2;
-					} else {
-						b->x1 = seg->x2;
-						b->x2 = seg->x1;
-					}
-					b->x2++;
-
-					if (seg->y1 <= seg->y2) {
-						b->y1 = seg->y1;
-						b->y2 = seg->y2;
-					} else {
-						b->y1 = seg->y2;
-						b->y2 = seg->y1;
-					}
-					b->y2++;
-
-					/* don't paint last pixel */
-					if (gc->capStyle == CapNotLast) {
-						if (seg->x1 == seg->x2)
-							b->y2--;
-						else
-							b->x2--;
-					}
-
-					/* XXX does a degenerate segment
-					 * become a point?
-					 */
+					box_from_seg(b, seg, gc);
 					if (b->y2 > b->y1 && b->x2 > b->x1) {
 						b->x1 += dx;
 						b->x2 += dx;
@@ -6933,32 +6943,7 @@ sna_poly_segment_blt(DrawablePtr drawable,
 					nbox = ARRAY_SIZE(boxes);
 				n -= nbox;
 				do {
-					if (seg->x1 <= seg->x2) {
-						b->x1 = seg->x1;
-						b->x2 = seg->x2;
-					} else {
-						b->x1 = seg->x2;
-						b->x2 = seg->x1;
-					}
-					b->x2++;
-
-					if (seg->y1 <= seg->y2) {
-						b->y1 = seg->y1;
-						b->y2 = seg->y2;
-					} else {
-						b->y1 = seg->y2;
-						b->y2 = seg->y1;
-					}
-					b->y2++;
-
-					/* don't paint last pixel */
-					if (gc->capStyle == CapNotLast) {
-						if (seg->x1 == seg->x2)
-							b->y2--;
-						else
-							b->x2--;
-					}
-
+					box_from_seg(b, seg, gc);
 					if (b->y2 > b->y1 && b->x2 > b->x1)
 						b++;
 					seg++;
@@ -6985,41 +6970,13 @@ sna_poly_segment_blt(DrawablePtr drawable,
 			const BoxRec * const clip_end = clip_start + clip.data->numRects;
 			const BoxRec *c;
 			do {
-				int x, y, width, height;
 				BoxRec box;
 
-				if (seg->x1 < seg->x2) {
-					x = seg->x1;
-					width = seg->x2;
-				} else {
-					x = seg->x2;
-					width = seg->x1;
-				}
-				width -= x - 1;
-
-				if (seg->y1 < seg->y2) {
-					y = seg->y1;
-					height = seg->y2;
-				} else {
-					y = seg->y2;
-					height = seg->y1;
-				}
-				height -= y - 1;
-
-				/* don't paint last pixel */
-				if (gc->capStyle == CapNotLast) {
-					if (width == 1)
-						height--;
-					else
-						width--;
-				}
-
-				DBG(("%s: [%d] (%d, %d)x(%d, %d) + (%d, %d)\n", __FUNCTION__, n,
-				     x, y, width, height, dx+drawable->x, dy+drawable->y));
-				box.x1 = x + drawable->x;
-				box.x2 = box.x1 + width;
-				box.y1 = y + drawable->y;
-				box.y2 = box.y1 + height;
+				box_from_seg(b, seg, gc);
+				box.x1 += drawable->x;
+				box.x2 += drawable->x;
+				box.y1 += drawable->y;
+				box.y2 += drawable->y;
 				c = find_clip_box_for_y(clip_start,
 							clip_end,
 							box.y1);
@@ -7046,42 +7003,11 @@ sna_poly_segment_blt(DrawablePtr drawable,
 			} while (--n);
 		} else {
 			do {
-				int x, y, width, height;
-
-				if (seg->x1 < seg->x2) {
-					x = seg->x1;
-					width = seg->x2;
-				} else {
-					x = seg->x2;
-					width = seg->x1;
-				}
-				width -= x - 1;
-
-				if (seg->y1 < seg->y2) {
-					y = seg->y1;
-					height = seg->y2;
-				} else {
-					y = seg->y2;
-					height = seg->y1;
-				}
-				height -= y - 1;
-
-				/* don't paint last pixel */
-				if (gc->capStyle == CapNotLast) {
-					if (width == 1)
-						height--;
-					else
-						width--;
-				}
-
-				DBG(("%s: [%d] (%d, %d)x(%d, %d) + (%d, %d)\n", __FUNCTION__, n,
-				     x, y, width, height, dx+drawable->x, dy+drawable->y));
-
-				b->x1 = x + drawable->x;
-				b->x2 = b->x1 + width;
-				b->y1 = y + drawable->y;
-				b->y2 = b->y1 + height;
-
+				box_from_seg(b, seg, gc);
+				b->x1 += drawable->x;
+				b->x2 += drawable->x;
+				b->y1 += drawable->y;
+				b->y2 += drawable->y;
 				if (box_intersect(b, &clip.extents)) {
 					b->x1 += dx;
 					b->x2 += dx;
commit ad69316ab1054c5e4b56c19b8eec87b9f86939e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 10 10:03:49 2012 +0100

    sna: Include a small amount of timer-slack to avoid very short sleeps
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3013e3d..3233f8a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12105,7 +12105,7 @@ static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 			int32_t delta = sna->timer_expire[i] - now;
 			DBG(("%s: timer[%d] expires in %d [%d]\n",
 			     __FUNCTION__, i, delta, sna->timer_expire[i]));
-			if (delta <= 0)
+			if (delta <= 3)
 				sna->timer_ready |= 1 << i;
 			else if (next == 0 || delta < next)
 				next = delta;
commit 29d1d409b259ba0c85947ef5eef903ea527aa118
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 10 09:54:53 2012 +0100

    sna: Avoid doubling the final step of poly lines
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5f4e74f..3013e3d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6103,6 +6103,7 @@ Y_continue:
 				}
 
 				if (dirty) {
+					y -= sdy;
 					if (sdy < 0) {
 						b->y2 = b->y1 + 1;
 						b->y1 = y;
@@ -7371,6 +7372,7 @@ Y_continue:
 				}
 
 				if (dirty) {
+					y1 -= sdy;
 					if (sdy < 0) {
 						b->y2 = b->y1 + 1;
 						b->y1 = y1;
commit a83d90ee61be44a2a36b56ad24bbc6544320448f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 9 20:34:52 2012 +0100

    sna: Avoid randomly changing domains of active bo
    
    After attaching the bo to the scanout, mark it as retired in order to
    update its domains so that the assertion during retirement later is
    correct.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49526
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6703e7e..f6a34ba 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -342,7 +342,7 @@ kgem_busy(struct kgem *kgem, int handle)
 	return busy.busy;
 }
 
-static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
+void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, domain=%d\n",
 	     __FUNCTION__, bo->handle, bo->domain));
@@ -359,6 +359,8 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 		list_del(&bo->request);
 		bo->needs_flush = bo->flush;
 	}
+
+	bo->domain = DOMAIN_NONE;
 }
 
 Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
@@ -375,7 +377,6 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 	DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain));
 	kgem_bo_retire(kgem, bo);
-	bo->domain = DOMAIN_NONE;
 	return TRUE;
 }
 
@@ -4171,7 +4172,6 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		kgem_bo_map__cpu(kgem, &bo->base);
 	}
 	kgem_bo_retire(kgem, &bo->base);
-	bo->base.domain = DOMAIN_NONE;
 }
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 5a4bf75..4958eff 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -243,6 +243,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
 void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
 
+void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo);
 bool kgem_retire(struct kgem *kgem);
 
 void _kgem_submit(struct kgem *kgem);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 65d1992..3f8beea 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -495,7 +495,7 @@ sna_crtc_restore(struct sna *sna)
 			return;
 	}
 
-	bo->domain = DOMAIN_NONE;
+	kgem_bo_retire(&sna->kgem, bo);
 	scrn->displayWidth = bo->pitch / sna->mode.cpp;
 	sna->mode.fb_pixmap = sna->front->drawable.serialNumber;
 }
@@ -659,7 +659,6 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	int saved_x, saved_y;
 	Rotation saved_rotation;
 	DisplayModeRec saved_mode;
-	int ret = TRUE;
 
 	DBG(("%s(rotation=%d, x=%d, y=%d, mode=%dx%d@%d)\n",
 	     __FUNCTION__, rotation, x, y,
@@ -690,7 +689,6 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		DBG(("%s: handle %d attached to fb %d\n",
 		     __FUNCTION__, bo->handle, sna_mode->fb_id));
 
-		bo->domain = DOMAIN_NONE;
 		sna_mode->fb_pixmap = sna->front->drawable.serialNumber;
 	}
 
@@ -797,7 +795,6 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 
 	DBG(("%s: attached handle %d to fb %d\n",
 	     __FUNCTION__, bo->handle, sna_crtc->shadow_fb_id));
-	bo->domain = DOMAIN_NONE;
 	return sna_crtc->shadow = shadow;
 }
 
@@ -810,7 +807,6 @@ sna_crtc_shadow_create(xf86CrtcPtr crtc, void *data, int width, int height)
 static void
 sna_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr pixmap, void *data)
 {
-	struct sna *sna = to_sna(crtc->scrn);
 	struct sna_crtc *sna_crtc = crtc->driver_private;
 
 	/* We may have not called shadow_create() on the data yet and
@@ -1727,7 +1723,8 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		if (!sna_crtc_apply(crtc))
 			goto fail;
 	}
-	bo->domain = DOMAIN_NONE;
+
+	kgem_bo_retire(&sna->kgem, bo);
 
 	scrn->virtualX = width;
 	scrn->virtualY = height;
@@ -1859,9 +1856,7 @@ sna_page_flip(struct sna *sna,
 	 */
 	count = do_page_flip(sna, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
-	if (count)
-		bo->domain = DOMAIN_NONE;
-	else
+	if (count == 0)
 		mode->fb_id = *old_fb;
 
 	return count;
commit d5200510b8f2a0ccb2e0c45dfebc659f8360780c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 9 17:58:58 2012 +0100

    sna: Respect the error terms when reversing the PolyLine DDA
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d52328d..5f4e74f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5948,6 +5948,7 @@ rectangle_continue:
 				}
 			} else if (adx >= ady) {
 				int x2_clipped = x2, y2_clipped = y2;
+				bool dirty;
 
 				/* X-major segment */
 				e1 = ady << 1;
@@ -5985,43 +5986,54 @@ rectangle_continue:
 				e3 = e2 - e1;
 				e  = e - e1;
 
-				if (sdx < 0) {
-					x = x2_clipped;
-					y = y2_clipped;
-					sdy = -sdy;
-				}
-
 				b->x1 = x;
-				b->y2 = b->y1 = y;
+				b->y1 = y;
+				dirty = false;
 				while (length--) {
 					e += e1;
-					x++;
+					dirty = true;
 					if (e >= 0) {
-						b->x2 = x;
-						b->y2++;
+						e += e3;
+
+						if (sdx < 0) {
+							b->x2 = b->x1 + 1;
+							b->x1 = x;
+						} else
+							b->x2 = x + 1;
+						b->y2 = b->y1 + 1;
+
 						if (++b == last_box) {
 							ret = &&X_continue;
 							goto *jump;
 X_continue:
 							b = box;
 						}
-						y += sdy;
-						e += e3;
-						b->y2 = b->y1 = y;
-						b->x1 = x;
+
+						b->x1 = x + sdx;
+						b->y1 = y += sdy;
+						dirty = false;
 					}
+					x += sdx;
 				}
+				if (dirty) {
+					x -= sdx;
+					if (sdx < 0) {
+						b->x2 = b->x1 + 1;
+						b->x1 = x;
+					} else
+						b->x2 = x + 1;
+					b->y2 = b->y1 + 1;
 
-				b->x2 = ++x;
-				b->y2++;
-				if (++b == last_box) {
-					ret = &&X_continue2;
-					goto *jump;
-X_continue2:
-					b = box;
+					if (++b == last_box) {
+						ret = &&X2_continue;
+						goto *jump;
+X2_continue:
+						b = box;
+					}
 				}
 			} else {
 				int x2_clipped = x2, y2_clipped = y2;
+				bool dirty;
 
 				/* Y-major segment */
 				e1 = adx << 1;
@@ -6060,62 +6072,50 @@ X_continue2:
 				e3 = e2 - e1;
 				e  = e - e1;
 
-				if (sdx < 0) {
-					x = x2_clipped;
-					y = y2_clipped;
-					sdy = -sdy;
-				}
+				b->x1 = x;
+				b->y1 = y;
+				dirty = false;
+				while (length--) {
+					e += e1;
+					dirty = true;
+					if (e >= 0) {
+						e += e3;
 
-				b->x2 = b->x1 = x;
-				if (sdy < 0) {
-					b->y2 = y + 1;
-					while (length--) {
-						e += e1;
-						y--;
-						if (e >= 0) {
+						if (sdy < 0) {
+							b->y2 = b->y1 + 1;
 							b->y1 = y;
-							b->x2++;
-							if (++b == last_box) {
-								ret = &&Y_up_continue;
-								goto *jump;
-Y_up_continue:
-								b = box;
-							}
-							e += e3;
-							b->x2 = b->x1 = ++x;
-							b->y2 = y;
-						}
-					}
+						} else
+							b->y2 = y + 1;
+						b->x2 = x + 1;
 
-					b->y1 = y;
-				} else {
-					b->y1 = y;
-					while (length--) {
-						e += e1;
-						y++;
-						if (e >= 0) {
-							b->y2 = y;
-							b->x2++;
-							if (++b == last_box) {
-								ret = &&Y_down_continue;
-								goto *jump;
-Y_down_continue:
-								b = box;
-							}
-							e += e3;
-							b->x2 = b->x1 = ++x;
-							b->y1 = y;
+						if (++b == last_box) {
+							ret = &&Y_continue;
+							goto *jump;
+Y_continue:
+							b = box;
 						}
-					}
 
-					b->y2 = ++y;
+						b->x1 = x += sdx;
+						b->y1 = y + sdy;
+						dirty = false;
+					}
+					y += sdy;
 				}
-				b->x2++;
-				if (++b == last_box) {
-					ret = &&Y_continue2;
-					goto *jump;
-Y_continue2:
-					b = box;
+
+				if (dirty) {
+					if (sdy < 0) {
+						b->y2 = b->y1 + 1;
+						b->y1 = y;
+					} else
+						b->y2 = y + 1;
+					b->x2 = x + 1;
+
+					if (++b == last_box) {
+						ret = &&Y2_continue;
+						goto *jump;
+Y2_continue:
+						b = box;
+					}
 				}
 			}
 		}
@@ -7222,6 +7222,8 @@ rectangle_continue:
 					b = box;
 				}
 			} else if (adx >= ady) {
+				bool dirty;
+
 				/* X-major segment */
 				e1 = ady << 1;
 				e2 = e1 - (adx << 1);
@@ -7255,42 +7257,54 @@ rectangle_continue:
 				e3 = e2 - e1;
 				e  = e - e1;
 
-				if (sdx < 0) {
-					x1 = x2;
-					y1 = y2;
-					sdy = -sdy;
-				}
-
 				b->x1 = x1;
-				b->y2 = b->y1 = y1;
-				while (--length) {
+				b->y1 = y1;
+				dirty = false;
+				while (length--) {
 					e += e1;
-					x1++;
+					dirty = true;
 					if (e >= 0) {
-						b->x2 = x1;
-						b->y2++;
+						e += e3;
+
+						if (sdx < 0) {
+							b->x2 = b->x1 + 1;
+							b->x1 = x1;
+						} else
+							b->x2 = x1 + 1;
+						b->y2 = b->y1 + 1;
+
 						if (++b == last_box) {
 							ret = &&X_continue;
 							goto *jump;
 X_continue:
 							b = box;
 						}
-						y1 += sdy;
-						e += e3;
-						b->y2 = b->y1 = y1;
-						b->x1 = x1;
+
+						b->x1 = x1 + sdx;
+						b->y1 = y1 += sdy;
+						dirty = false;
 					}
+					x1 += sdx;
 				}
+				if (dirty) {
+					x1 -= sdx;
+					if (sdx < 0) {
+						b->x2 = b->x1 + 1;
+						b->x1 = x1;
+					} else
+						b->x2 = x1 + 1;
+					b->y2 = b->y1 + 1;
 
-				b->x2 = ++x1;
-				b->y2++;
-				if (++b == last_box) {
-					ret = &&X_continue2;
-					goto *jump;
-X_continue2:
-					b = box;
+					if (++b == last_box) {
+						ret = &&X2_continue;
+						goto *jump;
+X2_continue:
+						b = box;
+					}
 				}
 			} else {
+				bool dirty;
+
 				/* Y-major segment */
 				e1 = adx << 1;
 				e2 = e1 - (ady << 1);
@@ -7326,62 +7340,50 @@ X_continue2:
 				e3 = e2 - e1;
 				e  = e - e1;
 
-				if (sdx < 0) {
-					x1 = x2;
-					y1 = y2;
-					sdy = -sdy;
-				}
+				b->x1 = x1;
+				b->y1 = y1;
+				dirty = false;
+				while (length--) {
+					e += e1;
+					dirty = true;
+					if (e >= 0) {
+						e += e3;
 
-				b->x2 = b->x1 = x1;
-				if (sdy < 0) {
-					b->y2 = y1 + 1;
-					while (--length) {
-						e += e1;
-						y1--;
-						if (e >= 0) {
+						if (sdy < 0) {
+							b->y2 = b->y1 + 1;
 							b->y1 = y1;
-							b->x2++;
-							if (++b == last_box) {
-								ret = &&Y_up_continue;
-								goto *jump;
-Y_up_continue:
-								b = box;
-							}
-							e += e3;
-							b->x2 = b->x1 = ++x1;
-							b->y2 = y1;
-						}
-					}
+						} else
+							b->y2 = y1 + 1;
+						b->x2 = x1 + 1;
 
-					b->y1 = y1;
-				} else {
-					b->y1 = y1;
-					while (--length) {
-						e += e1;
-						y1++;
-						if (e >= 0) {
-							b->y2 = y1;
-							b->x2++;
-							if (++b == last_box) {
-								ret = &&Y_down_continue;
-								goto *jump;
-Y_down_continue:
-								b = box;
-							}
-							e += e3;
-							b->x2 = b->x1 = ++x1;
-							b->y1 = y1;
+						if (++b == last_box) {
+							ret = &&Y_continue;
+							goto *jump;
+Y_continue:
+							b = box;
 						}
-					}
 
-					b->y2 = ++y1;
+						b->x1 = x1 += sdx;
+						b->y1 = y1 + sdy;
+						dirty = false;
+					}
+					y1 += sdy;
 				}
-				b->x2++;
-				if (++b == last_box) {
-					ret = &&Y_continue2;
-					goto *jump;
-Y_continue2:
-					b = box;
+
+				if (dirty) {
+					if (sdy < 0) {
+						b->y2 = b->y1 + 1;
+						b->y1 = y1;
+					} else
+						b->y2 = y1 + 1;
+					b->x2 = x1 + 1;
+
+					if (++b == last_box) {
+						ret = &&Y2_continue;
+						goto *jump;
+Y2_continue:
+						b = box;
+					}
 				}
 			}
 		} while (--n);
commit a3d37fb29f8dffb0e370ad95783994aaa7eccfaf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 15:47:14 2012 +0100

    sna: Force remapping for IO transfer
    
    Should fix regression from fcccc5528 (sna: Improve handling of inplace
    IO for large transfers) whereby it was aborting the transfer if we need
    to remap the buffer for the upload.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49546
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index c120681..5a4bf75 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -452,32 +452,23 @@ static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 	return IS_CPU_MAP(bo->map) == !bo->tiling;
 }
 
-static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
+static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
 {
-	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
-		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
-	return bo->rq;
-}
-
-static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
-{
-	DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
-	     __FUNCTION__, bo->handle,
-	     bo->domain, bo->presumed_offset, bo->size));
-
-	if (!kgem_bo_is_mappable(kgem, bo) && kgem_bo_is_busy(bo))
+	if (kgem_bo_mapped(bo))
 		return true;
 
-	if (kgem->wedged)
-		return false;
-
-	if (kgem_bo_is_busy(bo))
+	if (!bo->tiling && kgem->has_llc)
 		return true;
 
-	if (bo->presumed_offset == 0)
-		return !list_is_empty(&kgem->requests);
+	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
+}
+
 
-	return false;
+static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
+{
+	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
+		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
+	return bo->rq;
 }
 
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2c0fd57..d52328d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -981,8 +981,7 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				    priv->gpu_bo->exec == NULL)
 					kgem_retire(&sna->kgem);
 
-				if (kgem_bo_map_will_stall(&sna->kgem,
-							   priv->gpu_bo)) {
+				if (kgem_bo_is_busy(priv->gpu_bo)) {
 					if (priv->pinned)
 						goto skip_inplace_map;
 
@@ -1049,7 +1048,7 @@ skip_inplace_map:
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
+	    !kgem_bo_is_busy(priv->gpu_bo) &&
 	    pixmap_inplace(sna, pixmap, priv) &&
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -1356,7 +1355,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			    priv->gpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
-			if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) {
+			if (!kgem_bo_is_busy(priv->gpu_bo)) {
 				pixmap->devPrivate.ptr =
 					kgem_bo_map(&sna->kgem, priv->gpu_bo);
 				if (pixmap->devPrivate.ptr == NULL)
@@ -1422,7 +1421,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
+	    !kgem_bo_is_busy(priv->gpu_bo) &&
 	    region_inplace(sna, pixmap, region, priv) &&
 	    sna_pixmap_move_area_to_gpu(pixmap, &region->extents, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -2733,7 +2732,7 @@ static bool upload_inplace(struct sna *sna,
 	if (priv->gpu_bo) {
 		assert(priv->gpu_bo->proxy == NULL);
 
-		if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))
+		if (!kgem_bo_is_busy(priv->gpu_bo))
 			return true;
 
 		if (!priv->pinned &&
@@ -2795,7 +2794,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 		/* And mark as having a valid GTT mapping for future uploads */
 		if (priv->stride &&
-		    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) {
+		    !kgem_bo_is_busy(priv->gpu_bo)) {
 			pixmap->devPrivate.ptr =
 				kgem_bo_map(&sna->kgem, priv->gpu_bo);
 			if (pixmap->devPrivate.ptr) {
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 9b56c99..c39b1f1 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -116,8 +116,7 @@ static bool download_inplace(struct kgem *kgem, struct kgem_bo *bo)
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
-	return !kgem_bo_map_will_stall(kgem, bo) ||
-		bo->tiling == I915_TILING_NONE;
+	return !kgem_bo_is_busy(bo) || bo->tiling == I915_TILING_NONE;
 }
 
 void sna_read_boxes(struct sna *sna,
@@ -480,7 +479,7 @@ static bool write_boxes_inplace(struct kgem *kgem,
 	DBG(("%s x %d, handle=%d, tiling=%d\n",
 	     __FUNCTION__, n, bo->handle, bo->tiling));
 
-	if (!kgem_bo_is_mappable(kgem, bo))
+	if (!kgem_bo_can_map(kgem, bo))
 		return false;
 
 	kgem_bo_submit(kgem, bo);
@@ -525,11 +524,14 @@ static bool upload_inplace(struct kgem *kgem,
 	if (FORCE_INPLACE)
 		return FORCE_INPLACE > 0;
 
+	if (!kgem_bo_can_map(kgem, bo))
+		return false;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (!bo->map || kgem_bo_map_will_stall(kgem, bo)) {
+	if (!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
@@ -1146,7 +1148,7 @@ bool sna_replace(struct sna *sna,
 	     pixmap->drawable.bitsPerPixel,
 	     bo->tiling));
 
-	if ((!kgem_bo_mapped(bo) || bo->rq) &&
+	if ((!kgem_bo_mapped(bo) || kgem_bo_is_busy(bo)) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
 		return true;
 
commit 2a9a93e4484e0c616724610f4c8019fcbaa7ad53
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 15:12:51 2012 +0100

    sna: Only avoid ring switching for indirect uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 3de164b..9b56c99 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -582,9 +582,7 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (!can_blt ||
-	    kgem->ring == KGEM_RENDER ||
-	    (kgem->has_semaphores && kgem->mode == KGEM_NONE) ||
+	if (!can_blt || kgem->ring == KGEM_RENDER ||
 	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
commit 613902b60e0f2ca2a916e68306a1a37bc236d00d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 14:17:46 2012 +0100

    sna: Fix off-by-one in checking available execbuffer slots
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 9717aac..e1b31d7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -524,7 +524,7 @@ gen2_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nreloc + 3 > KGEM_RELOC_SIZE(&sna->kgem)) {
+	if (!kgem_check_reloc(&sna->kgem, 3)) {
 		DBG(("%s: flushing batch: reloc %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nreloc + 3,
@@ -533,7 +533,7 @@ gen2_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nexec + 3 > KGEM_EXEC_SIZE(&sna->kgem)) {
+	if (!kgem_check_exec(&sna->kgem, 3)) {
 		DBG(("%s: flushing batch: exec %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nexec + 1,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 39601d9..a0de1ee 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1314,7 +1314,7 @@ gen3_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - MAX_OBJECTS) {
+	if (!kgem_check_reloc(&sna->kgem, MAX_OBJECTS)) {
 		DBG(("%s: flushing batch: reloc %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nreloc,
@@ -1323,7 +1323,7 @@ gen3_get_batch(struct sna *sna)
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - MAX_OBJECTS - 1) {
+	if (!kgem_check_exec(&sna->kgem, MAX_OBJECTS)) {
 		DBG(("%s: flushing batch: exec %d >= %d\n",
 		     __FUNCTION__,
 		     sna->kgem.nexec,
@@ -1792,9 +1792,9 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 105: 5))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_reloc(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 86e9ff8..3372e7e 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1157,9 +1157,9 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, 25))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_reloc(&sna->kgem, 1))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 85343d8..b4d9203 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1161,9 +1161,9 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1613749..f3b7537 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1543,9 +1543,9 @@ static int gen6_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 5))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5492c23..362ddff 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1647,9 +1647,9 @@ static int gen7_get_rectangles__flush(struct sna *sna,
 {
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
 		return 0;
-	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
+	if (!kgem_check_exec(&sna->kgem, 1))
 		return 0;
-	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
+	if (!kgem_check_reloc(&sna->kgem, 2))
 		return 0;
 
 	if (op->need_magic_ca_pass && sna->render.vbo)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 11950e4..c120681 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -330,7 +330,8 @@ static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
 						  int num_surfaces)
 {
 	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
-		kgem_check_reloc(kgem, num_surfaces);
+		kgem_check_reloc(kgem, num_surfaces) &&
+		kgem_check_exec(kgem, num_surfaces);
 }
 
 static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords)
commit 663e387b35c314c4c2bee8137d6b70d27fa9f729
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 11:45:34 2012 +0100

    sna: Only submit a batch to the scanout if it is not already busy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bf24664..6703e7e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3001,10 +3001,10 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	__kgem_bo_destroy(kgem, bo);
 }
 
-void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
+bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo)
 {
 	/* The kernel will emit a flush *and* update its own flushing lists. */
-	kgem_busy(kgem, bo->handle);
+	return kgem_busy(kgem, bo->handle);
 }
 
 bool kgem_check_bo(struct kgem *kgem, ...)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 69562f4..11950e4 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -258,7 +258,7 @@ static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
 		_kgem_submit(kgem);
 }
 
-void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
+bool __kgem_flush(struct kgem *kgem, struct kgem_bo *bo);
 static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
 {
 	kgem_bo_submit(kgem, bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dbf7e30..2c0fd57 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11998,7 +11998,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval;
-			return true;
+			return priv->cpu_damage || !__kgem_flush(&sna->kgem, priv->gpu_bo);
 		}
 	} else {
 		if (priv->cpu_damage == NULL && priv->gpu_bo->exec == NULL) {
commit cd7a56b7313233190a4c4a735d4a141e99c9b688
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 8 08:53:41 2012 +0100

    sna: Throttle independently of scanout updates
    
    As we are now throttling to relieve GTT pressure, it is a benefit to
    consistently throttle before blocking.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c897658..bf24664 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -216,6 +216,9 @@ static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
 	if (kgem_retire(kgem))
 		return true;
 
+	if (!kgem->need_throttle)
+		return false;
+
 	if ((flags & CREATE_NO_THROTTLE) == 0)
 		kgem_throttle(kgem);
 
@@ -1520,7 +1523,7 @@ static void kgem_commit(struct kgem *kgem)
 		gem_close(kgem->fd, rq->bo->handle);
 	} else {
 		list_add_tail(&rq->list, &kgem->requests);
-		kgem->need_retire = 1;
+		kgem->need_throttle = kgem->need_retire = 1;
 	}
 
 	kgem->next_request = NULL;
@@ -1979,6 +1982,8 @@ void kgem_throttle(struct kgem *kgem)
 			   "When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
 		warned = 1;
 	}
+
+	kgem->need_throttle = 0;
 }
 
 static void kgem_expire_partial(struct kgem *kgem)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9bb3bc9..69562f4 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -149,6 +149,7 @@ struct kgem {
 	uint32_t need_expire:1;
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
+	uint32_t need_throttle:1;
 	uint32_t scanout:1;
 	uint32_t flush_now:1;
 	uint32_t busy:1;
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 023c091..790f5ff 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -209,6 +209,7 @@ enum {
 
 enum {
 	FLUSH_TIMER = 0,
+	THROTTLE_TIMER,
 	EXPIRE_TIMER,
 	INACTIVE_TIMER,
 	NUM_TIMERS
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b37ee59..dbf7e30 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12015,6 +12015,31 @@ static bool sna_accel_do_flush(struct sna *sna)
 	return false;
 }
 
+static bool sna_accel_do_throttle(struct sna *sna)
+{
+	if (sna->flags & SNA_NO_THROTTLE)
+		return false;
+
+	if (sna->timer_active & (1<<(THROTTLE_TIMER))) {
+		if (sna->timer_ready & (1<<(THROTTLE_TIMER))) {
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
+			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+			return true;
+		}
+	} else {
+		if (!sna->kgem.need_retire) {
+			DBG(("%s -- no pending activity\n", __FUNCTION__));
+		} else {
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
+			sna->timer_active |= 1 << THROTTLE_TIMER;
+			sna->timer_ready |= 1 << THROTTLE_TIMER;
+			sna->timer_expire[THROTTLE_TIMER] = sna->time + 20;
+		}
+	}
+
+	return false;
+}
+
 static bool sna_accel_do_expire(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
@@ -12089,18 +12114,17 @@ static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 	return next;
 }
 
-static bool sna_accel_flush(struct sna *sna)
+static void sna_accel_flush(struct sna *sna)
 {
 	struct sna_pixmap *priv = sna_accel_scanout(sna);
-	bool need_throttle = priv->gpu_bo->rq;
-	bool busy = priv->cpu_damage || need_throttle;
+	bool busy = priv->cpu_damage || priv->gpu_bo->rq;
 
-	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d, need_throttle=%d\n",
+	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d\n",
 	     __FUNCTION__, (long)sna->time,
 	     priv->cpu_damage,
 	     priv->gpu_bo->exec != NULL,
 	     sna->kgem.nbatch,
-	     sna->kgem.busy, need_throttle));
+	     sna->kgem.busy));
 
 	if (!sna->kgem.busy && !busy)
 		sna_accel_disarm_timer(sna, FLUSH_TIMER);
@@ -12111,8 +12135,17 @@ static bool sna_accel_flush(struct sna *sna)
 
 	kgem_bo_flush(&sna->kgem, priv->gpu_bo);
 	sna->kgem.flush_now = 0;
+}
+
+static void sna_accel_throttle(struct sna *sna)
+{
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
+
+	if (sna->kgem.need_throttle)
+		kgem_throttle(&sna->kgem);
 
-	return need_throttle;
+	if (!sna->kgem.need_retire)
+		sna_accel_disarm_timer(sna, THROTTLE_TIMER);
 }
 
 static void sna_accel_expire(struct sna *sna)
@@ -12362,21 +12395,14 @@ void sna_accel_close(struct sna *sna)
 	kgem_cleanup_cache(&sna->kgem);
 }
 
-static void sna_accel_throttle(struct sna *sna)
-{
-	if (sna->flags & SNA_NO_THROTTLE)
-		return;
-
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
-
-	kgem_throttle(&sna->kgem);
-}
-
 void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
-	if (sna_accel_do_flush(sna) && sna_accel_flush(sna))
+	if (sna_accel_do_flush(sna))
+		sna_accel_flush(sna);
+
+	if (sna_accel_do_throttle(sna))
 		sna_accel_throttle(sna);
 
 	if (sna_accel_do_expire(sna))
commit 2372176f73b7c945f56b7673eab5eccb86366416
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 12:57:35 2012 +0100

    sna: Throttle execution when searching for inactive buffers
    
    If we have some active buffers that we may reuse and old outstanding
    requests, throttling before retiring should prevent the CPU from running
    away from the GPU and hogging the entire GTT and RAM.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b1e2654..c897658 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -205,6 +205,23 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
+static bool __kgem_throttle_retire(struct kgem *kgem, unsigned flags)
+{
+	if (flags & CREATE_NO_RETIRE)
+		return false;
+
+	if (!kgem->need_retire)
+		return false;
+
+	if (kgem_retire(kgem))
+		return true;
+
+	if ((flags & CREATE_NO_THROTTLE) == 0)
+		kgem_throttle(kgem);
+
+	return kgem_retire(kgem);
+}
+
 static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
@@ -219,8 +236,7 @@ retry_gtt:
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
 		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
 		       __FUNCTION__, bo->handle, errno);
-		kgem_throttle(kgem);
-		kgem_retire(kgem);
+		__kgem_throttle_retire(kgem, 0);
 		if (kgem_expire_cache(kgem))
 			goto retry_gtt;
 
@@ -234,8 +250,7 @@ retry_mmap:
 	if (ptr == MAP_FAILED) {
 		ErrorF("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
 		       __FUNCTION__, bo->handle, bytes(bo), errno);
-		kgem_throttle(kgem);
-		if (kgem_retire(kgem))
+		if (__kgem_throttle_retire(kgem, 0))
 			goto retry_mmap;
 
 		assert(0);
@@ -1810,7 +1825,7 @@ void _kgem_submit(struct kgem *kgem)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
-	rq->bo = kgem_create_linear(kgem, size, 0);
+	rq->bo = kgem_create_linear(kgem, size, CREATE_NO_THROTTLE);
 	if (rq->bo) {
 		uint32_t handle = rq->bo->handle;
 		int i;
@@ -2150,7 +2165,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			return NULL;
 		}
 
-		if (!kgem->need_retire || !kgem_retire(kgem)) {
+		if (!__kgem_throttle_retire(kgem, 0)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
 		}
@@ -2641,7 +2656,8 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				bo->refcnt = 1;
 				return bo;
 			}
-		} while (!list_is_empty(cache) && kgem_retire(kgem));
+		} while (!list_is_empty(cache) &&
+			 __kgem_throttle_retire(kgem, flags));
 	}
 
 	if (flags & CREATE_INACTIVE)
@@ -2858,11 +2874,9 @@ search_inactive:
 		return bo;
 	}
 
-	if (flags & CREATE_INACTIVE && !list_is_empty(&kgem->requests)) {
-		if (kgem_retire(kgem)) {
-			flags &= ~CREATE_INACTIVE;
-			goto search_inactive;
-		}
+	if (flags & CREATE_INACTIVE && __kgem_throttle_retire(kgem, flags)) {
+		flags &= ~CREATE_INACTIVE;
+		goto search_inactive;
 	}
 
 	if (--retry) {
@@ -3375,8 +3389,7 @@ retry:
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
 		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
 		       __FUNCTION__, bo->handle, bytes(bo), errno);
-		kgem_throttle(kgem);
-		if (kgem_retire(kgem))
+		if (__kgem_throttle_retire(kgem, 0))
 			goto retry;
 
 		return NULL;
@@ -3708,7 +3721,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 
 			goto done;
-		} while (kgem_retire(kgem));
+		} while (__kgem_throttle_retire(kgem, 0));
 	}
 
 #if !DBG_NO_MAP_UPLOAD
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 52bc6f2..9bb3bc9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -225,6 +225,7 @@ enum {
 	CREATE_SCANOUT = 0x10,
 	CREATE_TEMPORARY = 0x20,
 	CREATE_NO_RETIRE = 0x40,
+	CREATE_NO_THROTTLE = 0x40,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
commit 4df228749729dd540b639368400fa20118cdf412
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:42:30 2012 +0100

    sna: Rate-limit and shrink bo usage if we hit system resource limits
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 31c1a35..b1e2654 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -205,23 +205,39 @@ static int gem_set_tiling(int fd, uint32_t handle, int tiling, int stride)
 	return set_tiling.tiling_mode;
 }
 
-static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
+static void *__kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap_gtt mmap_arg;
 	void *ptr;
 
-	DBG(("%s(handle=%d, size=%d, prot=%s)\n", __FUNCTION__,
-	     handle, size, prot & PROT_WRITE ? "read/write" : "read-only"));
+	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__,
+	     bo->handle, bytes(bo)));
 
+retry_gtt:
 	VG_CLEAR(mmap_arg);
-	mmap_arg.handle = handle;
-	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
+	mmap_arg.handle = bo->handle;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
+		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
+		       __FUNCTION__, bo->handle, errno);
+		kgem_throttle(kgem);
+		kgem_retire(kgem);
+		if (kgem_expire_cache(kgem))
+			goto retry_gtt;
+
 		assert(0);
 		return NULL;
 	}
 
-	ptr = mmap(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
+retry_mmap:
+	ptr = mmap(0, bytes(bo), PROT_READ | PROT_WRITE, MAP_SHARED,
+		   kgem->fd, mmap_arg.offset);
 	if (ptr == MAP_FAILED) {
+		ErrorF("%s: failed to mmap %d, %d bytes, into GTT domain: %d\n",
+		       __FUNCTION__, bo->handle, bytes(bo), errno);
+		kgem_throttle(kgem);
+		if (kgem_retire(kgem))
+			goto retry_mmap;
+
 		assert(0);
 		ptr = NULL;
 	}
@@ -3255,8 +3271,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
-		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-			       PROT_READ | PROT_WRITE);
+		ptr = __kgem_bo_map__gtt(kgem, bo);
 		if (ptr == NULL)
 			return NULL;
 
@@ -3310,8 +3325,7 @@ void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
-		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-			       PROT_READ | PROT_WRITE);
+		ptr = __kgem_bo_map__gtt(kgem, bo);
 		if (ptr == NULL)
 			return NULL;
 
@@ -3333,8 +3347,7 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 		return MAP(bo->map);
 
 	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
-	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
-				  PROT_READ | PROT_WRITE);
+	return bo->map = __kgem_bo_map__gtt(kgem, bo);
 }
 
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
@@ -3354,13 +3367,18 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 
 	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
 
+retry:
 	VG_CLEAR(mmap_arg);
 	mmap_arg.handle = bo->handle;
 	mmap_arg.offset = 0;
 	mmap_arg.size = bytes(bo);
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
-		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain\n",
-		       __FUNCTION__, bo->handle, bytes(bo));
+		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain: %d\n",
+		       __FUNCTION__, bo->handle, bytes(bo), errno);
+		kgem_throttle(kgem);
+		if (kgem_retire(kgem))
+			goto retry;
+
 		return NULL;
 	}
 
commit ca4d2296e6e42e837627756790b262cae0fd3b6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:23:19 2012 +0100

    sna: Expand batch buffers
    
    As batch buffers are compacted to fit into the smallest bo, the only
    cost is the larger static array allocation (and presumably cache
    misses).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 01651a5..86e9ff8 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -745,7 +745,7 @@ static bool gen4_check_repeat(PicturePtr picture)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen4_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -766,14 +766,11 @@ gen4_bind_bo(struct sna *sna,
 
 	offset = kgem_bo_get_binding(bo, format);
 	if (offset)
-		return offset;
-
-	offset = sna->kgem.surface - sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
+		return offset * sizeof(uint32_t);
 
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen4_surface_state_padded) / sizeof(uint32_t);
-	ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss));
+	ss = memset(sna->kgem.batch + offset, 0, sizeof(*ss));
 
 	ss->ss0.surface_type = GEN4_SURFACE_2D;
 	ss->ss0.surface_format = format;
@@ -781,9 +778,7 @@ gen4_bind_bo(struct sna *sna,
 	ss->ss0.data_return_format = GEN4_SURFACERETURNFORMAT_FLOAT32;
 	ss->ss0.color_blend = 1;
 	ss->ss1.base_addr =
-		kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+		kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
 	ss->ss2.height = height - 1;
 	ss->ss2.width  = width - 1;
@@ -798,7 +793,7 @@ gen4_bind_bo(struct sna *sna,
 	     ss->ss0.surface_format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 0b63009..85343d8 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -749,7 +749,7 @@ gen5_tiling_bits(uint32_t tiling)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen5_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -771,23 +771,18 @@ gen5_bind_bo(struct sna *sna,
 	if (!DBG_NO_SURFACE_CACHE) {
 		offset = kgem_bo_get_binding(bo, format);
 		if (offset)
-			return offset;
+			return offset * sizeof(uint32_t);
 	}
 
-	offset = sna->kgem.surface - sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen5_surface_state_padded) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 
 	ss[0] = (GEN5_SURFACE_2D << GEN5_SURFACE_TYPE_SHIFT |
 		 GEN5_SURFACE_BLEND_ENABLED |
 		 format << GEN5_SURFACE_FORMAT_SHIFT);
 
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 
 	ss[2] = ((width - 1)  << GEN5_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN5_SURFACE_HEIGHT_SHIFT);
@@ -803,7 +798,7 @@ gen5_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 55673bf..1613749 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1198,21 +1198,16 @@ gen6_bind_bo(struct sna *sna,
 		DBG(("[%x]  bo(handle=%d), format=%d, reuse %s binding\n",
 		     offset, bo->handle, format,
 		     domains & 0xffff ? "render" : "sampler"));
-		return offset;
+		return offset * sizeof(uint32_t);
 	}
 
-	offset = sna->kgem.surface - sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 	ss[0] = (GEN6_SURFACE_2D << GEN6_SURFACE_TYPE_SHIFT |
 		 GEN6_SURFACE_BLEND_ENABLED |
 		 format << GEN6_SURFACE_FORMAT_SHIFT);
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN6_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN6_SURFACE_HEIGHT_SHIFT);
 	assert(bo->pitch <= (1 << 18));
@@ -1228,7 +1223,7 @@ gen6_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e128f5c..5492c23 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1293,7 +1293,7 @@ gen7_tiling_bits(uint32_t tiling)
  * Sets up the common fields for a surface state buffer for the given
  * picture in the given surface state buffer.
  */
-static int
+static uint32_t
 gen7_bind_bo(struct sna *sna,
 	     struct kgem_bo *bo,
 	     uint32_t width,
@@ -1303,7 +1303,7 @@ gen7_bind_bo(struct sna *sna,
 {
 	uint32_t *ss;
 	uint32_t domains;
-	uint16_t offset;
+	int offset;
 
 	COMPILE_TIME_ASSERT(sizeof(struct gen7_surface_state) == 32);
 
@@ -1316,20 +1316,15 @@ gen7_bind_bo(struct sna *sna,
 
 	offset = kgem_bo_get_binding(bo, format);
 	if (offset)
-		return offset;
+		return offset * sizeof(uint32_t);
 
-	offset = sna->kgem.surface - sizeof(struct gen7_surface_state) / sizeof(uint32_t);
-	offset *= sizeof(uint32_t);
-
-	sna->kgem.surface -=
+	offset = sna->kgem.surface -=
 		sizeof(struct gen7_surface_state) / sizeof(uint32_t);
-	ss = sna->kgem.batch + sna->kgem.surface;
+	ss = sna->kgem.batch + offset;
 	ss[0] = (GEN7_SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
 		 gen7_tiling_bits(bo->tiling) |
 		 format << GEN7_SURFACE_FORMAT_SHIFT);
-	ss[1] = kgem_add_reloc(&sna->kgem,
-			       sna->kgem.surface + 1,
-			       bo, domains, 0);
+	ss[1] = kgem_add_reloc(&sna->kgem, offset + 1, bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN7_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
 	ss[3] = (bo->pitch - 1) << GEN7_SURFACE_PITCH_SHIFT;
@@ -1345,7 +1340,7 @@ gen7_bind_bo(struct sna *sna,
 	     format, width, height, bo->pitch, bo->tiling,
 	     domains & 0xffff ? "render" : "sampler"));
 
-	return offset;
+	return offset * sizeof(uint32_t);
 }
 
 fastcall static void
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0799ea7..31c1a35 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -595,10 +595,12 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	kgem->wedged = drmCommandNone(kgem->fd, DRM_I915_GEM_THROTTLE) == -EIO;
 	kgem->wedged |= DBG_NO_HW;
 
-	kgem->max_batch_size = ARRAY_SIZE(kgem->batch);
+	kgem->batch_size = ARRAY_SIZE(kgem->batch);
 	if (gen == 22)
 		/* 865g cannot handle a batch spanning multiple pages */
-		kgem->max_batch_size = PAGE_SIZE / sizeof(uint32_t);
+		kgem->batch_size = PAGE_SIZE / sizeof(uint32_t);
+	if (gen == 70)
+		kgem->batch_size = 16*1024;
 
 	kgem->min_alignment = 4;
 	if (gen < 40)
@@ -1656,16 +1658,16 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 	assert(!kgem_busy(kgem, handle));
 
 	/* If there is no surface data, just upload the batch */
-	if (kgem->surface == kgem->max_batch_size)
+	if (kgem->surface == kgem->batch_size)
 		return gem_write(kgem->fd, handle,
 				 0, sizeof(uint32_t)*kgem->nbatch,
 				 kgem->batch);
 
 	/* Are the batch pages conjoint with the surface pages? */
-	if (kgem->surface < kgem->nbatch + PAGE_SIZE/4) {
-		assert(size == sizeof(kgem->batch));
+	if (kgem->surface < kgem->nbatch + PAGE_SIZE/sizeof(uint32_t)) {
+		assert(size == PAGE_ALIGN(kgem->batch_size*sizeof(uint32_t)));
 		return gem_write(kgem->fd, handle,
-				 0, sizeof(kgem->batch),
+				 0, kgem->batch_size*sizeof(uint32_t),
 				 kgem->batch);
 	}
 
@@ -1676,11 +1678,11 @@ static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size)
 	if (ret)
 		return ret;
 
-	assert(kgem->nbatch*sizeof(uint32_t) <=
-	       sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size));
+	ret = PAGE_ALIGN(sizeof(uint32_t) * kgem->batch_size);
+	ret -= sizeof(uint32_t) * kgem->surface;
+	assert(size-ret >= kgem->nbatch*sizeof(uint32_t));
 	return __gem_write(kgem->fd, handle,
-			sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size),
-			sizeof(kgem->batch) - sizeof(uint32_t)*kgem->surface,
+			size - ret, (kgem->batch_size - kgem->surface)*sizeof(uint32_t),
 			kgem->batch + kgem->surface);
 }
 
@@ -1719,7 +1721,7 @@ void kgem_reset(struct kgem *kgem)
 	kgem->aperture = 0;
 	kgem->aperture_fenced = 0;
 	kgem->nbatch = 0;
-	kgem->surface = kgem->max_batch_size;
+	kgem->surface = kgem->batch_size;
 	kgem->mode = KGEM_NONE;
 	kgem->flush = 0;
 	kgem->scanout = 0;
@@ -1734,24 +1736,26 @@ static int compact_batch_surface(struct kgem *kgem)
 	int size, shrink, n;
 
 	/* See if we can pack the contents into one or two pages */
-	size = kgem->max_batch_size - kgem->surface + kgem->nbatch;
-	if (size > 2048)
-		return sizeof(kgem->batch);
-	else if (size > 1024)
-		size = 8192, shrink = 2*4096;
-	else
-		size = 4096, shrink = 3*4096;
-
-	for (n = 0; n < kgem->nreloc; n++) {
-		if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
-		    kgem->reloc[n].target_handle == 0)
-			kgem->reloc[n].delta -= shrink;
-
-		if (kgem->reloc[n].offset >= size)
-			kgem->reloc[n].offset -= shrink;
+	n = ALIGN(kgem->batch_size, 1024);
+	size = n - kgem->surface + kgem->nbatch;
+	size = ALIGN(size, 1024);
+
+	shrink = n - size;
+	if (shrink) {
+		DBG(("shrinking from %d to %d\n", kgem->batch_size, size));
+
+		shrink *= sizeof(uint32_t);
+		for (n = 0; n < kgem->nreloc; n++) {
+			if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION &&
+			    kgem->reloc[n].target_handle == 0)
+				kgem->reloc[n].delta -= shrink;
+
+			if (kgem->reloc[n].offset >= sizeof(uint32_t)*kgem->nbatch)
+				kgem->reloc[n].offset -= shrink;
+		}
 	}
 
-	return size;
+	return size * sizeof(uint32_t);
 }
 
 void _kgem_submit(struct kgem *kgem)
@@ -1769,11 +1773,11 @@ void _kgem_submit(struct kgem *kgem)
 	batch_end = kgem_end_batch(kgem);
 	kgem_sna_flush(kgem);
 
-	DBG(("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
-	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
+	DBG(("batch[%d/%d]: %d %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n",
+	     kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, kgem->batch_size,
 	     kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture));
 
-	assert(kgem->nbatch <= kgem->max_batch_size);
+	assert(kgem->nbatch <= kgem->batch_size);
 	assert(kgem->nbatch <= kgem->surface);
 	assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
 	assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
@@ -1786,7 +1790,7 @@ void _kgem_submit(struct kgem *kgem)
 #endif
 
 	rq = kgem->next_request;
-	if (kgem->surface != kgem->max_batch_size)
+	if (kgem->surface != kgem->batch_size)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
@@ -1821,7 +1825,7 @@ void _kgem_submit(struct kgem *kgem)
 			execbuf.buffers_ptr = (uintptr_t)kgem->exec;
 			execbuf.buffer_count = kgem->nexec;
 			execbuf.batch_start_offset = 0;
-			execbuf.batch_len = batch_end*4;
+			execbuf.batch_len = batch_end*sizeof(uint32_t);
 			execbuf.cliprects_ptr = 0;
 			execbuf.num_cliprects = 0;
 			execbuf.DR1 = 0;
@@ -1835,7 +1839,7 @@ void _kgem_submit(struct kgem *kgem)
 					      O_WRONLY | O_CREAT | O_APPEND,
 					      0666);
 				if (fd != -1) {
-					ret = write(fd, kgem->batch, batch_end*4);
+					ret = write(fd, kgem->batch, batch_end*sizeof(uint32_t));
 					fd = close(fd);
 				}
 			}
@@ -1864,7 +1868,7 @@ void _kgem_submit(struct kgem *kgem)
 
 				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
 				if (i != -1) {
-					ret = write(i, kgem->batch, batch_end*4);
+					ret = write(i, kgem->batch, batch_end*sizeof(uint32_t));
 					close(i);
 				}
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 186eaa0..52bc6f2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -142,7 +142,7 @@ struct kgem {
 	uint16_t nreloc;
 	uint16_t nfence;
 	uint16_t wait;
-	uint16_t max_batch_size;
+	uint16_t batch_size;
 	uint16_t min_alignment;
 
 	uint32_t flush:1;
@@ -170,9 +170,9 @@ struct kgem {
 	void (*context_switch)(struct kgem *kgem, int new_mode);
 	void (*retire)(struct kgem *kgem);
 
-	uint32_t batch[4*1024];
+	uint32_t batch[64*1024-8];
 	struct drm_i915_gem_exec_object2 exec[256];
-	struct drm_i915_gem_relocation_entry reloc[612];
+	struct drm_i915_gem_relocation_entry reloc[4096];
 };
 
 #define KGEM_BATCH_RESERVED 1
@@ -180,7 +180,7 @@ struct kgem {
 #define KGEM_EXEC_RESERVED 1
 
 #define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
-#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED)
+#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
 #define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
 #define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)
 
commit 9281b80644ce76ad9e0f3f8f812cbae97c10814a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 23:25:22 2012 +0100

    sna/gen[345]: Clear used vertices when discarding unmappable vbo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index e73c707..39601d9 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1620,7 +1620,11 @@ static int gen3_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 
+	DBG(("%s: used=%d/%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vertex_size,
+	     sna->render.vbo ? sna->render.vbo->handle : 0));
 	assert(sna->render.vertex_used);
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
 
 	bo = sna->render.vbo;
 	if (bo) {
@@ -1668,8 +1672,9 @@ static void gen3_vertex_close(struct sna *sna)
 
 	assert(sna->render_state.gen3.vertex_offset == 0);
 
-	DBG(("%s: used=%d, vbo active? %d\n",
-	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+	DBG(("%s: used=%d/%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vertex_size,
+	     sna->render.vbo ? sna->render.vbo->handle : 0));
 
 	if (sna->render.vertex_used == 0) {
 		assert(sna->render.vbo == NULL);
@@ -1730,6 +1735,7 @@ static void gen3_vertex_close(struct sna *sna)
 	}
 
 	if (sna->render.vbo == NULL) {
+		DBG(("%s: resetting vbo\n", __FUNCTION__));
 		sna->render.vertex_used = 0;
 		sna->render.vertex_index = 0;
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1937,6 +1943,17 @@ gen3_render_composite_done(struct sna *sna,
 }
 
 static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
+static void
 gen3_render_reset(struct sna *sna)
 {
 	struct gen3_render_state *state = &sna->render_state.gen3;
@@ -1961,10 +1978,7 @@ gen3_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
@@ -1976,12 +1990,7 @@ gen3_render_retire(struct kgem *kgem)
 	sna = container_of(kgem, struct sna, kgem);
 	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
-		kgem_bo_destroy(kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
-		sna->render.vertex_used = 0;
-		sna->render.vertex_index = 0;
+		discard_vbo(sna);
 	}
 }
 
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 9cad75e..01651a5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -3224,6 +3224,17 @@ gen4_render_flush(struct sna *sna)
 	gen4_vertex_close(sna);
 }
 
+static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
 static void gen4_render_reset(struct sna *sna)
 {
 	sna->render_state.gen4.needs_invariant = TRUE;
@@ -3240,10 +3251,7 @@ static void gen4_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 54d0b22..0b63009 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3648,6 +3648,17 @@ gen5_render_context_switch(struct kgem *kgem,
 }
 
 static void
+discard_vbo(struct sna *sna)
+{
+	kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+	sna->render.vbo = NULL;
+	sna->render.vertices = sna->render.vertex_data;
+	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	sna->render.vertex_used = 0;
+	sna->render.vertex_index = 0;
+}
+
+static void
 gen5_render_retire(struct kgem *kgem)
 {
 	struct sna *sna;
@@ -3655,12 +3666,7 @@ gen5_render_retire(struct kgem *kgem)
 	sna = container_of(kgem, struct sna, kgem);
 	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
 		DBG(("%s: discarding vbo\n", __FUNCTION__));
-		kgem_bo_destroy(kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
-		sna->render.vertex_used = 0;
-		sna->render.vertex_index = 0;
+		discard_vbo(sna);
 	}
 }
 
@@ -3679,10 +3685,7 @@ static void gen5_render_reset(struct sna *sna)
 	if (sna->render.vbo &&
 	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
 		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
-		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
-		sna->render.vbo = NULL;
-		sna->render.vertices = sna->render.vertex_data;
-		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		discard_vbo(sna);
 	}
 }
 
commit a6ee376e93517659391905e6c9018b3bb735135d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 10:24:21 2012 +0100

    sna: Use the correct invocation of kgem_bo_destroy() for sanity-checks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6e0c3d0..0799ea7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2891,7 +2891,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 			return bo;
 
 		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
-			_kgem_bo_destroy(kgem, bo);
+			kgem_bo_destroy(kgem, bo);
 			return NULL;
 		}
 
commit c89c7e9a04314e40cee5514a182a8364c4f99374
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 11:54:50 2012 +0100

    sna: Update select timeout when installing a timer in the block handler
    
    The block handler is run after the timers are queried for their
    expiration and so if we install a timer in the block handler, we must
    set the timeout ourselves.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index d7a20b9..023c091 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -230,6 +230,7 @@ struct sna {
 	uint32_t timer_expire[NUM_TIMERS];
 	uint16_t timer_active;
 	uint16_t timer_ready;
+	struct timeval timer_tv;
 
 	int vblank_interval;
 
@@ -561,7 +562,7 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 
 Bool sna_accel_pre_init(struct sna *sna);
 Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
-void sna_accel_block_handler(struct sna *sna);
+void sna_accel_block_handler(struct sna *sna, struct timeval **tv);
 void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready);
 void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 274facb..b37ee59 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12372,7 +12372,7 @@ static void sna_accel_throttle(struct sna *sna)
 	kgem_throttle(&sna->kgem);
 }
 
-void sna_accel_block_handler(struct sna *sna)
+void sna_accel_block_handler(struct sna *sna, struct timeval **tv)
 {
 	sna->time = GetTimeInMillis();
 
@@ -12392,15 +12392,24 @@ void sna_accel_block_handler(struct sna *sna)
 	}
 
 	if (sna->timer_ready) {
+		int32_t timeout;
+
 		DBG(("%s: evaluating timers, ready=%x\n",
 		     __FUNCTION__, sna->timer_ready));
 		sna->timer_ready = 0;
-		TimerSet(sna->timer, 0,
-			 sna_timeout(sna->timer,
-				     sna->time,
-				     sna),
-			 sna_timeout, sna);
-		assert(sna->timer_ready == 0);
+		timeout = sna_timeout(sna->timer, sna->time, sna);
+		TimerSet(sna->timer, 0, timeout, sna_timeout, sna);
+		if (timeout) {
+			if (*tv == NULL) {
+				*tv = &sna->timer_tv;
+				goto set_tv;
+			}
+			if ((*tv)->tv_sec * 1000 + (*tv)->tv_usec / 1000 > timeout) {
+set_tv:
+				(*tv)->tv_sec = timeout / 1000;
+				(*tv)->tv_usec = timeout % 1000 * 1000;
+			}
+		}
 	}
 }
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 150e973..c213ff4 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -590,7 +590,7 @@ sna_block_handler(int i, pointer data, pointer timeout, pointer read_mask)
 	sna->BlockHandler(i, sna->BlockData, timeout, read_mask);
 
 	if (*tv == NULL || ((*tv)->tv_usec | (*tv)->tv_sec))
-		sna_accel_block_handler(sna);
+		sna_accel_block_handler(sna, tv);
 }
 
 static void
commit 4ba366ead3a133136554579fe9a604da1fc1da68
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon May 7 08:55:35 2012 +0100

    sna: Manually execute the timer as TimerForce does not run an inactive timer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3280490..274facb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11995,6 +11995,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 		}
 
 		if (sna->timer_ready & (1<<(FLUSH_TIMER))) {
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval;
 			return true;
@@ -12007,6 +12008,7 @@ static bool sna_accel_do_flush(struct sna *sna)
 			sna->timer_ready |= 1 << FLUSH_TIMER;
 			sna->timer_expire[FLUSH_TIMER] =
 				sna->time + sna->vblank_interval / 2;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
 		}
 	}
 
@@ -12017,6 +12019,7 @@ static bool sna_accel_do_expire(struct sna *sna)
 {
 	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
 		if (sna->timer_ready & (1<<(EXPIRE_TIMER))) {
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			sna->timer_expire[EXPIRE_TIMER] =
 				sna->time + MAX_INACTIVE_TIME * 1000;
 			return true;
@@ -12027,6 +12030,7 @@ static bool sna_accel_do_expire(struct sna *sna)
 			sna->timer_ready |= 1 << EXPIRE_TIMER;
 			sna->timer_expire[EXPIRE_TIMER] =
 				sna->time + MAX_INACTIVE_TIME * 1000;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
 		}
 	}
 
@@ -12039,6 +12043,7 @@ static bool sna_accel_do_inactive(struct sna *sna)
 		if (sna->timer_ready & (1<<(INACTIVE_TIMER))) {
 			sna->timer_expire[INACTIVE_TIMER] =
 				sna->time + 120 * 1000;
+			DBG(("%s (time=%ld), triggered\n", __FUNCTION__, (long)sna->time));
 			return true;
 		}
 	} else {
@@ -12047,6 +12052,7 @@ static bool sna_accel_do_inactive(struct sna *sna)
 			sna->timer_ready |= 1 << INACTIVE_TIMER;
 			sna->timer_expire[INACTIVE_TIMER] =
 				sna->time + 120 * 1000;
+			DBG(("%s (time=%ld), starting\n", __FUNCTION__, (long)sna->time));
 		}
 	}
 
@@ -12056,30 +12062,31 @@ static bool sna_accel_do_inactive(struct sna *sna)
 static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 {
 	struct sna *sna = arg;
-	CARD32 next = UINT32_MAX;
+	int32_t next = 0;
 	uint32_t active;
 	int i;
 
+	DBG(("%s: now=%d, active=%08x, ready=%08x\n",
+	     __FUNCTION__, now, sna->timer_active, sna->timer_ready));
 	active = sna->timer_active & ~sna->timer_ready;
 	if (active == 0)
 		return 0;
 
 	for (i = 0; i < NUM_TIMERS; i++) {
 		if (active & (1 << i)) {
-			if (now > sna->timer_expire[i]) {
+			int32_t delta = sna->timer_expire[i] - now;
+			DBG(("%s: timer[%d] expires in %d [%d]\n",
+			     __FUNCTION__, i, delta, sna->timer_expire[i]));
+			if (delta <= 0)
 				sna->timer_ready |= 1 << i;
-			} else {
-				if (sna->timer_expire[i] < next)
-					next = sna->timer_expire[i];
-			}
+			else if (next == 0 || delta < next)
+				next = delta;
 		}
 	}
 
-	if ((sna->timer_active & ~sna->timer_ready) == 0)
-		return 0;
-
-	assert(next != UINT32_MAX);
-	return next - now;
+	DBG(("%s: active=%08x, ready=%08x, next=+%d\n",
+	     __FUNCTION__, sna->timer_active, sna->timer_ready, next));
+	return next;
 }
 
 static bool sna_accel_flush(struct sna *sna)
@@ -12385,8 +12392,15 @@ void sna_accel_block_handler(struct sna *sna)
 	}
 
 	if (sna->timer_ready) {
+		DBG(("%s: evaluating timers, ready=%x\n",
+		     __FUNCTION__, sna->timer_ready));
 		sna->timer_ready = 0;
-		TimerForce(sna->timer);
+		TimerSet(sna->timer, 0,
+			 sna_timeout(sna->timer,
+				     sna->time,
+				     sna),
+			 sna_timeout, sna);
+		assert(sna->timer_ready == 0);
 	}
 }
 
commit 2c801c45dbad58d18f2673e17723825dda0c4e55
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 6 17:23:11 2012 +0100

    sna: Replace timerfd with OsTimer
    
    As timerfd is linux-specific, and OsTimer an OS-agnostic abstraction,
    replace the former with the latter. Arguably this has slightly better
    performance characteristics in select-bound loops.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 3770983..c031a26 100644
--- a/configure.ac
+++ b/configure.ac
@@ -265,8 +265,6 @@ if test "x$DEBUG" = xfull; then
         CFLAGS="$CFLAGS -O0 -ggdb3"
 fi
 
-AC_CHECK_HEADERS([sys/timerfd.h])
-
 DRIVER_NAME=intel
 AC_SUBST([DRIVER_NAME])
 AC_SUBST([moduledir])
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 3e4b8ec..d7a20b9 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -225,7 +225,9 @@ struct sna {
 	unsigned watch_flush;
 	unsigned flush;
 
-	int timer[NUM_TIMERS];
+	uint32_t time;
+	OsTimerPtr timer;
+	uint32_t timer_expire[NUM_TIMERS];
 	uint16_t timer_active;
 	uint16_t timer_ready;
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3e03934..3280490 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11964,165 +11964,123 @@ static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
 	return priv && priv->gpu_bo ? priv : NULL;
 }
 
-#if HAVE_SYS_TIMERFD_H && !FORCE_FLUSH
-#include <sys/timerfd.h>
-#include <errno.h>
-
-static uint64_t read_timer(int fd)
-{
-	uint64_t count = 0;
-	int ret = read(fd, &count, sizeof(count));
-	return count;
-	(void)ret;
-}
-
-static void sna_accel_drain_timer(struct sna *sna, int id)
+static void sna_accel_disarm_timer(struct sna *sna, int id)
 {
-	if (sna->timer_active & (1<<id))
-		read_timer(sna->timer[id]);
-}
-
-static void _sna_accel_disarm_timer(struct sna *sna, int id)
-{
-	struct itimerspec to;
-
-	DBG(("%s[%d] (time=%ld)\n", __FUNCTION__, id, (long)GetTimeInMillis()));
-
-	memset(&to, 0, sizeof(to));
-	timerfd_settime(sna->timer[id], 0, &to, NULL);
+	DBG(("%s[%d] (time=%ld)\n", __FUNCTION__, id, (long)sna->time));
 	sna->timer_active &= ~(1<<id);
+	sna->timer_ready &= ~(1<<id);
 }
 
-#define return_if_timer_active(id) do {					\
-	if (sna->timer_active & (1<<(id)))				\
-		return (sna->timer_ready & (1<<(id))) && read_timer(sna->timer[id]) > 0;			\
-} while (0)
-
-static Bool sna_accel_do_flush(struct sna *sna)
+static bool sna_accel_do_flush(struct sna *sna)
 {
-	struct itimerspec to;
 	struct sna_pixmap *priv;
 
 	priv = sna_accel_scanout(sna);
 	if (priv == NULL) {
 		DBG(("%s -- no scanout attached\n", __FUNCTION__));
-		return FALSE;
-	}
-
-	if (sna->kgem.flush_now) {
-		sna->kgem.flush_now = 0;
-		if (priv->gpu_bo->exec) {
-			DBG(("%s -- forcing flush\n", __FUNCTION__));
-			sna_accel_drain_timer(sna, FLUSH_TIMER);
-			return TRUE;
-		}
-	}
-
-	return_if_timer_active(FLUSH_TIMER);
-
-	if (priv->cpu_damage == NULL && priv->gpu_bo->exec == NULL) {
-		DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
-		return FALSE;
+		sna_accel_disarm_timer(sna, FLUSH_TIMER);
+		return false;
 	}
 
 	if (sna->flags & SNA_NO_DELAYED_FLUSH)
-		return TRUE;
-
-	if (sna->timer[FLUSH_TIMER] == -1)
-		return TRUE;
-
-	DBG(("%s, starting flush timer, at time=%ld\n",
-	     __FUNCTION__, (long)GetTimeInMillis()));
+		return true;
 
-	/* Initial redraw hopefully before this vblank */
-	to.it_value.tv_sec = 0;
-	to.it_value.tv_nsec = sna->vblank_interval / 2;
+	if (sna->timer_active & (1<<(FLUSH_TIMER))) {
+		if (sna->kgem.flush_now) {
+			sna->kgem.flush_now = 0;
+			if (priv->gpu_bo->exec) {
+				DBG(("%s -- forcing flush\n", __FUNCTION__));
+				sna->timer_ready |= 1 << FLUSH_TIMER;
+			}
+		}
 
-	/* Then periodic updates for every vblank */
-	to.it_interval.tv_sec = 0;
-	to.it_interval.tv_nsec = sna->vblank_interval;
-	timerfd_settime(sna->timer[FLUSH_TIMER], 0, &to, NULL);
+		if (sna->timer_ready & (1<<(FLUSH_TIMER))) {
+			sna->timer_expire[FLUSH_TIMER] =
+				sna->time + sna->vblank_interval;
+			return true;
+		}
+	} else {
+		if (priv->cpu_damage == NULL && priv->gpu_bo->exec == NULL) {
+			DBG(("%s -- no pending write to scanout\n", __FUNCTION__));
+		} else {
+			sna->timer_active |= 1 << FLUSH_TIMER;
+			sna->timer_ready |= 1 << FLUSH_TIMER;
+			sna->timer_expire[FLUSH_TIMER] =
+				sna->time + sna->vblank_interval / 2;
+		}
+	}
 
-	sna->timer_active |= 1 << FLUSH_TIMER;
-	return FALSE;
+	return false;
 }
 
-static Bool sna_accel_do_expire(struct sna *sna)
+static bool sna_accel_do_expire(struct sna *sna)
 {
-	struct itimerspec to;
-
-	return_if_timer_active(EXPIRE_TIMER);
-
-	if (!sna->kgem.need_expire)
-		return FALSE;
-
-	if (sna->timer[EXPIRE_TIMER] == -1)
-		return TRUE;
-
-	to.it_interval.tv_sec = MAX_INACTIVE_TIME;
-	to.it_interval.tv_nsec = 0;
-	to.it_value = to.it_interval;
-	timerfd_settime(sna->timer[EXPIRE_TIMER], 0, &to, NULL);
+	if (sna->timer_active & (1<<(EXPIRE_TIMER))) {
+		if (sna->timer_ready & (1<<(EXPIRE_TIMER))) {
+			sna->timer_expire[EXPIRE_TIMER] =
+				sna->time + MAX_INACTIVE_TIME * 1000;
+			return true;
+		}
+	} else {
+		if (sna->kgem.need_expire) {
+			sna->timer_active |= 1 << EXPIRE_TIMER;
+			sna->timer_ready |= 1 << EXPIRE_TIMER;
+			sna->timer_expire[EXPIRE_TIMER] =
+				sna->time + MAX_INACTIVE_TIME * 1000;
+		}
+	}
 
-	sna->timer_active |= 1 << EXPIRE_TIMER;
-	return FALSE;
+	return false;
 }
 
-static Bool sna_accel_do_inactive(struct sna *sna)
+static bool sna_accel_do_inactive(struct sna *sna)
 {
-	struct itimerspec to;
-
-	return_if_timer_active(INACTIVE_TIMER);
-
-	if (list_is_empty(&sna->active_pixmaps))
-		return FALSE;
-
-	if (sna->timer[INACTIVE_TIMER] == -1)
-		return FALSE;
-
-	/* Periodic expiration after every 2 minutes. */
-	to.it_interval.tv_sec = 120;
-	to.it_interval.tv_nsec = 0;
-	to.it_value = to.it_interval;
-	timerfd_settime(sna->timer[INACTIVE_TIMER], 0, &to, NULL);
+	if (sna->timer_active & (1<<(INACTIVE_TIMER))) {
+		if (sna->timer_ready & (1<<(INACTIVE_TIMER))) {
+			sna->timer_expire[INACTIVE_TIMER] =
+				sna->time + 120 * 1000;
+			return true;
+		}
+	} else {
+		if (!list_is_empty(&sna->active_pixmaps)) {
+			sna->timer_active |= 1 << INACTIVE_TIMER;
+			sna->timer_ready |= 1 << INACTIVE_TIMER;
+			sna->timer_expire[INACTIVE_TIMER] =
+				sna->time + 120 * 1000;
+		}
+	}
 
-	sna->timer_active |= 1 << INACTIVE_TIMER;
-	return FALSE;
+	return false;
 }
 
-static void sna_accel_create_timers(struct sna *sna)
+static CARD32 sna_timeout(OsTimerPtr timer, CARD32 now, pointer arg)
 {
-	int id;
+	struct sna *sna = arg;
+	CARD32 next = UINT32_MAX;
+	uint32_t active;
+	int i;
 
-	/* XXX Can we replace this with OSTimer provided by dix? */
+	active = sna->timer_active & ~sna->timer_ready;
+	if (active == 0)
+		return 0;
 
-#ifdef CLOCK_MONOTONIC_COARSE
-	for (id = 0; id < NUM_FINE_TIMERS; id++)
-		sna->timer[id] = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
-	for (; id < NUM_TIMERS; id++) {
-		sna->timer[id] = timerfd_create(CLOCK_MONOTONIC_COARSE, TFD_NONBLOCK);
-		if (sna->timer[id] == -1)
-			sna->timer[id] = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
+	for (i = 0; i < NUM_TIMERS; i++) {
+		if (active & (1 << i)) {
+			if (now > sna->timer_expire[i]) {
+				sna->timer_ready |= 1 << i;
+			} else {
+				if (sna->timer_expire[i] < next)
+					next = sna->timer_expire[i];
+			}
+		}
 	}
-#else
-	for (id = 0; id < NUM_TIMERS; id++)
-		sna->timer[id] = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
-#endif
-}
-#else
-static void sna_accel_create_timers(struct sna *sna)
-{
-	int id;
 
-	for (id = 0; id < NUM_TIMERS; id++)
-		sna->timer[id] = -1;
+	if ((sna->timer_active & ~sna->timer_ready) == 0)
+		return 0;
+
+	assert(next != UINT32_MAX);
+	return next - now;
 }
-static Bool sna_accel_do_flush(struct sna *sna) { return sna_accel_scanout(sna) != NULL; }
-static Bool sna_accel_do_expire(struct sna *sna) { return sna->kgem.need_expire; }
-static Bool sna_accel_do_inactive(struct sna *sna) { return FALSE; }
-static void sna_accel_drain_timer(struct sna *sna, int id) { }
-static void _sna_accel_disarm_timer(struct sna *sna, int id) { }
-#endif
 
 static bool sna_accel_flush(struct sna *sna)
 {
@@ -12131,14 +12089,14 @@ static bool sna_accel_flush(struct sna *sna)
 	bool busy = priv->cpu_damage || need_throttle;
 
 	DBG(("%s (time=%ld), cpu damage? %p, exec? %d nbatch=%d, busy? %d, need_throttle=%d\n",
-	     __FUNCTION__, (long)GetTimeInMillis(),
+	     __FUNCTION__, (long)sna->time,
 	     priv->cpu_damage,
 	     priv->gpu_bo->exec != NULL,
 	     sna->kgem.nbatch,
 	     sna->kgem.busy, need_throttle));
 
 	if (!sna->kgem.busy && !busy)
-		_sna_accel_disarm_timer(sna, FLUSH_TIMER);
+		sna_accel_disarm_timer(sna, FLUSH_TIMER);
 	sna->kgem.busy = busy;
 
 	if (priv->cpu_damage)
@@ -12152,10 +12110,10 @@ static bool sna_accel_flush(struct sna *sna)
 
 static void sna_accel_expire(struct sna *sna)
 {
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)GetTimeInMillis()));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
 
 	if (!kgem_expire_cache(&sna->kgem))
-		_sna_accel_disarm_timer(sna, EXPIRE_TIMER);
+		sna_accel_disarm_timer(sna, EXPIRE_TIMER);
 }
 
 static void sna_accel_inactive(struct sna *sna)
@@ -12163,7 +12121,7 @@ static void sna_accel_inactive(struct sna *sna)
 	struct sna_pixmap *priv;
 	struct list preserve;
 
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)GetTimeInMillis()));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
 
 #if DEBUG_ACCEL
 	{
@@ -12252,22 +12210,12 @@ static void sna_accel_inactive(struct sna *sna)
 	if (list_is_empty(&sna->inactive_clock[1]) &&
 	    list_is_empty(&sna->inactive_clock[0]) &&
 	    list_is_empty(&sna->active_pixmaps))
-		_sna_accel_disarm_timer(sna, INACTIVE_TIMER);
-}
-
-static void sna_accel_install_timers(struct sna *sna)
-{
-	int n;
-
-	for (n = 0; n < NUM_TIMERS; n++) {
-		if (sna->timer[n] != -1)
-			AddGeneralSocket(sna->timer[n]);
-	}
+		sna_accel_disarm_timer(sna, INACTIVE_TIMER);
 }
 
 Bool sna_accel_pre_init(struct sna *sna)
 {
-	sna_accel_create_timers(sna);
+	sna->timer = TimerSet(NULL, 0, 0, sna_timeout, sna);
 	return TRUE;
 }
 
@@ -12287,7 +12235,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	list_init(&sna->inactive_clock[1]);
 
 	AddGeneralSocket(sna->kgem.fd);
-	sna_accel_install_timers(sna);
 
 	screen->CreateGC = sna_create_gc;
 	screen->GetImage = sna_get_image;
@@ -12413,17 +12360,17 @@ static void sna_accel_throttle(struct sna *sna)
 	if (sna->flags & SNA_NO_THROTTLE)
 		return;
 
-	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)GetTimeInMillis()));
+	DBG(("%s (time=%ld)\n", __FUNCTION__, (long)sna->time));
 
 	kgem_throttle(&sna->kgem);
 }
 
 void sna_accel_block_handler(struct sna *sna)
 {
-	if (sna_accel_do_flush(sna)) {
-		if (sna_accel_flush(sna))
-			sna_accel_throttle(sna);
-	}
+	sna->time = GetTimeInMillis();
+
+	if (sna_accel_do_flush(sna) && sna_accel_flush(sna))
+		sna_accel_throttle(sna);
 
 	if (sna_accel_do_expire(sna))
 		sna_accel_expire(sna);
@@ -12437,31 +12384,20 @@ void sna_accel_block_handler(struct sna *sna)
 		sna->watch_flush = 0;
 	}
 
-	sna->timer_ready = 0;
+	if (sna->timer_ready) {
+		sna->timer_ready = 0;
+		TimerForce(sna->timer);
+	}
 }
 
 void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 {
-	int id, active;
-
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
 	if (sna->kgem.need_purge)
 		kgem_purge_cache(&sna->kgem);
-
-	active = sna->timer_active & ~sna->timer_ready;
-	for (id = 0; id < NUM_TIMERS; id++)
-		if (active & (1 << id) && FD_ISSET(sna->timer[id], ready))
-			sna->timer_ready |= 1 << id;
 }
 
 void sna_accel_free(struct sna *sna)
 {
-	int id;
-
-	for (id = 0; id < NUM_TIMERS; id++)
-		if (sna->timer[id] != -1) {
-			close(sna->timer[id]);
-			sna->timer[id] = -1;
-		}
 }
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 6287cd9..65d1992 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -643,8 +643,8 @@ static void update_flush_interval(struct sna *sna)
 	if (max_vrefresh == 0)
 		max_vrefresh = 40;
 
-	sna->vblank_interval = 1000 * 1000 * 1000 / max_vrefresh; /* Hz -> ns */
-	DBG(("max_vrefresh=%d, vblank_interval=%d ns\n",
+	sna->vblank_interval = 1000 / max_vrefresh; /* Hz -> ms */
+	DBG(("max_vrefresh=%d, vblank_interval=%d ms\n",
 	       max_vrefresh, sna->vblank_interval));
 }
 
commit 74d2707de4a0f94d2143f1e9c60762e4167b1ea6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 6 21:51:27 2012 +0100

    sna: Remove short-circuiting for large font fallbacks
    
    Unlike the fallback for an unhandled depth, we need to ensure that the
    pixmaps are mapped to the CPU before calling the fb routines.
    
    Reported-by: Toralf Förster <toralf.foerster at gmx.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49558
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1bc34dc..3e03934 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10796,9 +10796,6 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
-	if (sna_font_too_large(gc->font))
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -10828,8 +10825,11 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_POLY_TEXT8)
 		goto force_fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto force_fallback;
+
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		return false;
+		goto force_fallback;
 
 	if (!gc_is_solid(gc, &fg))
 		goto force_fallback;
@@ -10888,9 +10888,6 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
-	if (sna_font_too_large(gc->font))
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -10920,8 +10917,11 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_POLY_TEXT16)
 		goto force_fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto force_fallback;
+
 	if (!PM_IS_SOLID(drawable, gc->planemask))
-		return false;
+		goto force_fallback;
 
 	if (!gc_is_solid(gc, &fg))
 		goto force_fallback;
@@ -10980,9 +10980,6 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
-	if (sna_font_too_large(gc->font))
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -11024,6 +11021,9 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_IMAGE_TEXT8)
 		goto force_fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto force_fallback;
+
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto force_fallback;
 
@@ -11074,9 +11074,6 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
-	if (sna_font_too_large(gc->font))
-		goto fallback;
-
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -11118,6 +11115,9 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_IMAGE_TEXT16)
 		goto force_fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto force_fallback;
+
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto force_fallback;
 
commit c5b6741d3729c6867702ab64a6c59cb8052c0ef3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun May 6 12:40:54 2012 +0100

    sna/gen2+: Fix typo for computing redirected extents for render copy
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 0ad346e..9717aac 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2909,14 +2909,14 @@ fallback:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 680d36f..e73c707 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -4069,14 +4069,14 @@ fallback_blt:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index b8a2459..9cad75e 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2655,14 +2655,14 @@ fallback_blt:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 1fb7f65..54d0b22 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2962,14 +2962,14 @@ fallback_blt:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 38fb024..55673bf 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3339,14 +3339,14 @@ fallback_blt:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 6c9a833..e128f5c 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3422,14 +3422,14 @@ fallback_blt:
 		int i;
 
 		for (i = 1; i < n; i++) {
-			if (extents.x1 < box[i].x1)
+			if (box[i].x1 < extents.x1)
 				extents.x1 = box[i].x1;
-			if (extents.y1 < box[i].y1)
+			if (box[i].y1 < extents.y1)
 				extents.y1 = box[i].y1;
 
-			if (extents.x2 > box[i].x2)
+			if (box[i].x2 > extents.x2)
 				extents.x2 = box[i].x2;
-			if (extents.y2 > box[i].y2)
+			if (box[i].y2 > extents.y2)
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
commit 771090f25db702d25ebbd3f2b44429cf0acfe8fd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 4 20:56:37 2012 +0100

    sna: Add a pair of asserts to track down a NULL pointer dereference
    
    Looks like the assumption for the location of the data is invalid,
    allocation failure, perhaps?
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index dee6608..880e173 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -460,6 +460,8 @@ static struct kgem_bo *upload(struct sna *sna,
 			priv->mapped = false;
 		}
 		if (pixmap->devPrivate.ptr == NULL) {
+			assert(priv->ptr);
+			assert(priv->stride);
 			pixmap->devPrivate.ptr = priv->ptr;
 			pixmap->devKind = priv->stride;
 		}
commit d376a960df7a081a5d449f77b81ae13223b98929
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 4 10:09:46 2012 +0100

    sna/dri: Only track a single pending flip across all pipes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index e07a115..3e4b8ec 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -249,7 +249,7 @@ struct sna {
 	} mode;
 
 	struct sna_dri {
-		void *flip_pending[2];
+		void *flip_pending;
 	} dri;
 
 	unsigned int tiling;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 32602d5..2b97e68 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -937,6 +937,9 @@ can_flip(struct sna * sna,
 	WindowPtr win = (WindowPtr)draw;
 	PixmapPtr pixmap;
 
+	if (!sna->scrn->vtSema)
+		return FALSE;
+
 	if (draw->type == DRAWABLE_PIXMAP)
 		return FALSE;
 
@@ -1109,7 +1112,7 @@ sna_dri_flip_continue(struct sna *sna,
 
 	info->next_front.name = 0;
 
-	sna->dri.flip_pending[info->pipe] = info;
+	sna->dri.flip_pending = info;
 
 	return TRUE;
 }
@@ -1171,8 +1174,8 @@ static void sna_dri_flip_event(struct sna *sna,
 		break;
 
 	case DRI2_FLIP_THROTTLE:
-		assert(sna->dri.flip_pending[flip->pipe] == flip);
-		sna->dri.flip_pending[flip->pipe] = NULL;
+		assert(sna->dri.flip_pending == flip);
+		sna->dri.flip_pending = NULL;
 
 		if (flip->next_front.name &&
 		    flip->drawable_id &&
@@ -1204,9 +1207,9 @@ static void sna_dri_flip_event(struct sna *sna,
 	case DRI2_ASYNC_FLIP:
 		DBG(("%s: async swap flip completed on pipe %d, pending? %d, new? %d\n",
 		     __FUNCTION__, flip->pipe,
-		     sna->dri.flip_pending[flip->pipe] != NULL,
+		     sna->dri.flip_pending != NULL,
 		     flip->front->name != flip->old_front.name));
-		assert(sna->dri.flip_pending[flip->pipe] == flip);
+		assert(sna->dri.flip_pending == flip);
 
 		if (flip->front->name != flip->next_front.name) {
 			DBG(("%s: async flip continuing\n", __FUNCTION__));
@@ -1240,7 +1243,7 @@ finish_async_flip:
 			flip->next_front.bo = NULL;
 
 			DBG(("%s: async flip completed\n", __FUNCTION__));
-			sna->dri.flip_pending[flip->pipe] = NULL;
+			sna->dri.flip_pending = NULL;
 			sna_dri_frame_event_info_free(flip);
 		}
 		break;
@@ -1326,9 +1329,9 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		int type = DRI2_FLIP_THROTTLE;
 
 		DBG(("%s: performing immediate swap on pipe %d, pending? %d\n",
-		     __FUNCTION__, pipe, sna->dri.flip_pending[pipe] != NULL));
+		     __FUNCTION__, pipe, sna->dri.flip_pending != NULL));
 
-		info = sna->dri.flip_pending[pipe];
+		info = sna->dri.flip_pending;
 		if (info) {
 			if (info->drawable_id == draw->id) {
 				DBG(("%s: chaining flip\n", __FUNCTION__));
@@ -1382,7 +1385,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 				       CREATE_EXACT);
 		info->back->name = kgem_bo_flink(&sna->kgem,
 						 get_private(info->back)->bo);
-		sna->dri.flip_pending[info->pipe] = info;
+		sna->dri.flip_pending = info;
 
 		DRI2SwapComplete(info->client, draw, 0, 0, 0,
 				 DRI2_EXCHANGE_COMPLETE,
@@ -1697,16 +1700,17 @@ sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 	struct sna *sna = to_sna_from_drawable(draw);
 	struct sna_dri_frame_event *info;
 	struct kgem_bo *bo;
-	int name, pipe;
+	int name;
 
 	DBG(("%s()\n", __FUNCTION__));
 
-	pipe = sna_dri_get_pipe(draw);
-	if (pipe == -1) {
-		PixmapPtr pixmap = get_drawable_pixmap(draw);
+	if (!sna->scrn->vtSema) {
+		PixmapPtr pixmap;
 
+exchange:
 		DBG(("%s: unattached, exchange pixmaps\n", __FUNCTION__));
 
+		pixmap = get_drawable_pixmap(draw);
 		set_bo(pixmap, get_private(back)->bo);
 		sna_dri_exchange_attachment(front, back);
 		get_private(back)->pixmap = pixmap;
@@ -1733,10 +1737,14 @@ blit:
 	bo = NULL;
 	name = 0;
 
-	info = sna->dri.flip_pending[pipe];
+	info = sna->dri.flip_pending;
 	if (info == NULL) {
-		DBG(("%s: no pending flip on pipe %d, so updating scanout\n",
-		     __FUNCTION__, pipe));
+		int pipe = sna_dri_get_pipe(draw);
+		if (pipe == -1)
+			goto exchange;
+
+		DBG(("%s: no pending flip, so updating scanout\n",
+		     __FUNCTION__));
 
 		info = calloc(1, sizeof(struct sna_dri_frame_event));
 		if (!info)
@@ -1801,7 +1809,7 @@ blit:
 	info->back->name = name;
 
 	set_bo(sna->front, get_private(info->front)->bo);
-	sna->dri.flip_pending[info->pipe] = info;
+	sna->dri.flip_pending = info;
 
 	DRI2SwapComplete(client, draw, 0, 0, 0,
 			 DRI2_EXCHANGE_COMPLETE, func, data);
commit 450592f989efd0d3bc9ef2de245fce0a180e91a2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 4 09:50:19 2012 +0100

    sna: Cache the framebuffer id
    
    Also fixup a weakness of only tracking scanout with a single bit, as we
    used to clear it forcibly after every flip.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 666a23f..6e0c3d0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1088,6 +1088,25 @@ inline static void kgem_bo_remove_from_active(struct kgem *kgem,
 	assert(list_is_empty(&bo->vma));
 }
 
+static void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (!bo->scanout)
+		return;
+
+	assert(bo->proxy == NULL);
+
+	DBG(("%s: handle=%d, fb=%d\n", __FUNCTION__, bo->handle, bo->delta));
+	if (bo->delta) {
+		drmModeRmFB(kgem->fd, bo->delta);
+		bo->delta = 0;
+	}
+
+	bo->scanout = false;
+	bo->needs_flush = true;
+	bo->flush = false;
+	bo->reusable = true;
+}
+
 static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
@@ -1096,6 +1115,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->refcnt == 0);
 
 	bo->binding.offset = 0;
+	kgem_bo_clear_scanout(kgem, bo);
 
 	if (NO_CACHE)
 		goto destroy;
@@ -4151,18 +4171,6 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
 	}
 }
 
-void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
-{
-	bo->needs_flush = true;
-	bo->flush = false;
-
-	if (!bo->scanout)
-		return;
-
-	bo->scanout = false;
-	bo->reusable = true;
-}
-
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
 		struct kgem_bo *src,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9eac68e..186eaa0 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -509,8 +509,6 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 bool kgem_buffer_is_inplace(struct kgem_bo *bo);
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
 
-void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo);
-
 void kgem_throttle(struct kgem *kgem);
 #define MAX_INACTIVE_TIME 10
 bool kgem_expire_cache(struct kgem *kgem);
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 5272a08..e07a115 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -318,8 +318,6 @@ extern int sna_page_flip(struct sna *sna,
 
 extern PixmapPtr sna_set_screen_pixmap(struct sna *sna, PixmapPtr pixmap);
 
-void sna_mode_delete_fb(struct sna *sna, uint32_t fb);
-
 constant static inline struct sna *
 to_sna(ScrnInfoPtr scrn)
 {
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 5275d4a..6287cd9 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -151,6 +151,40 @@ int sna_crtc_to_plane(xf86CrtcPtr crtc)
 	return sna_crtc->plane;
 }
 
+static unsigned get_fb(struct sna *sna, struct kgem_bo *bo,
+		       int width, int height)
+{
+	ScrnInfoPtr scrn = sna->scrn;
+	int ret;
+
+	assert(bo->proxy == NULL);
+	if (bo->delta) {
+		DBG(("%s: reusing fb=%d for handle=%d\n",
+		     __FUNCTION__, bo->delta, bo->handle));
+		return bo->delta;
+	}
+
+	DBG(("%s: create fb %dx%d@%d/%d\n",
+	     __FUNCTION__, width, height, scrn->depth, scrn->bitsPerPixel));
+
+	assert(bo->tiling != I915_TILING_Y);
+	ret = drmModeAddFB(sna->kgem.fd,
+			   width, height,
+			   scrn->depth, scrn->bitsPerPixel,
+			   bo->pitch, bo->handle,
+			   &bo->delta);
+	if (ret < 0) {
+		ErrorF("%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
+		       __FUNCTION__,
+		       width, height,
+		       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+		return 0;
+	}
+
+	bo->scanout = true;
+	return bo->delta;
+}
+
 static uint32_t gem_create(int fd, int size)
 {
 	struct drm_i915_gem_create create;
@@ -434,10 +468,6 @@ sna_crtc_restore(struct sna *sna)
 	if (!bo)
 		return;
 
-	assert(bo->tiling != I915_TILING_Y);
-	bo->scanout = true;
-	bo->domain = DOMAIN_NONE;
-
 	DBG(("%s: create fb %dx%d@%d/%d\n",
 	     __FUNCTION__,
 	     sna->front->drawable.width,
@@ -446,13 +476,10 @@ sna_crtc_restore(struct sna *sna)
 	     sna->front->drawable.bitsPerPixel));
 
 	sna_mode_remove_fb(sna);
-	if (drmModeAddFB(sna->kgem.fd,
-			 sna->front->drawable.width,
-			 sna->front->drawable.height,
-			 sna->front->drawable.depth,
-			 sna->front->drawable.bitsPerPixel,
-			 bo->pitch, bo->handle,
-			 &sna->mode.fb_id))
+	sna->mode.fb_id = get_fb(sna, bo,
+				 sna->front->drawable.width,
+				 sna->front->drawable.height);
+	if (sna->mode.fb_id == 0)
 		return;
 
 	DBG(("%s: handle %d attached to fb %d\n",
@@ -468,6 +495,7 @@ sna_crtc_restore(struct sna *sna)
 			return;
 	}
 
+	bo->domain = DOMAIN_NONE;
 	scrn->displayWidth = bo->pitch / sna->mode.cpp;
 	sna->mode.fb_pixmap = sna->front->drawable.serialNumber;
 }
@@ -652,28 +680,16 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		if (!bo)
 			return FALSE;
 
-		DBG(("%s: create fb %dx%d@%d/%d\n",
-		     __FUNCTION__,
-		     scrn->virtualX, scrn->virtualY,
-		     scrn->depth, scrn->bitsPerPixel));
-
-		assert(bo->tiling != I915_TILING_Y);
-		ret = drmModeAddFB(sna->kgem.fd,
-				   scrn->virtualX, scrn->virtualY,
-				   scrn->depth, scrn->bitsPerPixel,
-				   bo->pitch, bo->handle,
-				   &sna_mode->fb_id);
-		if (ret < 0) {
-			ErrorF("%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
-			       __FUNCTION__,
-			       scrn->virtualX, scrn->virtualY,
-			       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+		/* recreate the fb in case the size has changed */
+		assert(bo->delta == 0);
+		sna_mode->fb_id = get_fb(sna, bo,
+					 scrn->virtualX, scrn->virtualY);
+		if (sna_mode->fb_id == 0)
 			return FALSE;
-		}
 
 		DBG(("%s: handle %d attached to fb %d\n",
 		     __FUNCTION__, bo->handle, sna_mode->fb_id));
-		bo->scanout = true;
+
 		bo->domain = DOMAIN_NONE;
 		sna_mode->fb_pixmap = sna->front->drawable.serialNumber;
 	}
@@ -773,22 +789,14 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 		return NULL;
 	}
 
-	assert(bo->tiling != I915_TILING_Y);
-	if (drmModeAddFB(sna->kgem.fd,
-			 width, height, scrn->depth, scrn->bitsPerPixel,
-			 bo->pitch, bo->handle,
-			 &sna_crtc->shadow_fb_id)) {
-		ErrorF("%s: failed to add rotate  fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
-		       __FUNCTION__,
-		       width, height,
-		       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+	sna_crtc->shadow_fb_id = get_fb(sna, bo, width, height);
+	if (sna_crtc->shadow_fb_id == 0) {
 		scrn->pScreen->DestroyPixmap(shadow);
 		return NULL;
 	}
 
 	DBG(("%s: attached handle %d to fb %d\n",
 	     __FUNCTION__, bo->handle, sna_crtc->shadow_fb_id));
-	bo->scanout = true;
 	bo->domain = DOMAIN_NONE;
 	return sna_crtc->shadow = shadow;
 }
@@ -813,10 +821,8 @@ sna_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr pixmap, void *data)
 	DBG(("%s(fb=%d, handle=%d)\n", __FUNCTION__,
 	     sna_crtc->shadow_fb_id, sna_pixmap_get_bo(pixmap)->handle));
 
-	drmModeRmFB(sna->kgem.fd, sna_crtc->shadow_fb_id);
 	sna_crtc->shadow_fb_id = 0;
 
-	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(pixmap));
 	pixmap->drawable.pScreen->DestroyPixmap(pixmap);
 	sna_crtc->shadow = NULL;
 }
@@ -1702,23 +1708,16 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	if (!bo)
 		goto fail;
 
-	assert(bo->tiling != I915_TILING_Y);
-	if (drmModeAddFB(sna->kgem.fd, width, height,
-			 scrn->depth, scrn->bitsPerPixel,
-			 bo->pitch, bo->handle,
-			 &mode->fb_id)) {
-		ErrorF("%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
-		       __FUNCTION__,
-		       width, height,
-		       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+	assert(bo->delta == 0);
+
+	mode->fb_id = get_fb(sna, bo, width, height);
+	if (mode->fb_id == 0)
 		goto fail;
-	}
 
 	DBG(("%s: handle %d, pixmap serial %lu attached to fb %d\n",
 	     __FUNCTION__, bo->handle,
 	     sna->front->drawable.serialNumber, mode->fb_id));
 
-	bo->scanout = true;
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
 
@@ -1739,17 +1738,12 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	assert(scrn->pScreen->GetScreenPixmap(scrn->pScreen) == sna->front);
 	assert(scrn->pScreen->GetWindowPixmap(scrn->pScreen->root) == sna->front);
 
-	if (old_fb_id)
-		drmModeRmFB(sna->kgem.fd, old_fb_id);
-	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(old_front));
 	scrn->pScreen->DestroyPixmap(old_front);
 
 	return TRUE;
 
 fail:
 	DBG(("%s: restoring original front pixmap and fb\n", __FUNCTION__));
-	if (old_fb_id != mode->fb_id)
-		drmModeRmFB(sna->kgem.fd, mode->fb_id);
 	mode->fb_id = old_fb_id;
 
 	if (sna->front)
@@ -1843,15 +1837,9 @@ sna_page_flip(struct sna *sna,
 	/*
 	 * Create a new handle for the back buffer
 	 */
-	assert(bo->tiling != I915_TILING_Y);
-	if (drmModeAddFB(sna->kgem.fd, scrn->virtualX, scrn->virtualY,
-			 scrn->depth, scrn->bitsPerPixel,
-			 bo->pitch, bo->handle,
-			 &mode->fb_id)) {
-		ErrorF("%s: failed to add fb: %dx%d depth=%d, bpp=%d, pitch=%d\n",
-		       __FUNCTION__,
-		       scrn->virtualX, scrn->virtualY,
-		       scrn->depth, scrn->bitsPerPixel, bo->pitch);
+	mode->fb_id = get_fb(sna, bo, scrn->virtualX, scrn->virtualY);
+	if (mode->fb_id == 0) {
+		mode->fb_id = *old_fb;
 		return 0;
 	}
 
@@ -1871,23 +1859,14 @@ sna_page_flip(struct sna *sna,
 	 */
 	count = do_page_flip(sna, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
-	if (count) {
-		bo->scanout = true;
+	if (count)
 		bo->domain = DOMAIN_NONE;
-	} else {
-		drmModeRmFB(sna->kgem.fd, mode->fb_id);
+	else
 		mode->fb_id = *old_fb;
-	}
 
 	return count;
 }
 
-void sna_mode_delete_fb(struct sna *sna, uint32_t fb)
-{
-	if (fb)
-		drmModeRmFB(sna->kgem.fd, fb);
-}
-
 static const xf86CrtcConfigFuncsRec sna_crtc_config_funcs = {
 	sna_crtc_resize
 };
@@ -1932,11 +1911,8 @@ sna_mode_remove_fb(struct sna *sna)
 	DBG(("%s: deleting fb id %d for pixmap serial %d\n",
 	     __FUNCTION__, mode->fb_id,mode->fb_pixmap));
 
-	if (mode->fb_id) {
-		drmModeRmFB(sna->kgem.fd, mode->fb_id);
-		mode->fb_id = 0;
-		mode->fb_pixmap = 0;
-	}
+	mode->fb_id = 0;
+	mode->fb_pixmap = 0;
 }
 
 void
@@ -1957,7 +1933,6 @@ sna_mode_fini(struct sna *sna)
 #endif
 
 	sna_mode_remove_fb(sna);
-	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(sna->front));
 
 	/* mode->shadow_fb_id should have been destroyed already */
 }
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index d5eefcb..32602d5 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -336,7 +336,6 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 		}
 
 		private->bo->flush = 0;
-		kgem_bo_clear_scanout(&sna->kgem, private->bo); /* paranoia */
 		kgem_bo_destroy(&sna->kgem, private->bo);
 
 		free(buffer);
@@ -392,7 +391,6 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	sna_damage_destroy(&priv->cpu_damage);
 	priv->undamaged = false;
 
-	kgem_bo_clear_scanout(&sna->kgem, priv->gpu_bo); /* paranoia */
 	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 	priv->gpu_bo = ref(bo);
 }
@@ -860,18 +858,10 @@ sna_dri_add_frame_event(struct sna_dri_frame_event *info)
 static void
 sna_dri_frame_event_release_bo(struct kgem *kgem, struct kgem_bo *bo)
 {
-	kgem_bo_clear_scanout(kgem, bo);
 	kgem_bo_destroy(kgem, bo);
 }
 
 static void
-sna_dri_frame_event_finish(struct sna_dri_frame_event *info)
-{
-	sna_mode_delete_fb(info->sna, info->old_fb);
-	kgem_bo_clear_scanout(&info->sna->kgem, info->old_front.bo);
-}
-
-static void
 sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 {
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
@@ -1177,13 +1167,10 @@ static void sna_dri_flip_event(struct sna *sna,
 					 flip->event_data);
 		}
 
-		sna_dri_frame_event_finish(flip);
 		sna_dri_frame_event_info_free(flip);
 		break;
 
 	case DRI2_FLIP_THROTTLE:
-		sna_dri_frame_event_finish(flip);
-
 		assert(sna->dri.flip_pending[flip->pipe] == flip);
 		sna->dri.flip_pending[flip->pipe] = NULL;
 
@@ -1221,8 +1208,6 @@ static void sna_dri_flip_event(struct sna *sna,
 		     flip->front->name != flip->old_front.name));
 		assert(sna->dri.flip_pending[flip->pipe] == flip);
 
-		sna_dri_frame_event_finish(flip);
-
 		if (flip->front->name != flip->next_front.name) {
 			DBG(("%s: async flip continuing\n", __FUNCTION__));
 
@@ -1801,7 +1786,6 @@ blit:
 		}
 		info->front->name = info->back->name;
 		get_private(info->front)->bo = get_private(info->back)->bo;
-		__kgem_flush(&sna->kgem, get_private(info->back)->bo);
 	}
 
 	if (bo == NULL) {
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 7b3cfce..150e973 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -784,9 +784,6 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 
 	sna_mode_remove_fb(sna);
 	if (sna->front) {
-		struct kgem_bo *bo = sna_pixmap_get_bo(sna->front);
-		if (bo)
-			kgem_bo_clear_scanout(&sna->kgem, bo); /* valgrind */
 		screen->DestroyPixmap(sna->front);
 		sna->front = NULL;
 	}
commit 29d035279b2fe98d5ba9cf01125faea34d36fb76
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri May 4 09:11:31 2012 +0100

    sna/dri: pageflip unref debugging
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 956a038..5272a08 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -130,6 +130,8 @@ struct sna_pixmap {
 	uint32_t stride;
 	uint32_t clear_color;
 
+	uint32_t flush;
+
 #define SOURCE_BIAS 4
 	uint16_t source_count;
 	uint8_t pinned :1;
@@ -138,7 +140,6 @@ struct sna_pixmap {
 	uint8_t undamaged :1;
 	uint8_t create :3;
 	uint8_t header :1;
-	uint8_t flush :1;
 };
 
 struct sna_glyph {
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 1ad2ff1..d5eefcb 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -163,7 +163,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	if (priv == NULL)
 		return NULL;
 
-	if (priv->flush)
+	if (priv->flush++)
 		return priv->gpu_bo;
 
 	tiling = color_tiling(sna, &pixmap->drawable);
@@ -181,7 +181,6 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
-	priv->flush = true;
 
 	return priv->gpu_bo;
 }
@@ -317,17 +316,21 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 	if (buffer == NULL)
 		return;
 
+	DBG(("%s: %p [handle=%d] -- refcnt=%d, pixmap=%d\n",
+	     __FUNCTION__, buffer, private->bo->handle, private->refcnt,
+	     private->pixmap ? private->pixmap->drawable.serialNumber : 0));
+
 	if (--private->refcnt == 0) {
 		if (private->pixmap) {
 			ScreenPtr screen = private->pixmap->drawable.pScreen;
 			struct sna_pixmap *priv = sna_pixmap(private->pixmap);
 
 			/* Undo the DRI markings on this pixmap */
-			assert(priv->flush);
-			list_del(&priv->list);
-			sna_accel_watch_flush(sna, -1);
-			priv->pinned = private->pixmap == sna->front;
-			priv->flush = false;
+			if (priv->flush && --priv->flush == 0) {
+				list_del(&priv->list);
+				sna_accel_watch_flush(sna, -1);
+				priv->pinned = private->pixmap == sna->front;
+			}
 
 			screen->DestroyPixmap(private->pixmap);
 		}
@@ -1238,6 +1241,8 @@ static void sna_dri_flip_event(struct sna *sna,
 			flip->next_front.name = flip->front->name;
 			flip->off_delay = 5;
 		} else if (--flip->off_delay) {
+			DBG(("%s: queuing no-flip [delay=%d]\n",
+			     __FUNCTION__, flip->off_delay));
 			/* Just queue a no-op flip to trigger another event */
 			flip->count = sna_page_flip(sna,
 						    get_private(flip->front)->bo,
@@ -1765,6 +1770,13 @@ blit:
 			goto blit;
 		}
 
+		DBG(("%s: referencing (%p:%d, %p:%d)\n",
+		     __FUNCTION__,
+		     front, get_private(front)->refcnt,
+		     back, get_private(back)->refcnt));
+		sna_dri_reference_buffer(front);
+		sna_dri_reference_buffer(back);
+
 		if (!sna_dri_page_flip(sna, info)) {
 			sna_dri_frame_event_info_free(info);
 			goto blit;
@@ -1773,9 +1785,6 @@ blit:
 		info->next_front.name = info->front->name;
 		info->next_front.bo = get_private(info->front)->bo;
 		info->off_delay = 5;
-
-		sna_dri_reference_buffer(front);
-		sna_dri_reference_buffer(back);
 	} else if (info->type != DRI2_ASYNC_FLIP) {
 		/* A normal vsync'ed client is finishing, wait for it
 		 * to unpin the old framebuffer before taking over.
commit 079b491ced2c9c3c73d938ef6025d040016ad3a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 23:52:17 2012 +0100

    sna: Ensure drawables are clipped against the pixmap before migration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 424738a..956a038 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -446,16 +446,7 @@ bool must_check sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 						RegionPtr region,
 						unsigned flags);
 
-static inline bool must_check
-sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
-{
-	RegionRec region;
-
-	pixman_region_init_rect(&region,
-				drawable->x, drawable->y,
-				drawable->width, drawable->height);
-	return sna_drawable_move_region_to_cpu(drawable, &region, flags);
-}
+bool must_check sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags);
 
 static inline bool must_check
 sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6430de8..1bc34dc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1697,6 +1697,43 @@ out:
 	return true;
 }
 
+bool
+sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
+{
+	RegionRec region;
+	PixmapPtr pixmap;
+	int16_t dx, dy;
+
+	if (drawable->type == DRAWABLE_PIXMAP)
+		return sna_pixmap_move_to_cpu((PixmapPtr)drawable, flags);
+
+	pixmap = get_window_pixmap((WindowPtr)drawable);
+	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+
+	DBG(("%s: (%d, %d)x(%d, %d) + (%d, %d), flags=%x\n",
+	     __FUNCTION__,
+	     drawable->x, drawable->y,
+	     drawable->width, drawable->height,
+	     dx, dy, flags));
+
+	region.extents.x1 = drawable->x + dx;
+	region.extents.y1 = drawable->y + dy;
+	region.extents.x2 = region.extents.x1 + drawable->width;
+	region.extents.y2 = region.extents.y1 + drawable->height;
+	region.data = NULL;
+
+	if (region.extents.x1 < 0)
+		region.extents.x1 = 0;
+	if (region.extents.y1 < 0)
+		region.extents.y1 = 0;
+	if (region.extents.x2 > pixmap->drawable.width)
+		region.extents.x2 = pixmap->drawable.width;
+	if (region.extents.y2 > pixmap->drawable.height)
+		region.extents.y2 = pixmap->drawable.height;
+
+	return sna_drawable_move_region_to_cpu(&pixmap->drawable, &region, flags);
+}
+
 static bool alu_overwrites(uint8_t alu)
 {
 	switch (alu) {
commit 1a5f8599b150064339d62a97c58026e62b49ff27
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 23:21:48 2012 +0100

    sna: Compile fix for fresh assertion
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a116936..666a23f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4076,7 +4076,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	assert(_bo->proxy);
 
 	_bo = _bo->proxy;
-	assert(bo->proxy == NULL);
+	assert(_bo->proxy == NULL);
 	assert(_bo->exec == NULL);
 
 	bo = (struct kgem_partial_bo *)_bo;
commit 61cac5c265279d45677262216a0ba56f548cd898
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 22:33:59 2012 +0100

    sna: Maintain a reference to the chain of proxies
    
    Rather than attempt to flatten the chain to the last link, we may need
    to hold a reference to the intermediate links in case of batch buffer
    submission.
    
    Fixes http://tnsp.org/~ccr/intel-gfx/test.html
    
    Reported-by: Matti Hamalainen <ccr at tnsp.org>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=49436
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ed2eaf1..680d36f 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2381,9 +2381,14 @@ gen3_composite_picture(struct sna *sna,
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
 						  x, y, w, h, dst_x, dst_y);
 
-	if (too_large(pixmap->drawable.width, pixmap->drawable.height))
+	if (too_large(pixmap->drawable.width, pixmap->drawable.height)) {
+		DBG(("%s: pixmap too large (%dx%d), extracting (%d, %d)x(%d,%d)\n",
+		     __FUNCTION__,
+		     pixmap->drawable.width, pixmap->drawable.height,
+		     x, y, w, h));
 		return sna_render_picture_extract(sna, picture, channel,
 						  x, y, w, h, dst_x, dst_y);
+	}
 
 	return sna_render_pixmap_bo(sna, channel, pixmap,
 				    x, y, w, h, dst_x, dst_y);
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 49fa173..a116936 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2960,7 +2960,7 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 		if (bo->exec)
 			continue;
 
-		if (bo->proxy) {
+		while (bo->proxy) {
 			bo = bo->proxy;
 			if (bo->exec)
 				continue;
@@ -2989,7 +2989,7 @@ bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
 {
 	uint32_t size;
 
-	if (bo->proxy)
+	while (bo->proxy)
 		bo = bo->proxy;
 	if (bo->exec) {
 		if (kgem->gen < 40 &&
@@ -3034,7 +3034,7 @@ bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
-		if (bo->proxy)
+		while (bo->proxy)
 			bo = bo->proxy;
 		if (bo->exec) {
 			if (kgem->gen >= 40 || bo->tiling == I915_TILING_NONE)
@@ -3098,10 +3098,10 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 		assert(bo->refcnt);
 		assert(!bo->purged);
 
-		delta += bo->delta;
-		if (bo->proxy) {
-			DBG(("%s: adding proxy for handle=%d\n",
-			     __FUNCTION__, bo->handle));
+		while (bo->proxy) {
+			DBG(("%s: adding proxy [delta=%d] for handle=%d\n",
+			     __FUNCTION__, bo->delta, bo->handle));
+			delta += bo->delta;
 			assert(bo->handle == bo->proxy->handle);
 			/* need to release the cache upon batch submit */
 			list_move(&bo->request, &kgem->next_request->buffers);
@@ -3527,8 +3527,9 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 {
 	struct kgem_bo *bo;
 
-	DBG(("%s: target handle=%d, offset=%d, length=%d, io=%d\n",
-	     __FUNCTION__, target->handle, offset, length, target->io));
+	DBG(("%s: target handle=%d [proxy? %d], offset=%d, length=%d, io=%d\n",
+	     __FUNCTION__, target->handle, target->proxy ? target->proxy->delta : -1,
+	     offset, length, target->io));
 
 	bo = __kgem_bo_alloc(target->handle, length);
 	if (bo == NULL)
@@ -3537,15 +3538,11 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	bo->reusable = false;
 	bo->size.bytes = length;
 
-	bo->io = target->io;
+	bo->io = target->io && target->proxy == NULL;
 	bo->dirty = target->dirty;
 	bo->tiling = target->tiling;
 	bo->pitch = target->pitch;
 
-	if (target->proxy) {
-		offset += target->delta;
-		target = target->proxy;
-	}
 	bo->proxy = kgem_bo_reference(target);
 	bo->delta = offset;
 	return bo;
@@ -4079,6 +4076,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	assert(_bo->proxy);
 
 	_bo = _bo->proxy;
+	assert(bo->proxy == NULL);
 	assert(_bo->exec == NULL);
 
 	bo = (struct kgem_partial_bo *)_bo;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3072345..6430de8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2138,7 +2138,8 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	int bpp = BitsPerPixel(depth);
 	void *ptr;
 
-	DBG(("%s(%d, %d, %d)\n", __FUNCTION__, width, height, depth));
+	DBG(("%s(%d, %d, %d, flags=%x)\n", __FUNCTION__,
+	     width, height, depth, flags));
 	assert(width);
 	assert(height);
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 3d0f9e9..dee6608 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -957,6 +957,10 @@ sna_render_picture_partial(struct sna *sna,
 		kgem_get_tile_size(&sna->kgem, bo->tiling,
 				   &tile_width, &tile_height, &tile_size);
 
+		DBG(("%s: tiling=%d, size=%dx%d, chunk=%d\n",
+		     __FUNCTION__, bo->tiling,
+		     tile_width, tile_height, tile_size));
+
 		/* Ensure we align to an even tile row */
 		box.y1 = box.y1 & ~(2*tile_height - 1);
 		box.y2 = ALIGN(box.y2, 2*tile_height);
@@ -989,8 +993,6 @@ sna_render_picture_partial(struct sna *sna,
 	if (channel->bo == NULL)
 		return 0;
 
-	channel->bo->pitch = bo->pitch;
-
 	if (channel->transform) {
 		memset(&channel->embedded_transform,
 		       0,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index d0e5afc..d7e6d40 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -188,6 +188,14 @@ sna_tiling_composite_done(struct sna *sna,
 					if (y2 > y + height)
 						y2 = y + height;
 
+					DBG(("%s: rect[%d] = (%d, %d)x(%d,%d), tile=(%d,%d)x(%d, %d), blt=(%d,%d),(%d,%d), delta=(%d,%d)\n",
+					     __FUNCTION__, n,
+					     r->dst.x, r->dst.y,
+					     r->width, r->height,
+					     x, y, width, height,
+					     x1, y1, x2, y2,
+					     dx, dy));
+
 					if (y2 > y1 && x2 > x1) {
 						struct sna_composite_rectangles rr;
 						rr.src.x = dx + r->src.x;
commit dea5d429f7a52dfc945b17a57ef79744cc796b0e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 17:35:24 2012 +0100

    sna: Remove extraneous SCANOUT flags
    
    These are now fixed by obeying a minimum alignment restriction.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index f239555..3d0f9e9 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1846,7 +1846,7 @@ sna_render_composite_redirect(struct sna *sna,
 			    width, height, bpp,
 			    kgem_choose_tiling(&sna->kgem, I915_TILING_X,
 					       width, height, bpp),
-			    CREATE_SCANOUT | CREATE_TEMPORARY);
+			    CREATE_TEMPORARY);
 	if (!bo)
 		return FALSE;
 
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index dcb4d1d..d0e5afc 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -384,7 +384,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 							       tmp.drawable.width,
 							       tmp.drawable.height,
 							       dst->drawable.bitsPerPixel),
-					    CREATE_SCANOUT | CREATE_TEMPORARY);
+					    CREATE_TEMPORARY);
 			if (bo) {
 				int16_t dx = this.extents.x1;
 				int16_t dy = this.extents.y1;
commit 19fd24a4db994bb5c5ce4a73f06d9394a758ea91
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 17:35:10 2012 +0100

    sna: Fix offset for combining damage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index fb661b2..1196912 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -36,7 +36,8 @@ static inline void sna_damage_combine(struct sna_damage **l,
 				      struct sna_damage *r,
 				      int dx, int dy)
 {
-	*l = _sna_damage_combine(*l, r, dx, dy);
+	assert(!DAMAGE_IS_ALL(*l));
+	*l = _sna_damage_combine(*l, DAMAGE_PTR(r), dx, dy);
 }
 
 fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index ec2aaad..f239555 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1869,6 +1869,7 @@ sna_render_composite_redirect(struct sna *sna,
 	t->real_bo = op->dst.bo;
 	t->real_damage = op->damage;
 	if (op->damage) {
+		assert(!DAMAGE_IS_ALL(op->damage));
 		t->damage = sna_damage_create();
 		op->damage = &t->damage;
 	}
@@ -1899,8 +1900,10 @@ sna_render_composite_redirect_done(struct sna *sna,
 					   &t->box, 1);
 		}
 		if (t->damage) {
+			DBG(("%s: combining damage, offset=(%d, %d)\n",
+			     __FUNCTION__, t->box.x1, t->box.y1));
 			sna_damage_combine(t->real_damage, t->damage,
-					   -t->box.x1, -t->box.y1);
+					   t->box.x1, t->box.y1);
 			__sna_damage_destroy(t->damage);
 		}
 
commit 1376c81dbf3b789e04e6804df1b1fd32bcb2bd1d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 17:34:28 2012 +0100

    sna: Debug option to force particular upload/download paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 7cec3ff..3de164b 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -42,6 +42,8 @@
 
 #define PITCH(x, y) ALIGN((x)*(y), 4)
 
+#define FORCE_INPLACE 0
+
 /* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
 
 static Bool
@@ -109,6 +111,15 @@ static void read_boxes_inplace(struct kgem *kgem,
 	} while (--n);
 }
 
+static bool download_inplace(struct kgem *kgem, struct kgem_bo *bo)
+{
+	if (FORCE_INPLACE)
+		return FORCE_INPLACE > 0;
+
+	return !kgem_bo_map_will_stall(kgem, bo) ||
+		bo->tiling == I915_TILING_NONE;
+}
+
 void sna_read_boxes(struct sna *sna,
 		    struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		    PixmapPtr dst, int16_t dst_dx, int16_t dst_dy,
@@ -150,8 +161,7 @@ void sna_read_boxes(struct sna *sna,
 	 * this path.
 	 */
 
-	if (!kgem_bo_map_will_stall(kgem, src_bo) ||
-	    src_bo->tiling == I915_TILING_NONE) {
+	if (download_inplace(kgem, src_bo)) {
 fallback:
 		read_boxes_inplace(kgem,
 				   src_bo, src_dx, src_dy,
@@ -512,6 +522,9 @@ static bool upload_inplace(struct kgem *kgem,
 			   const BoxRec *box,
 			   int n, int bpp)
 {
+	if (FORCE_INPLACE)
+		return FORCE_INPLACE > 0;
+
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.
commit f0d464d6b18855616fc43d9a25aa6853f86c8e2b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 16:17:35 2012 +0100

    sna/dri: Balance flush counting
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 308e329..424738a 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -129,7 +129,6 @@ struct sna_pixmap {
 
 	uint32_t stride;
 	uint32_t clear_color;
-	unsigned flush;
 
 #define SOURCE_BIAS 4
 	uint16_t source_count;
@@ -139,6 +138,7 @@ struct sna_pixmap {
 	uint8_t undamaged :1;
 	uint8_t create :3;
 	uint8_t header :1;
+	uint8_t flush :1;
 };
 
 struct sna_glyph {
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 1a7b6bd..1ad2ff1 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -163,7 +163,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	if (priv == NULL)
 		return NULL;
 
-	if (priv->flush++)
+	if (priv->flush)
 		return priv->gpu_bo;
 
 	tiling = color_tiling(sna, &pixmap->drawable);
@@ -181,6 +181,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
+	priv->flush = true;
 
 	return priv->gpu_bo;
 }
@@ -322,12 +323,11 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 			struct sna_pixmap *priv = sna_pixmap(private->pixmap);
 
 			/* Undo the DRI markings on this pixmap */
-			assert(priv->flush > 0);
-			if (--priv->flush == 0) {
-				list_del(&priv->list);
-				sna_accel_watch_flush(sna, -1);
-				priv->pinned = private->pixmap == sna->front;
-			}
+			assert(priv->flush);
+			list_del(&priv->list);
+			sna_accel_watch_flush(sna, -1);
+			priv->pinned = private->pixmap == sna->front;
+			priv->flush = false;
 
 			screen->DestroyPixmap(private->pixmap);
 		}
@@ -649,13 +649,13 @@ sna_dri_copy_region(DrawablePtr draw,
 		     struct kgem_bo *, struct kgem_bo *, bool) = sna_dri_copy;
 
 	if (dst_buffer->attachment == DRI2BufferFrontLeft) {
-		dst = sna_pixmap_set_dri(sna, pixmap);
+		dst = sna_pixmap_get_bo(pixmap);
 		copy = sna_dri_copy_to_front;
 	} else
 		dst = get_private(dst_buffer)->bo;
 
 	if (src_buffer->attachment == DRI2BufferFrontLeft) {
-		src = sna_pixmap_set_dri(sna, pixmap);
+		src = sna_pixmap_get_bo(pixmap);
 		assert(copy == sna_dri_copy);
 		copy = sna_dri_copy_from_front;
 	} else
commit d47e98dd64c0b9fe2979db42622c5ee8168e8b35
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 14:54:17 2012 +0100

    sna: Minor glyph fallback fixes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index f4f115e..07b2a94 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1136,6 +1136,8 @@ glyphs_fallback(CARD8 op,
 		mask_image = dst_image;
 		src_x -= x;
 		src_y -= y;
+		x += dst->pDrawable->x;
+		y += dst->pDrawable->y;
 	}
 
 	do {
@@ -1152,7 +1154,7 @@ glyphs_fallback(CARD8 op,
 			glyph_image = sna_glyph(g)->image;
 			if (glyph_image == NULL) {
 				PicturePtr picture;
-				int dx, dy;
+				int gx, gy;
 
 				picture = GlyphPicture(g)[screen];
 				if (picture == NULL)
@@ -1160,11 +1162,11 @@ glyphs_fallback(CARD8 op,
 
 				glyph_image = image_from_pict(picture,
 							      FALSE,
-							      &dx, &dy);
+							      &gx, &gy);
 				if (!glyph_image)
 					goto next_glyph;
 
-				assert(dx == 0 && dy == 0);
+				assert(gx == 0 && gy == 0);
 				sna_glyph(g)->image = glyph_image;
 			}
 
@@ -1191,11 +1193,15 @@ glyphs_fallback(CARD8 op,
 				int xi = x - g->info.x;
 				int yi = y - g->info.y;
 
-				DBG(("%s: glyph+(%d, %d) to dst (%d, %d)x(%d, %d), src (%d, %d) [op=%d]\n",
+				DBG(("%s: glyph+(%d, %d) to dst (%d, %d)x(%d, %d)/[(%d, %d)x(%d, %d)], src (%d, %d) [op=%d]\n",
 				     __FUNCTION__,
 				     dx, dy,
 				     xi, yi,
 				     g->info.width, g->info.height,
+				     dst->pDrawable->x,
+				     dst->pDrawable->y,
+				     dst->pDrawable->width,
+				     dst->pDrawable->height,
 				     src_x + xi,
 				     src_y + yi,
 				     op));
@@ -1261,7 +1267,7 @@ sna_glyphs(CARD8 op,
 	if (REGION_NUM_RECTS(dst->pCompositeClip) == 0)
 		return;
 
-	if (FALLBACK || DEBUG_NO_RENDER)
+	if (FALLBACK || !sna->have_render)
 		goto fallback;
 
 	if (wedged(sna)) {
commit a1f08b8850616952fb0babe2275eb36b13a380ec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 14:53:29 2012 +0100

    sna: Don't discard GPU buffer if we only want to read back for the operation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c84b23e..3072345 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1315,9 +1315,11 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
 		sna_damage_destroy(&priv->gpu_damage);
-		sna_pixmap_free_gpu(sna, priv);
 		priv->undamaged = false;
 
+		if (flags & MOVE_WRITE)
+			sna_pixmap_free_gpu(sna, priv);
+
 		if (pixmap->devPrivate.ptr == NULL &&
 		    !sna_pixmap_alloc_cpu(sna, pixmap, priv, false))
 			return false;
@@ -1458,8 +1460,10 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL))
 		return false;
 
-	if (priv->gpu_bo == NULL)
+	if (priv->gpu_bo == NULL) {
+		assert(priv->gpu_damage == NULL);
 		goto done;
+	}
 
 	assert(priv->gpu_bo->proxy == NULL);
 	if (priv->clear) {
commit fcccc5528b8696fb4f9b3f9f528673b95d98a907
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu May 3 11:27:44 2012 +0100

    sna: Improve handling of inplace IO for large transfers
    
    If the transfer is large enough to obliterate the caches, then it is
    preferable to do it inplace rather than upload a proxy texture and
    queue a blit. This helps prevent an inconsistency where one layer
    believes the operation should be done inplace only for the IO layer to
    perform an indirect upload.
    
    Testing shows no significant impact upon the cairo-traces, but it does
    prevent x11perf -shmput from exploding.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 51025ea..9eac68e 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -462,7 +462,7 @@ static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
 	     __FUNCTION__, bo->handle,
 	     bo->domain, bo->presumed_offset, bo->size));
 
-	if (!kgem_bo_is_mappable(kgem, bo))
+	if (!kgem_bo_is_mappable(kgem, bo) && kgem_bo_is_busy(bo))
 		return true;
 
 	if (kgem->wedged)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 2539518..7cec3ff 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -516,17 +516,16 @@ static bool upload_inplace(struct kgem *kgem,
 	 * able to almagamate a series of small writes into a single
 	 * operation.
 	 */
-	if (!bo->map) {
+	if (!bo->map || kgem_bo_map_will_stall(kgem, bo)) {
 		unsigned int bytes = 0;
 		while (n--) {
 			bytes += (box->x2 - box->x1) * (box->y2 - box->y1);
 			box++;
 		}
-		if (bytes * bpp >> 12 < kgem->half_cpu_cache_pages)
-			return false;
+		return bytes * bpp >> 12 >= kgem->half_cpu_cache_pages;
 	}
 
-	return !kgem_bo_map_will_stall(kgem, bo);
+	return true;
 }
 
 bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
@@ -570,7 +569,9 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (!can_blt || kgem->ring == KGEM_RENDER ||
+	if (!can_blt ||
+	    kgem->ring == KGEM_RENDER ||
+	    (kgem->has_semaphores && kgem->mode == KGEM_NONE) ||
 	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
commit 53568e8e49559094ce5b24b8709669f1f76fe2bf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 30 09:24:06 2012 +0100

    sna/gen7: Fix debug printing of primitives
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 327714f..6c9a833 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -898,7 +898,7 @@ gen7_emit_vertex_elements(struct sna *sna,
 
 	if (op->is_affine) {
 		src_format = GEN7_SURFACEFORMAT_R32G32_FLOAT;
-		w_component = GEN7_VFCOMPONENT_STORE_1_FLT;
+		w_component = GEN7_VFCOMPONENT_STORE_0;
 	} else {
 		src_format = GEN7_SURFACEFORMAT_R32G32B32_FLOAT;
 		w_component = GEN7_VFCOMPONENT_STORE_SRC;
@@ -930,7 +930,7 @@ gen7_emit_vertex_elements(struct sna *sna,
 		  0 << GEN7_VE0_OFFSET_SHIFT); /* offsets vb in bytes */
 	OUT_BATCH(GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_0_SHIFT |
 		  GEN7_VFCOMPONENT_STORE_SRC << GEN7_VE1_VFCOMPONENT_1_SHIFT |
-		  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_2_SHIFT |
+		  GEN7_VFCOMPONENT_STORE_0 << GEN7_VE1_VFCOMPONENT_2_SHIFT |
 		  GEN7_VFCOMPONENT_STORE_1_FLT << GEN7_VE1_VFCOMPONENT_3_SHIFT);
 
 	/* u0, v0, w0 */
diff --git a/src/sna/kgem_debug_gen7.c b/src/sna/kgem_debug_gen7.c
index a7dbbf2..78eae01 100644
--- a/src/sna/kgem_debug_gen7.c
+++ b/src/sna/kgem_debug_gen7.c
@@ -287,8 +287,8 @@ static void primitive_out(struct kgem *kgem, uint32_t *data)
 
 	assert((data[0] & (1<<15)) == 0); /* XXX index buffers */
 
-	for (n = 0; n < data[1]; n++) {
-		int v = data[2] + n;
+	for (n = 0; n < data[2]; n++) {
+		int v = data[3] + n;
 		ErrorF("	[%d:%d] = ", n, v);
 		indirect_vertex_out(kgem, v);
 		ErrorF("\n");
@@ -358,7 +358,7 @@ get_965_depthformat(unsigned int depthformat)
 }
 
 static const char *
-get_965_element_component(uint32_t data, int component)
+get_element_component(uint32_t data, int component)
 {
 	uint32_t component_control = (data >> (16 + (3 - component) * 4)) & 0x7;
 
@@ -387,9 +387,9 @@ get_965_element_component(uint32_t data, int component)
 }
 
 static const char *
-get_965_prim_type(uint32_t data)
+get_prim_type(uint32_t data)
 {
-	uint32_t primtype = (data >> 10) & 0x1f;
+	uint32_t primtype = data & 0x1f;
 
 	switch (primtype) {
 	case 0x01: return "point list";
@@ -656,10 +656,10 @@ int kgem_gen7_decode_3d(struct kgem *kgem, uint32_t offset)
 			i++;
 			kgem_debug_print(data, offset, i, "(%s, %s, %s, %s), "
 				  "dst offset 0x%02x bytes\n",
-				  get_965_element_component(data[i], 0),
-				  get_965_element_component(data[i], 1),
-				  get_965_element_component(data[i], 2),
-				  get_965_element_component(data[i], 3),
+				  get_element_component(data[i], 0),
+				  get_element_component(data[i], 1),
+				  get_element_component(data[i], 2),
+				  get_element_component(data[i], 3),
 				  (data[i] & 0xff) * 4);
 			i++;
 		}
@@ -673,16 +673,16 @@ int kgem_gen7_decode_3d(struct kgem *kgem, uint32_t offset)
 		return len;
 
 	case 0x7b00:
-		assert(len == 6);
-		kgem_debug_print(data, offset, 0,
-			  "3DPRIMITIVE: %s %s\n",
-			  get_965_prim_type(data[0]),
-			  (data[0] & (1 << 15)) ? "random" : "sequential");
-		kgem_debug_print(data, offset, 1, "vertex count\n");
-		kgem_debug_print(data, offset, 2, "start vertex\n");
-		kgem_debug_print(data, offset, 3, "instance count\n");
-		kgem_debug_print(data, offset, 4, "start instance\n");
-		kgem_debug_print(data, offset, 5, "index bias\n");
+		assert(len == 7);
+		kgem_debug_print(data, offset, 0, "3DPRIMITIVE\n");
+		kgem_debug_print(data, offset, 1, "type %s, %s\n",
+			  get_prim_type(data[1]),
+			  (data[1] & (1 << 15)) ? "random" : "sequential");
+		kgem_debug_print(data, offset, 2, "vertex count\n");
+		kgem_debug_print(data, offset, 3, "start vertex\n");
+		kgem_debug_print(data, offset, 4, "instance count\n");
+		kgem_debug_print(data, offset, 5, "start instance\n");
+		kgem_debug_print(data, offset, 6, "index bias\n");
 		primitive_out(kgem, data);
 		return len;
 	}
commit 01c26a44fdce761781908be11102e7a6a3db523c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 2 21:09:27 2012 +0100

    sna: Avoid reducing damage for synchronisation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8b52a40..c84b23e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1523,9 +1523,10 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		priv->undamaged = true;
 	}
 
-	if (sna_damage_contains_box(priv->gpu_damage,
-				    &region->extents) != PIXMAN_REGION_OUT) {
-		DBG(("%s: region (%dx%d) intersects gpu damage\n",
+	if (priv->gpu_damage &&
+	    (DAMAGE_IS_ALL(priv->gpu_damage) ||
+	     sna_damage_overlaps_box(priv->gpu_damage, &region->extents))) {
+		DBG(("%s: region (%dx%d) overlaps gpu damage\n",
 		     __FUNCTION__,
 		     region->extents.x2 - region->extents.x1,
 		     region->extents.y2 - region->extents.y1));
@@ -1538,9 +1539,18 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				       priv->gpu_bo, 0, 0,
 				       pixmap, 0, 0,
 				       &region->extents, 1);
-		} else {
+			goto done;
+		}
+
+		if (sna_damage_contains_box(priv->gpu_damage,
+					    &region->extents) != PIXMAN_REGION_OUT) {
 			RegionRec want, *r = region;
 
+			DBG(("%s: region (%dx%d) intersects gpu damage\n",
+			     __FUNCTION__,
+			     region->extents.x2 - region->extents.x1,
+			     region->extents.y2 - region->extents.y1));
+
 			/* Expand the region to move 32x32 pixel blocks at a
 			 * time, as we assume that we will continue writing
 			 * afterwards and so aim to coallesce subsequent
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index f2d682d..b97edbe 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -984,20 +984,6 @@ static bool box_contains(const BoxRec *a, const BoxRec *b)
 	return true;
 }
 
-static inline Bool sna_damage_maybe_contains_box(const struct sna_damage *damage,
-						 const BoxRec *box)
-{
-	if (box->x2 <= damage->extents.x1 ||
-	    box->x1 >= damage->extents.x2)
-		return FALSE;
-
-	if (box->y2 <= damage->extents.y1 ||
-	    box->y1 >= damage->extents.y2)
-		return FALSE;
-
-	return TRUE;
-}
-
 static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 						RegionPtr region)
 {
@@ -1011,7 +997,7 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 
 	assert(RegionNotEmpty(region));
 
-	if (!sna_damage_maybe_contains_box(damage, &region->extents))
+	if (!sna_damage_overlaps_box(damage, &region->extents))
 		return damage;
 
 
@@ -1096,7 +1082,7 @@ inline static struct sna_damage *__sna_damage_subtract_box(struct sna_damage *da
 		return NULL;
 	}
 
-	if (!sna_damage_maybe_contains_box(damage, box))
+	if (!sna_damage_overlaps_box(damage, box))
 		return damage;
 
 	if (box_contains(box, &damage->extents)) {
@@ -1189,7 +1175,7 @@ static struct sna_damage *__sna_damage_subtract_boxes(struct sna_damage *damage,
 	extents.y1 += dy;
 	extents.y2 += dy;
 
-	if (!sna_damage_maybe_contains_box(damage, &extents))
+	if (!sna_damage_overlaps_box(damage, &extents))
 		return damage;
 
 	if (n == 1)
@@ -1248,7 +1234,7 @@ static int __sna_damage_contains_box(struct sna_damage *damage,
 	if (damage->mode == DAMAGE_ALL)
 		return PIXMAN_REGION_IN;
 
-	if (!sna_damage_maybe_contains_box(damage, box))
+	if (!sna_damage_overlaps_box(damage, box))
 		return PIXMAN_REGION_OUT;
 
 	ret = pixman_region_contains_rectangle(&damage->region, (BoxPtr)box);
@@ -1297,7 +1283,7 @@ bool _sna_damage_contains_box__no_reduce(const struct sna_damage *damage,
 	if (damage->mode == DAMAGE_SUBTRACT)
 		return false;
 
-	if (!sna_damage_maybe_contains_box(damage, box))
+	if (!sna_damage_overlaps_box(damage, box))
 		return false;
 
 	return pixman_region_contains_rectangle((RegionPtr)&damage->region,
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 422a124..fb661b2 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -190,6 +190,21 @@ static inline Bool sna_damage_intersect(struct sna_damage *damage,
 	return _sna_damage_intersect(damage, region, result);
 }
 
+static inline bool
+sna_damage_overlaps_box(const struct sna_damage *damage,
+			const BoxRec *box)
+{
+	if (box->x2 <= damage->extents.x1 ||
+	    box->x1 >= damage->extents.x2)
+		return FALSE;
+
+	if (box->y2 <= damage->extents.y1 ||
+	    box->y1 >= damage->extents.y2)
+		return FALSE;
+
+	return TRUE;
+}
+
 int _sna_damage_contains_box(struct sna_damage *damage,
 			     const BoxRec *box);
 static inline int sna_damage_contains_box(struct sna_damage *damage,
commit f4c34e9ab32f31669896b8f626195827a85af337
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 2 19:09:10 2012 +0100

    sna: Bring back the minimum alignment for G33
    
    The underlying cause is still not fixed. It should be possible to use
    the much laxer alignment for single-stream linear. Still no idea how I
    fail to convince the GPU to drop the depth buffer.
    
    Reported-by: Matti Hamalainen <ccr at tnsp.org>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=49391
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d519ed6..49fa173 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -600,6 +600,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		/* 865g cannot handle a batch spanning multiple pages */
 		kgem->max_batch_size = PAGE_SIZE / sizeof(uint32_t);
 
+	kgem->min_alignment = 4;
+	if (gen < 40)
+		kgem->min_alignment = 64;
+
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 
 	list_init(&kgem->active_partials);
@@ -770,8 +774,8 @@ static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 				   uint32_t width, uint32_t bpp,
 				   bool scanout)
 {
-	width = ALIGN(width, 4) * bpp >> 3;
-	return ALIGN(width, scanout ? 64 : 4);
+	width = ALIGN(width, 2) * bpp >> 3;
+	return ALIGN(width, scanout ? 64 : kgem->min_alignment);
 }
 
 void kgem_get_tile_size(struct kgem *kgem, int tiling,
@@ -832,13 +836,17 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 			tile_width = 512;
 			tile_height = kgem->gen < 30 ? 16 : 8;
 		} else {
-			tile_width = scanout ? 64 : 4 * bpp >> 3;
-			tile_height = 4;
+			tile_width = 2 * bpp >> 3;
+			tile_width = ALIGN(tile_width,
+					   scanout ? 64 : kgem->min_alignment);
+			tile_height = 2;
 		}
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		tile_width = scanout ? 64 : 4 * bpp >> 3;
+		tile_width = 2 * bpp >> 3;
+		tile_width = ALIGN(tile_width,
+				   scanout ? 64 : kgem->min_alignment);
 		tile_height = 2;
 		break;
 	case I915_TILING_X:
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index ad2fe84..51025ea 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -143,6 +143,7 @@ struct kgem {
 	uint16_t nfence;
 	uint16_t wait;
 	uint16_t max_batch_size;
+	uint16_t min_alignment;
 
 	uint32_t flush:1;
 	uint32_t need_expire:1;
commit 10b4a9bb5f46ab9d9c8b165084ce4174b54a8d39
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 2 12:45:09 2012 +0100

    sna: Always try to operate inplace if we have an LLC gpu bo

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index db2442a..8b52a40 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -904,6 +904,9 @@ static inline bool pixmap_inplace(struct sna *sna,
 	if (priv->mapped)
 		return true;
 
+	if (sna->kgem.has_llc && pixmap != sna->front)
+		return !priv->cpu_bo;
+
 	return (pixmap->devKind * pixmap->drawable.height >> 12) >
 		sna->kgem.half_cpu_cache_pages;
 }
@@ -1264,6 +1267,9 @@ static inline bool region_inplace(struct sna *sna,
 		return false;
 	}
 
+	if (sna->kgem.has_llc && pixmap != sna->front)
+		return !priv->cpu_bo;
+
 	DBG(("%s: (%dx%d), inplace? %d\n",
 	     __FUNCTION__,
 	     region->extents.x2 - region->extents.x1,
commit dd80fb00bf7acf37dc3b9125431a12b67d7e92d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed May 2 11:31:42 2012 +0100

    sna: Fallback for glyphs too large for XY_TEXT_IMMEDIATE
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 12017bd..db2442a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6746,7 +6746,6 @@ spans_fallback:
 		}
 
 		gc->ops = (GCOps *)&sna_gc_ops;
-		assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 		if (data.damage) {
 			if (data.dx | data.dy)
 				pixman_region_translate(&data.region, data.dx, data.dy);
@@ -10622,8 +10621,6 @@ sna_glyph_extents(FontPtr font,
 
 		extents->overallWidth += p->metrics.characterWidth;
 	}
-
-	assert(extents->overallWidth > 0);
 }
 
 static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
@@ -10716,6 +10713,16 @@ inline static bool sna_get_glyph16(FontPtr font, struct sna_font *priv,
 	return sna_set_glyph(ret, *out = p);
 }
 
+static inline bool sna_font_too_large(FontPtr font)
+{
+	int top = max(FONTMAXBOUNDS(font, ascent), FONTASCENT(font));
+	int bot = max(FONTMAXBOUNDS(font, descent), FONTDESCENT(font));
+	int width = max(FONTMAXBOUNDS(font, characterWidth), -FONTMINBOUNDS(font, characterWidth));
+	DBG(("%s: (%d + %d) x %d: %d\n", __FUNCTION__,
+	     top, bot, width, (top + bot) * (width + 7)/8));
+	return (top + bot) * (width + 7)/8 > 124;
+}
+
 static int
 sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	       int x, int y,
@@ -10731,6 +10738,9 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -10820,6 +10830,9 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -10909,6 +10922,9 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -11000,6 +11016,9 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (drawable->depth < 8)
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	for (i = n = 0; i < count; i++) {
 		if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
 			n++;
@@ -11335,6 +11354,9 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	if ((bo = sna_drawable_use_bo(drawable, true,
 				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
@@ -11412,6 +11434,9 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	if (!gc_is_solid(gc, &fg))
 		goto fallback;
 
+	if (sna_font_too_large(gc->font))
+		goto fallback;
+
 	if ((bo = sna_drawable_use_bo(drawable, true,
 				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
commit 7e09babb3e9e23882db30ee7d0c22c503962faa9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 1 12:37:45 2012 +0100

    sna: Only attempt to reuse exported scanout buffers
    
    Yet more mesa w/a.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4302952..d519ed6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4148,13 +4148,13 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
 void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 {
 	bo->needs_flush = true;
-	bo->reusable = true;
 	bo->flush = false;
 
 	if (!bo->scanout)
 		return;
 
 	bo->scanout = false;
+	bo->reusable = true;
 }
 
 struct kgem_bo *
commit b4b32e7a0172a74372f800e9c74d639a23c5ff34
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue May 1 11:15:00 2012 +0100

    sna: Fast-path unclipped glyphs
    
    Avoid the redundant computation of the glyph intersection with the
    drawable bounding box.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 87371aa..f4f115e 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -427,8 +427,12 @@ glyphs_to_dst(struct sna *sna,
 	     __FUNCTION__, op, src_x, src_y, nlist,
 	     list->xOff, list->yOff, dst->pDrawable->x, dst->pDrawable->y));
 
-	rects = REGION_RECTS(dst->pCompositeClip);
-	nrect = REGION_NUM_RECTS(dst->pCompositeClip);
+	if (dst->pCompositeClip->extents.x2 - dst->pCompositeClip->extents.x1 < dst->pDrawable->width ||
+	    dst->pCompositeClip->extents.y2 - dst->pCompositeClip->extents.y1 < dst->pDrawable->height) {
+		rects = REGION_RECTS(dst->pCompositeClip);
+		nrect = REGION_NUM_RECTS(dst->pCompositeClip);
+	} else
+		nrect = 0;
 
 	x = dst->pDrawable->x;
 	y = dst->pDrawable->y;
@@ -476,47 +480,68 @@ glyphs_to_dst(struct sna *sna,
 				glyph_atlas = priv.atlas;
 			}
 
-			for (i = 0; i < nrect; i++) {
+			if (nrect) {
+				for (i = 0; i < nrect; i++) {
+					struct sna_composite_rectangles r;
+					int16_t dx, dy;
+					int16_t x2, y2;
+
+					r.dst.x = x - glyph->info.x;
+					r.dst.y = y - glyph->info.y;
+					x2 = r.dst.x + glyph->info.width;
+					y2 = r.dst.y + glyph->info.height;
+					dx = dy = 0;
+
+					DBG(("%s: glyph=(%d, %d), (%d, %d), clip=(%d, %d), (%d, %d)\n",
+					     __FUNCTION__,
+					     r.dst.x, r.dst.y, x2, y2,
+					     rects[i].x1, rects[i].y1,
+					     rects[i].x2, rects[i].y2));
+					if (rects[i].y1 >= y2)
+						break;
+
+					if (r.dst.x < rects[i].x1)
+						dx = rects[i].x1 - r.dst.x, r.dst.x = rects[i].x1;
+					if (x2 > rects[i].x2)
+						x2 = rects[i].x2;
+					if (r.dst.y < rects[i].y1)
+						dy = rects[i].y1 - r.dst.y, r.dst.y = rects[i].y1;
+					if (y2 > rects[i].y2)
+						y2 = rects[i].y2;
+
+					if (r.dst.x < x2 && r.dst.y < y2) {
+						DBG(("%s: blt=(%d, %d), (%d, %d)\n",
+						     __FUNCTION__, r.dst.x, r.dst.y, x2, y2));
+
+						r.src.x = r.dst.x + src_x;
+						r.src.y = r.dst.y + src_y;
+						r.mask.x = dx + priv.coordinate.x;
+						r.mask.y = dy + priv.coordinate.y;
+						r.width  = x2 - r.dst.x;
+						r.height = y2 - r.dst.y;
+						tmp.blt(sna, &tmp, &r);
+						apply_damage(&tmp, &r);
+					}
+				}
+			} else {
 				struct sna_composite_rectangles r;
-				int16_t dx, dy;
-				int16_t x2, y2;
 
 				r.dst.x = x - glyph->info.x;
 				r.dst.y = y - glyph->info.y;
-				x2 = r.dst.x + glyph->info.width;
-				y2 = r.dst.y + glyph->info.height;
-				dx = dy = 0;
+				r.src.x = r.dst.x + src_x;
+				r.src.y = r.dst.y + src_y;
+				r.mask.x = priv.coordinate.x;
+				r.mask.y = priv.coordinate.y;
+				r.width  = glyph->info.width;
+				r.height = glyph->info.height;
 
-				DBG(("%s: glyph=(%d, %d), (%d, %d), clip=(%d, %d), (%d, %d)\n",
+				DBG(("%s: glyph=(%d, %d)x(%d, %d), unclipped\n",
 				     __FUNCTION__,
-				     r.dst.x, r.dst.y, x2, y2,
-				     rects[i].x1, rects[i].y1,
-				     rects[i].x2, rects[i].y2));
-				if (rects[i].y1 >= y2)
-					break;
+				     r.dst.x, r.dst.y,
+				     r.width, r.height));
 
-				if (r.dst.x < rects[i].x1)
-					dx = rects[i].x1 - r.dst.x, r.dst.x = rects[i].x1;
-				if (x2 > rects[i].x2)
-					x2 = rects[i].x2;
-				if (r.dst.y < rects[i].y1)
-					dy = rects[i].y1 - r.dst.y, r.dst.y = rects[i].y1;
-				if (y2 > rects[i].y2)
-					y2 = rects[i].y2;
-
-				if (r.dst.x < x2 && r.dst.y < y2) {
-					DBG(("%s: blt=(%d, %d), (%d, %d)\n",
-					     __FUNCTION__, r.dst.x, r.dst.y, x2, y2));
-
-					r.src.x = r.dst.x + src_x;
-					r.src.y = r.dst.y + src_y;
-					r.mask.x = dx + priv.coordinate.x;
-					r.mask.y = dy + priv.coordinate.y;
-					r.width  = x2 - r.dst.x;
-					r.height = y2 - r.dst.y;
-					tmp.blt(sna, &tmp, &r);
-					apply_damage(&tmp, &r);
-				}
+				tmp.blt(sna, &tmp, &r);
+				apply_damage(&tmp, &r);
 			}
 
 next_glyph:
commit ffdf9aca12adcfa1ec7ab7a1706873105a5f0d4a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 29 21:59:52 2012 +0100

    legacy/i810: hwmc additionally depends upon building DRI
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/legacy/i810/Makefile.am b/src/legacy/i810/Makefile.am
index a1bdd85..e7fa04f 100644
--- a/src/legacy/i810/Makefile.am
+++ b/src/legacy/i810/Makefile.am
@@ -25,10 +25,10 @@ liblegacy_i810_la_SOURCES +=\
          i810_dri.c \
          i810_dri.h \
 	 $(NULL)
-endif
 
 if XVMC
 liblegacy_i810_la_SOURCES += \
 	i810_hwmc.c \
 	$(NULL)
 endif
+endif
commit 444da84c47266bcbbdf5121507901de8eb36f11b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 29 21:49:04 2012 +0100

    configure: Version bump for 2.19.0 release

diff --git a/NEWS b/NEWS
index 9d2b15e..cc74879 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,29 @@
+Release 2.19.0 (2012-04-29)
+===========================
+More stability fixes for UXA and support for another variant of IvyBridge.
+Given the severity of the stability fixes, I strongly recommend everybody
+to upgrade to 2.19.0.
+
+* Prevent waiting on scanlines whilst not in control of the VT and therefore
+  whilst referencing foreign CRTC configurations.
+
+* Pixmap (and bo leak) during fallback glyph composition
+
+* Remove broken acceleration for rendering glyphs directly upon the
+  destination pixmap, exposed by cairo-1.12.0 (and coincidentally fix
+  another Pixmap leak upon fallback handling).
+
+* Add support for Ivy Bridge GT2 Server chipset [PCI id 0x016a]
+
+* Remove broken damage flushing with CompositeRectangles
+  https://bugs.freedesktop.org/show_bug.cgi?id=32547
+
+* Fix crash upon server start with multiple monitors
+  https://bugs.freedesktop.org/show_bug.cgi?id=47395
+
+* Fix composition issues resulting from overly aggressive Pixmap reuse
+  https://bugs.freedesktop.org/show_bug.cgi?id=47345
+
 Release 2.18.0 (2012-02-24)
 ===========================
 Time passes, a few more bugs have crept out of the woodwork that are a
diff --git a/configure.ac b/configure.ac
index 5124100..3770983 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.18.0],
+        [2.19.0],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit a206a1eee1fb799d567d68db564d663cfc66f6cc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 28 01:54:43 2012 +0100

    sna: Tune relocation array size
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 4def6b1..ad2fe84 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -171,7 +171,7 @@ struct kgem {
 
 	uint32_t batch[4*1024];
 	struct drm_i915_gem_exec_object2 exec[256];
-	struct drm_i915_gem_relocation_entry reloc[384];
+	struct drm_i915_gem_relocation_entry reloc[612];
 };
 
 #define KGEM_BATCH_RESERVED 1
commit 4f1908c651ef9e2af33d8831466a605234978c46
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 27 23:19:56 2012 +0100

    sna: PolyPoint only uses the gc->fgPixel
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f4d3de4..12017bd 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5560,8 +5560,7 @@ static Bool
 sna_poly_point_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo,
 		   struct sna_damage **damage,
-		   GCPtr gc, uint32_t pixel,
-		   int mode, int n, DDXPointPtr pt,
+		   GCPtr gc, int mode, int n, DDXPointPtr pt,
 		   bool clipped)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
@@ -5574,7 +5573,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 	DBG(("%s: alu=%d, pixel=%08lx, clipped?=%d\n",
 	     __FUNCTION__, gc->alu, gc->fgPixel, clipped));
 
-	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, pixel))
+	if (!sna_fill_init_blt(&fill, sna, pixmap, bo, gc->alu, gc->fgPixel))
 		return FALSE;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -5704,7 +5703,6 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	RegionRec region;
 	unsigned flags;
-	uint32_t color;
 
 	DBG(("%s(mode=%d, n=%d, pt[0]=(%d, %d)\n",
 	     __FUNCTION__, mode, n, pt[0].x, pt[0].y));
@@ -5729,7 +5727,7 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 		goto fallback;
 	}
 
-	if (PM_IS_SOLID(drawable, gc->planemask) && gc_is_solid(gc, &color)) {
+	if (PM_IS_SOLID(drawable, gc->planemask)) {
 		struct sna_damage **damage;
 		struct kgem_bo *bo;
 
@@ -5738,7 +5736,7 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 
 		if ((bo = sna_drawable_use_bo(drawable, false, &region.extents, &damage)) &&
 		    sna_poly_point_blt(drawable, bo, damage,
-				       gc, color, mode, n, pt, flags & 2))
+				       gc, mode, n, pt, flags & 2))
 			return;
 	}
 
commit 8453034c7dd893f1d4c32ee87724f3a13137595d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 27 22:08:51 2012 +0100

    sna/gen6: Allow ring switching at the start of a batch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 5bbe5e3..38fb024 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -57,7 +57,7 @@
 #define NO_FILL_BOXES 0
 #define NO_CLEAR 0
 
-#define NO_RING_SWITCH 1
+#define NO_RING_SWITCH 0
 
 #define GEN6_MAX_SIZE 8192
 
@@ -2332,7 +2332,7 @@ static bool prefer_blt_ring(struct sna *sna)
 
 static bool can_switch_rings(struct sna *sna)
 {
-	return sna->kgem.has_semaphores && !NO_RING_SWITCH;
+	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
 static Bool
@@ -2369,6 +2369,8 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return TRUE;
+		if (src->pDrawable)
+			return TRUE;
 	}
 
 	return FALSE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 2228873..327714f 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -57,7 +57,7 @@
 #define NO_FILL_BOXES 0
 #define NO_CLEAR 0
 
-#define NO_RING_SWITCH 1
+#define NO_RING_SWITCH 0
 
 #define GEN7_MAX_SIZE 16384
 
@@ -2425,7 +2425,7 @@ static bool prefer_blt_ring(struct sna *sna)
 
 static bool can_switch_rings(struct sna *sna)
 {
-	return sna->kgem.has_semaphores && !NO_RING_SWITCH;
+	return sna->kgem.mode == KGEM_NONE && sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
 static Bool
@@ -2462,6 +2462,8 @@ try_blt(struct sna *sna,
 	if (can_switch_rings(sna)) {
 		if (sna_picture_is_solid(src, NULL))
 			return TRUE;
+		if (src->pDrawable)
+			return TRUE;
 	}
 
 	return FALSE;
commit 93ad7793894787600c5074917c753fa7c6816134
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 27 22:03:55 2012 +0100

    sna: Tweak semaphores-enabled heuristic
    
    The kernel module now defaults to -1, confusing the test.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 29f0e29..4302952 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -574,7 +574,7 @@ static bool semaphores_enabled(void)
 	if (file) {
 		int value;
 		if (fscanf(file, "%d", &value) == 1)
-			detected = value > 0;
+			detected = value != 0;
 		fclose(file);
 	}
 
commit 986dbdda3bf8dcf208e55543d8a3393c4b53f10b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 27 19:21:44 2012 +0100

    sna: Tweak placement choice for high-overhead operations
    
    Some operations cost more to setup than to transfer data back and forth!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a5c1648..f4d3de4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1923,6 +1923,7 @@ box_inplace(PixmapPtr pixmap, const BoxRec *box)
 
 static inline struct kgem_bo *
 sna_drawable_use_bo(DrawablePtr drawable,
+		    bool prefer_gpu,
 		    const BoxRec *box,
 		    struct sna_damage ***damage)
 {
@@ -1949,6 +1950,9 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 	}
 
+	if (!prefer_gpu && priv->gpu_bo && !kgem_bo_is_busy(priv->gpu_bo))
+		goto use_cpu_bo;
+
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		goto use_gpu_bo;
 
@@ -1968,6 +1972,12 @@ sna_drawable_use_bo(DrawablePtr drawable,
 			goto use_cpu_bo;
 		}
 
+		if (priv->cpu_damage && !prefer_gpu) {
+			DBG(("%s: prefer cpu",
+			     __FUNCTION__));
+			goto use_cpu_bo;
+		}
+
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ))
 			goto use_cpu_bo;
 
@@ -2932,7 +2942,8 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	bo = sna_drawable_use_bo(&pixmap->drawable, &region->extents, &damage);
+	bo = sna_drawable_use_bo(&pixmap->drawable, true,
+				 &region->extents, &damage);
 	if (bo == NULL)
 		return false;
 
@@ -3054,7 +3065,8 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	if (gc->alu != GXcopy)
 		return false;
 
-	bo = sna_drawable_use_bo(&pixmap->drawable, &region->extents, &damage);
+	bo = sna_drawable_use_bo(&pixmap->drawable, true,
+				 &region->extents, &damage);
 	if (bo == NULL)
 		return false;
 
@@ -4952,7 +4964,7 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	bo = sna_drawable_use_bo(drawable, &region.extents, &damage);
+	bo = sna_drawable_use_bo(drawable, true, &region.extents, &damage);
 	if (bo) {
 		if (gc_is_solid(gc, &color)) {
 			DBG(("%s: trying solid fill [alu=%d, pixel=%08lx] blt paths\n",
@@ -5502,7 +5514,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (!PM_IS_SOLID(dst, gc->planemask))
 		goto fallback;
 
-	arg.bo = sna_drawable_use_bo(dst, &region.extents, &arg.damage);
+	arg.bo = sna_drawable_use_bo(dst, true, &region.extents, &arg.damage);
 	if (arg.bo) {
 		if (arg.bo->tiling == I915_TILING_Y) {
 			assert(arg.bo == sna_pixmap_get_bo(pixmap));
@@ -5724,7 +5736,7 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 		DBG(("%s: trying solid fill [%08lx] blt paths\n",
 		     __FUNCTION__, gc->fgPixel));
 
-		if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+		if ((bo = sna_drawable_use_bo(drawable, false, &region.extents, &damage)) &&
 		    sna_poly_point_blt(drawable, bo, damage,
 				       gc, color, mode, n, pt, flags & 2))
 			return;
@@ -6417,50 +6429,10 @@ sna_poly_line_extents(DrawablePtr drawable, GCPtr gc,
 inline static bool
 _use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
-	PixmapPtr pixmap;
-	struct sna_pixmap *priv;
-	BoxRec area;
-	int16_t dx, dy;
-
 	if (USE_ZERO_SPANS)
 		return USE_ZERO_SPANS > 0;
 
-	if (!drawable_gc_inplace_hint(drawable, gc))
-		return TRUE;
-
-	/* XXX check for GPU stalls on the gc (stipple, tile, etc) */
-
-	pixmap = get_drawable_pixmap(drawable);
-	priv = sna_pixmap(pixmap);
-	if (priv == NULL)
-		return FALSE;
-
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return FALSE;
-
-	if (priv->stride == 0 || priv->gpu_bo == NULL)
-		return FALSE;
-
-	if (!kgem_bo_is_busy(priv->gpu_bo))
-		return FALSE;
-
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
-		return TRUE;
-
-	if (priv->gpu_damage == NULL)
-		return FALSE;
-
-	get_drawable_deltas(drawable, pixmap, &dx, &dy);
-	area = *extents;
-	area.x1 += dx;
-	area.x2 += dx;
-	area.y1 += dy;
-	area.y2 += dy;
-	DBG(("%s extents (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     area.x1, area.y1, area.x2, area.y2));
-
-	return sna_damage_contains_box(priv->gpu_damage,
-				       &area) != PIXMAN_REGION_OUT;
+	return !drawable_gc_inplace_hint(drawable, gc);
 }
 
 static bool
@@ -6481,50 +6453,10 @@ use_zero_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 inline static bool
 _use_wide_spans(DrawablePtr drawable, GCPtr gc, const BoxRec *extents)
 {
-	PixmapPtr pixmap;
-	struct sna_pixmap *priv;
-	BoxRec area;
-	int16_t dx, dy;
-
 	if (USE_WIDE_SPANS)
 		return USE_WIDE_SPANS > 0;
 
-	if (!drawable_gc_inplace_hint(drawable, gc))
-		return TRUE;
-
-	/* XXX check for GPU stalls on the gc (stipple, tile, etc) */
-
-	pixmap = get_drawable_pixmap(drawable);
-	priv = sna_pixmap(pixmap);
-	if (priv == NULL)
-		return FALSE;
-
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return FALSE;
-
-	if (priv->stride == 0 || priv->gpu_bo == NULL)
-		return FALSE;
-
-	if (!kgem_bo_is_busy(priv->gpu_bo))
-		return FALSE;
-
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
-		return TRUE;
-
-	if (priv->gpu_damage == NULL)
-		return FALSE;
-
-	get_drawable_deltas(drawable, pixmap, &dx, &dy);
-	area = *extents;
-	area.x1 += dx;
-	area.x2 += dx;
-	area.y1 += dy;
-	area.y2 += dy;
-	DBG(("%s extents (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     area.x1, area.y1, area.x2, area.y2));
-
-	return sna_damage_contains_box(priv->gpu_damage,
-				       &area) != PIXMAN_REGION_OUT;
+	return !drawable_gc_inplace_hint(drawable, gc);
 }
 
 static bool
@@ -6605,7 +6537,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		     __FUNCTION__, (unsigned)color));
 
 		if (data.flags & 4) {
-			data.bo = sna_drawable_use_bo(drawable,
+			data.bo = sna_drawable_use_bo(drawable, true,
 						      &data.region.extents,
 						      &data.damage);
 			if (data.bo &&
@@ -6616,8 +6548,8 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 					      data.flags & 2))
 				return;
 		} else { /* !rectilinear */
-			if (use_zero_spans(drawable, gc, &data.region.extents) &&
-			    (data.bo = sna_drawable_use_bo(drawable,
+			if ((data.bo = sna_drawable_use_bo(drawable,
+							   use_zero_spans(drawable, gc, &data.region.extents),
 							   &data.region.extents,
 							   &data.damage)) &&
 			    sna_poly_zero_line_blt(drawable,
@@ -6630,7 +6562,8 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		}
 	} else if (data.flags & 4) {
 		/* Try converting these to a set of rectangles instead */
-		data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage);
+		data.bo = sna_drawable_use_bo(drawable, true,
+					      &data.region.extents, &data.damage);
 		if (data.bo) {
 			DDXPointRec p1, p2;
 			xRectangle *rect;
@@ -6701,8 +6634,9 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 	}
 
 spans_fallback:
-	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    (data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
+	if ((data.bo = sna_drawable_use_bo(drawable,
+					   use_wide_spans(drawable, gc, &data.region.extents),
+					   &data.region.extents, &data.damage))) {
 		DBG(("%s: converting line into spans\n", __FUNCTION__));
 		get_drawable_deltas(drawable, data.pixmap, &data.dx, &data.dy);
 		sna_gc(gc)->priv = &data;
@@ -7570,12 +7504,6 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	data.bo = sna_drawable_use_bo(drawable,
-				      &data.region.extents,
-				      &data.damage);
-	if (data.bo == NULL)
-		goto fallback;
-
 	if (gc->lineStyle != LineSolid || gc->lineWidth > 1)
 		goto spans_fallback;
 	if (gc_is_solid(gc, &color)) {
@@ -7583,14 +7511,20 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		     __FUNCTION__, (unsigned)color, data.flags));
 
 		if (data.flags & 4) {
-			if (sna_poly_segment_blt(drawable,
+			if ((data.bo = sna_drawable_use_bo(drawable, true,
+							   &data.region.extents,
+							   &data.damage)) &&
+			     sna_poly_segment_blt(drawable,
 						 data.bo, data.damage,
 						 gc, color, n, seg,
 						 &data.region.extents,
 						 data.flags & 2))
 				return;
 		} else {
-			if (use_zero_spans(drawable, gc, &data.region.extents) &&
+			if ((data.bo = sna_drawable_use_bo(drawable,
+							   use_zero_spans(drawable, gc, &data.region.extents),
+							   &data.region.extents,
+							   &data.damage)) &&
 			    sna_poly_zero_segment_blt(drawable,
 						      data.bo, data.damage,
 						      gc, n, seg,
@@ -7603,6 +7537,12 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		xRectangle *rect;
 		int i;
 
+		data.bo = sna_drawable_use_bo(drawable, true,
+					      &data.region.extents,
+					      &data.damage);
+		if (data.bo == NULL)
+			goto fallback;
+
 		DBG(("%s: converting to rectagnles\n", __FUNCTION__));
 
 		rect = malloc (n * sizeof (xRectangle));
@@ -7660,7 +7600,10 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	}
 
 spans_fallback:
-	if (use_wide_spans(drawable, gc, &data.region.extents)) {
+	if ((data.bo = sna_drawable_use_bo(drawable,
+					   use_wide_spans(drawable, gc, &data.region.extents),
+					   &data.region.extents,
+					   &data.damage))) {
 		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
 		int i;
 
@@ -8280,7 +8223,8 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	    PM_IS_SOLID(drawable, gc->planemask)) {
 		DBG(("%s: trying blt solid fill [%08lx] paths\n",
 		     __FUNCTION__, gc->fgPixel));
-		if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+		if ((bo = sna_drawable_use_bo(drawable, true,
+					      &region.extents, &damage)) &&
 		    sna_poly_rectangle_blt(drawable, bo, damage,
 					   gc, n, r, &region.extents, flags&2))
 			return;
@@ -8288,7 +8232,8 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 		/* Not a trivial outline, but we still maybe able to break it
 		 * down into simpler operations that we can accelerate.
 		 */
-		if (sna_drawable_use_bo(drawable, &region.extents, &damage)) {
+		if (sna_drawable_use_bo(drawable, true,
+					&region.extents, &damage)) {
 			miPolyRectangle(drawable, gc, n, r);
 			return;
 		}
@@ -8408,8 +8353,8 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    (data.bo = sna_drawable_use_bo(drawable,
+	if ((data.bo = sna_drawable_use_bo(drawable,
+					   use_wide_spans(drawable, gc, &data.region.extents),
 					   &data.region.extents, &data.damage))) {
 		uint32_t color;
 
@@ -8761,8 +8706,8 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 	if (!PM_IS_SOLID(draw, gc->planemask))
 		goto fallback;
 
-	if (use_wide_spans(draw, gc, &data.region.extents) &&
-	    (data.bo = sna_drawable_use_bo(draw,
+	if ((data.bo = sna_drawable_use_bo(draw,
+					   use_wide_spans(draw, gc, &data.region.extents),
 					   &data.region.extents,
 					   &data.damage))) {
 		uint32_t color;
@@ -10174,12 +10119,15 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		}
 	}
 
+	bo = sna_drawable_use_bo(draw, true, &region.extents, &damage);
+	if (bo == NULL)
+		goto fallback;
+
 	if (gc_is_solid(gc, &color)) {
 		DBG(("%s: solid fill [%08x], testing for blt\n",
 		     __FUNCTION__, color));
 
-		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
-		    sna_poly_fill_rect_blt(draw,
+		if (sna_poly_fill_rect_blt(draw,
 					   bo, damage,
 					   gc, color, n, rect,
 					   &region.extents, flags & 2))
@@ -10187,16 +10135,14 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	} else if (gc->fillStyle == FillTiled) {
 		DBG(("%s: tiled fill, testing for blt\n", __FUNCTION__));
 
-		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
-		    sna_poly_fill_rect_tiled_blt(draw, bo, damage,
+		if (sna_poly_fill_rect_tiled_blt(draw, bo, damage,
 						 gc, n, rect,
 						 &region.extents, flags & 2))
 			return;
 	} else {
 		DBG(("%s: stippled fill, testing for blt\n", __FUNCTION__));
 
-		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
-		    sna_poly_fill_rect_stippled_blt(draw, bo, damage,
+		if (sna_poly_fill_rect_stippled_blt(draw, bo, damage,
 						    gc, n, rect,
 						    &region.extents, flags & 2))
 			return;
@@ -10330,8 +10276,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 	if (!PM_IS_SOLID(draw, gc->planemask))
 		goto fallback;
 
-	if (use_wide_spans(draw, gc, &data.region.extents) &&
-	    (data.bo = sna_drawable_use_bo(draw,
+	if ((data.bo = sna_drawable_use_bo(draw, true,
 					   &data.region.extents,
 					   &data.damage))) {
 		uint32_t color;
@@ -10482,7 +10427,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		return false;
 	}
 
-	bo = sna_drawable_use_bo(drawable, &clip->extents, &damage);
+	bo = sna_drawable_use_bo(drawable, true, &clip->extents, &damage);
 	if (bo == NULL)
 		return false;
 
@@ -11392,7 +11337,8 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+	if ((bo = sna_drawable_use_bo(drawable, true,
+				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
 				   bo, damage, &region,
 				   gc->fgPixel, gc->bgPixel, false))
@@ -11468,7 +11414,8 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	if (!gc_is_solid(gc, &fg))
 		goto fallback;
 
-	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+	if ((bo = sna_drawable_use_bo(drawable, true,
+				      &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
 				   bo, damage, &region, fg, -1, true))
 		goto out;
@@ -11505,7 +11452,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	bo = sna_drawable_use_bo(drawable, &region->extents, &damage);
+	bo = sna_drawable_use_bo(drawable, true, &region->extents, &damage);
 	if (bo == NULL)
 		return false;
 
commit daac9a1d036d80ccce83438b49115a236a16bfb6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 27 16:50:28 2012 +0100

    sna: Micro-optimise common case of checking a single fenced bo
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4c4aa7c..29f0e29 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2977,7 +2977,45 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	return true;
 }
 
-bool kgem_check_bo_fenced(struct kgem *kgem, ...)
+bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo)
+{
+	uint32_t size;
+
+	if (bo->proxy)
+		bo = bo->proxy;
+	if (bo->exec) {
+		if (kgem->gen < 40 &&
+		    bo->tiling != I915_TILING_NONE &&
+		    (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
+			if (kgem->nfence >= kgem->fence_max)
+				return false;
+
+			size = kgem->aperture_fenced;
+			size += kgem_bo_fenced_size(kgem, bo);
+			if (size > kgem->aperture_mappable)
+				return false;
+		}
+
+		return true;
+	}
+
+	if (kgem->aperture > kgem->aperture_low)
+		return false;
+
+	if (kgem->nexec >= KGEM_EXEC_SIZE(kgem) - 1)
+		return false;
+
+	if (kgem->gen < 40 &&
+	    bo->tiling != I915_TILING_NONE &&
+	    kgem->nfence >= kgem->fence_max)
+		return false;
+
+	size = kgem->aperture;
+	size += num_pages(bo);
+	return size <= kgem->aperture_high;
+}
+
+bool kgem_check_many_bo_fenced(struct kgem *kgem, ...)
 {
 	va_list ap;
 	struct kgem_bo *bo;
@@ -4165,7 +4203,7 @@ kgem_replace_bo(struct kgem *kgem,
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_batch(kgem, 8) ||
 	    !kgem_check_reloc(kgem, 2) ||
-	    !kgem_check_bo_fenced(kgem, src, dst, NULL)) {
+	    !kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 1235b83..4def6b1 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -344,7 +344,8 @@ static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
 }
 
 bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
-bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
+bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
+bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
 
 void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
 static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dc084f0..a5c1648 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2975,7 +2975,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		void *ptr;
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -3103,7 +3103,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 			void *ptr;
 
 			if (!kgem_check_batch(&sna->kgem, 12) ||
-			    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 			    !kgem_check_reloc(&sna->kgem, 2)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -5117,7 +5117,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		if (src_stride <= 128) {
 			src_stride = ALIGN(src_stride, 8) / 4;
 			if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-			    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
 			    !kgem_check_reloc(&sna->kgem, 1)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -5159,7 +5159,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 			void *ptr;
 
 			if (!kgem_check_batch(&sna->kgem, 8) ||
-			    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
 			    !kgem_check_reloc(&sna->kgem, 2)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -5277,7 +5277,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 		     sx, sy, bx1, bx2));
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9195,7 +9195,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 			     __FUNCTION__, r->x + dx, r->y + dy, r->width, r->height));
 
 			if (!kgem_check_batch(&sna->kgem, 9) ||
-			    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 			    !kgem_check_reloc(&sna->kgem, 1)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9243,7 +9243,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 					uint32_t *b;
 
 					if (!kgem_check_batch(&sna->kgem, 9) ||
-					    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 					    !kgem_check_reloc(&sna->kgem, 1)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9293,7 +9293,7 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
 						uint32_t *b;
 
 						if (!kgem_check_batch(&sna->kgem, 9) ||
-						    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 						    !kgem_check_reloc(&sna->kgem, 1)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9433,7 +9433,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 			if (src_stride <= 128) {
 				src_stride = ALIGN(src_stride, 8) / 4;
 				if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-				    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+				    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 				    !kgem_check_reloc(&sna->kgem, 1)) {
 					_kgem_submit(&sna->kgem);
 					_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9475,7 +9475,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				void *ptr;
 
 				if (!kgem_check_batch(&sna->kgem, 8) ||
-				    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+				    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 				    !kgem_check_reloc(&sna->kgem, 2)) {
 					_kgem_submit(&sna->kgem);
 					_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9575,7 +9575,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 				if (src_stride <= 128) {
 					src_stride = ALIGN(src_stride, 8) / 4;
 					if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-					    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 					    !kgem_check_reloc(&sna->kgem, 1)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9614,7 +9614,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					} while (--bh);
 				} else {
 					if (!kgem_check_batch(&sna->kgem, 8) ||
-					    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+					    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 					    !kgem_check_reloc(&sna->kgem, 2)) {
 						_kgem_submit(&sna->kgem);
 						_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9715,7 +9715,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 					if (src_stride <= 128) {
 						src_stride = ALIGN(src_stride, 8) / 4;
 						if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-						    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 						    !kgem_check_reloc(&sna->kgem, 1)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9754,7 +9754,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
 						} while (--bh);
 					} else {
 						if (!kgem_check_batch(&sna->kgem, 8) ||
-						    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+						    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 						    !kgem_check_reloc(&sna->kgem, 2)) {
 							_kgem_submit(&sna->kgem);
 							_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -9856,7 +9856,7 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
 			len = bw*bh;
 			len = ALIGN(len, 8) / 4;
 			if (!kgem_check_batch(&sna->kgem, 7+len) ||
-			    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 			    !kgem_check_reloc(&sna->kgem, 1)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -10512,7 +10512,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	if (!kgem_check_batch(&sna->kgem, 16) ||
-	    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+	    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 	    !kgem_check_reloc(&sna->kgem, 1)) {
 		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -11180,7 +11180,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	if (!kgem_check_batch(&sna->kgem, 16) ||
-	    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+	    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 	    !kgem_check_reloc(&sna->kgem, 1)) {
 		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -11549,7 +11549,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		void *ptr;
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, bo) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index a81a145..82c61df 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -151,10 +151,10 @@ static bool sna_blt_fill_init(struct sna *sna,
 	blt->bpp = bpp;
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, bo, NULL) ||
+	if (!kgem_check_bo_fenced(kgem, bo) ||
 	    !kgem_check_batch(kgem, 12)) {
 		_kgem_submit(kgem);
-		assert(kgem_check_bo_fenced(kgem, bo, NULL));
+		assert(kgem_check_bo_fenced(kgem, bo));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -293,9 +293,9 @@ static Bool sna_blt_copy_init(struct sna *sna,
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, src, dst, NULL)) {
+	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
-		if (!kgem_check_bo_fenced(kgem, src, dst, NULL))
+		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
 			return FALSE;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -345,9 +345,9 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 	blt->pixel = alpha;
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, src, dst, NULL)) {
+	if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
-		if (!kgem_check_bo_fenced(kgem, src, dst, NULL))
+		if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
 			return FALSE;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -1103,10 +1103,10 @@ prepare_blt_copy(struct sna *sna,
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo))
 		return FALSE;
 
-	if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
+	if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
 		_kgem_submit(&sna->kgem);
-		if (!kgem_check_bo_fenced(&sna->kgem,
-					  op->dst.bo, priv->gpu_bo, NULL))
+		if (!kgem_check_many_bo_fenced(&sna->kgem,
+					       op->dst.bo, priv->gpu_bo, NULL))
 			return FALSE;
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
@@ -1577,9 +1577,9 @@ sna_blt_composite(struct sna *sna,
 	if (width && height)
 		reduce_damage(tmp, dst_x, dst_y, width, height);
 
-	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL)) {
+	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo)) {
 		_kgem_submit(&sna->kgem);
-		assert(kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL));
+		assert(kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo));
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
@@ -1884,9 +1884,9 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_batch(kgem, 6) ||
 	    !kgem_check_reloc(kgem, 1) ||
-	    !kgem_check_bo_fenced(kgem, bo, NULL)) {
+	    !kgem_check_bo_fenced(kgem, bo)) {
 		_kgem_submit(kgem);
-		assert(kgem_check_bo_fenced(&sna->kgem, bo, NULL));
+		assert(kgem_check_bo_fenced(&sna->kgem, bo));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -1957,10 +1957,10 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	}
 
 	kgem_set_mode(kgem, KGEM_BLT);
-	if (!kgem_check_bo_fenced(kgem, bo, NULL) ||
+	if (!kgem_check_bo_fenced(kgem, bo) ||
 	    !kgem_check_batch(kgem, 12)) {
 		_kgem_submit(kgem);
-		assert(kgem_check_bo_fenced(&sna->kgem, bo, NULL));
+		assert(kgem_check_bo_fenced(&sna->kgem, bo));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -2122,9 +2122,9 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_batch(kgem, 8) ||
 	    !kgem_check_reloc(kgem, 2) ||
-	    !kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
+	    !kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
 		_kgem_submit(kgem);
-		if (!kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL))
+		if (!kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL))
 			return sna_tiling_blt_copy_boxes(sna, alu,
 							 src_bo, src_dx, src_dy,
 							 dst_bo, dst_dx, dst_dy,
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 02a5c75..2539518 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -360,7 +360,7 @@ fallback:
 	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
 	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
 	    !kgem_check_batch(kgem, 8) ||
-	    !kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
+	    !kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -732,7 +732,7 @@ tile:
 	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
 	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
 	    !kgem_check_batch(kgem, 8) ||
-	    !kgem_check_bo_fenced(kgem, dst_bo, NULL)) {
+	    !kgem_check_bo_fenced(kgem, dst_bo)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
@@ -969,7 +969,7 @@ fallback:
 	if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
 	    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
 	    !kgem_check_batch(kgem, 8) ||
-	    !kgem_check_bo_fenced(kgem, dst_bo, NULL)) {
+	    !kgem_check_bo_fenced(kgem, dst_bo)) {
 		_kgem_submit(kgem);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
commit 5967d76ca09a257ec9db66ea664158e1dfd083ba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 25 17:15:37 2012 +0100

    sna: Fixup broken assertion
    
    It is valid for the cpu_bo to be NULL, as we may be choosing to free the
    large shadow pixel buffer instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 382c671..dc084f0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2256,7 +2256,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		sna_damage_destroy(&priv->cpu_damage);
 		priv->undamaged = false;
 		list_del(&priv->list);
-		assert(!priv->cpu_bo->sync);
+		assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
 		sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv);
 	}
 
commit 860d3859b586939cd52e45b944cb6abd2a2ca71b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 25 16:04:33 2012 +0100

    sna/gen7: Add CS stall before changing WM binding table
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e2741c4..2228873 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -837,11 +837,11 @@ gen7_emit_wm(struct sna *sna, unsigned int kernel, int nr_surfaces, int nr_input
 	OUT_BATCH(0); /* kernel 2 */
 }
 
-static void
+static bool
 gen7_emit_binding_table(struct sna *sna, uint16_t offset)
 {
 	if (sna->render_state.gen7.surface_table == offset)
-		return;
+		return false;
 
 	/* Binding table pointers */
 	assert(is_aligned(4*offset, 32));
@@ -849,6 +849,7 @@ gen7_emit_binding_table(struct sna *sna, uint16_t offset)
 	OUT_BATCH(offset*4);
 
 	sna->render_state.gen7.surface_table = offset;
+	return true;
 }
 
 static void
@@ -970,11 +971,7 @@ gen7_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
 		uint16_t wm_binding_table)
 {
-	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
-		gen7_emit_flush(sna);
-		kgem_clear_dirty(&sna->kgem);
-		kgem_bo_mark_dirty(op->dst.bo);
-	}
+	bool need_stall = false;
 
 	gen7_emit_cc(sna,
 		     gen7_get_blend(op->op,
@@ -1001,8 +998,22 @@ gen7_emit_state(struct sna *sna,
 		     op->u.gen7.nr_inputs);
 	gen7_emit_vertex_elements(sna, op);
 
-	gen7_emit_binding_table(sna, wm_binding_table);
+	need_stall |= gen7_emit_binding_table(sna, wm_binding_table);
 	gen7_emit_drawing_rectangle(sna, op);
+
+	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
+		gen7_emit_flush(sna);
+		kgem_clear_dirty(&sna->kgem);
+		kgem_bo_mark_dirty(op->dst.bo);
+		need_stall = false;
+	}
+	if (need_stall) {
+		OUT_BATCH(GEN7_PIPE_CONTROL | (4 - 2));
+		OUT_BATCH(GEN7_PIPE_CONTROL_CS_STALL |
+			  GEN7_PIPE_CONTROL_STALL_AT_SCOREBOARD);
+		OUT_BATCH(0);
+		OUT_BATCH(0);
+	}
 }
 
 static void gen7_magic_ca_pass(struct sna *sna,
commit c219283460c0f2dfdb823e0cb139d05075c6afce
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 25 15:04:01 2012 +0100

    sna/gen7: Apply more recent improvements from SNB perf tuning
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 7dff02f..e2741c4 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -57,6 +57,8 @@
 #define NO_FILL_BOXES 0
 #define NO_CLEAR 0
 
+#define NO_RING_SWITCH 1
+
 #define GEN7_MAX_SIZE 16384
 
 /* XXX Todo
@@ -967,7 +969,6 @@ static void
 gen7_emit_state(struct sna *sna,
 		const struct sna_composite_op *op,
 		uint16_t wm_binding_table)
-
 {
 	if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) {
 		gen7_emit_flush(sna);
@@ -1353,18 +1354,13 @@ gen7_emit_composite_primitive_solid(struct sna *sna,
 	dst.p.x = r->dst.x + r->width;
 	dst.p.y = r->dst.y + r->height;
 	v[0] = dst.f;
-	v[1] = 1.;
-	v[2] = 1.;
-
 	dst.p.x = r->dst.x;
 	v[3] = dst.f;
-	v[4] = 0.;
-	v[5] = 1.;
-
 	dst.p.y = r->dst.y;
 	v[6] = dst.f;
-	v[7] = 0.;
-	v[8] = 0.;
+
+	v[5] = v[2] = v[1] = 1.;
+	v[8] = v[7] = v[4] = 0.;
 }
 
 fastcall static void
@@ -1397,6 +1393,44 @@ gen7_emit_composite_primitive_identity_source(struct sna *sna,
 }
 
 fastcall static void
+gen7_emit_composite_primitive_simple_source(struct sna *sna,
+					    const struct sna_composite_op *op,
+					    const struct sna_composite_rectangles *r)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*3;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
+	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
+
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
+
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+	v[8] = ((r->src.y + ty) * yy + y0) * sy;
+}
+
+fastcall static void
 gen7_emit_composite_primitive_affine_source(struct sna *sna,
 					    const struct sna_composite_op *op,
 					    const struct sna_composite_rectangles *r)
@@ -1486,141 +1520,63 @@ gen7_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	v[14] = msk_y * op->mask.scale[1];
 }
 
-fastcall static void
-gen7_emit_composite_primitive(struct sna *sna,
-			      const struct sna_composite_op *op,
-			      const struct sna_composite_rectangles *r)
+inline static void
+gen7_emit_composite_texcoord(struct sna *sna,
+			     const struct sna_composite_channel *channel,
+			     int16_t x, int16_t y)
 {
-	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
-	const float *src_sf = op->src.scale;
-	const float *mask_sf = op->mask.scale;
-
-	if (is_affine) {
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
-						r->src.y + op->src.offset[1],
-						op->src.transform,
-						&src_x[0],
-						&src_y[0]);
-
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
-						r->src.y + op->src.offset[1] + r->height,
-						op->src.transform,
-						&src_x[1],
-						&src_y[1]);
-
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0] + r->width,
-						r->src.y + op->src.offset[1] + r->height,
-						op->src.transform,
-						&src_x[2],
-						&src_y[2]);
-	} else {
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1],
-							op->src.transform,
-							&src_x[0],
-							&src_y[0],
-							&src_w[0]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[1],
-							&src_y[1],
-							&src_w[1]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[2],
-							&src_y[2],
-							&src_w[2]))
-			return;
-	}
+	x += channel->offset[0];
+	y += channel->offset[1];
 
-	if (op->mask.bo) {
-		if (is_affine) {
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
-							r->mask.y + op->mask.offset[1],
-							op->mask.transform,
-							&mask_x[0],
-							&mask_y[0]);
-
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
-							r->mask.y + op->mask.offset[1] + r->height,
-							op->mask.transform,
-							&mask_x[1],
-							&mask_y[1]);
-
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0] + r->width,
-							r->mask.y + op->mask.offset[1] + r->height,
-							op->mask.transform,
-							&mask_x[2],
-							&mask_y[2]);
-		} else {
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1],
-								op->mask.transform,
-								&mask_x[0],
-								&mask_y[0],
-								&mask_w[0]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[1],
-								&mask_y[1],
-								&mask_w[1]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[2],
-								&mask_y[2],
-								&mask_w[2]))
-				return;
-		}
-	}
+	if (channel->is_affine) {
+		float s, t;
 
-	OUT_VERTEX(r->dst.x + r->width, r->dst.y + r->height);
-	OUT_VERTEX_F(src_x[2] * src_sf[0]);
-	OUT_VERTEX_F(src_y[2] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[2]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[2] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[2] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[2]);
-	}
+		sna_get_transformed_coordinates(x, y,
+						channel->transform,
+						&s, &t);
+		OUT_VERTEX_F(s * channel->scale[0]);
+		OUT_VERTEX_F(t * channel->scale[1]);
+	} else {
+		float s, t, w;
 
-	OUT_VERTEX(r->dst.x, r->dst.y + r->height);
-	OUT_VERTEX_F(src_x[1] * src_sf[0]);
-	OUT_VERTEX_F(src_y[1] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[1]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[1] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[1] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[1]);
+		sna_get_transformed_coordinates_3d(x, y,
+						   channel->transform,
+						   &s, &t, &w);
+		OUT_VERTEX_F(s * channel->scale[0]);
+		OUT_VERTEX_F(t * channel->scale[1]);
+		OUT_VERTEX_F(w);
 	}
+}
 
-	OUT_VERTEX(r->dst.x, r->dst.y);
-	OUT_VERTEX_F(src_x[0] * src_sf[0]);
-	OUT_VERTEX_F(src_y[0] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[0]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[0] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[0] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[0]);
-	}
+static void
+gen7_emit_composite_vertex(struct sna *sna,
+			   const struct sna_composite_op *op,
+			   int16_t srcX, int16_t srcY,
+			   int16_t mskX, int16_t mskY,
+			   int16_t dstX, int16_t dstY)
+{
+	OUT_VERTEX(dstX, dstY);
+	gen7_emit_composite_texcoord(sna, &op->src, srcX, srcY);
+	gen7_emit_composite_texcoord(sna, &op->mask, mskX, mskY);
+}
+
+fastcall static void
+gen7_emit_composite_primitive(struct sna *sna,
+			      const struct sna_composite_op *op,
+			      const struct sna_composite_rectangles *r)
+{
+	gen7_emit_composite_vertex(sna, op,
+				   r->src.x + r->width,  r->src.y + r->height,
+				   r->mask.x + r->width, r->mask.y + r->height,
+				   r->dst.x + r->width, r->dst.y + r->height);
+	gen7_emit_composite_vertex(sna, op,
+				   r->src.x,  r->src.y + r->height,
+				   r->mask.x, r->mask.y + r->height,
+				   r->dst.x,  r->dst.y + r->height);
+	gen7_emit_composite_vertex(sna, op,
+				   r->src.x,  r->src.y,
+				   r->mask.x, r->mask.y,
+				   r->dst.x,  r->dst.y);
 }
 
 static void gen7_emit_vertex_buffer(struct sna *sna,
@@ -2064,7 +2020,7 @@ gen7_render_video(struct sna *sna,
 	tmp.dst.pixmap = pixmap;
 	tmp.dst.width  = pixmap->drawable.width;
 	tmp.dst.height = pixmap->drawable.height;
-	tmp.dst.format = sna_format_for_depth(pixmap->drawable.depth);
+	tmp.dst.format = sna_render_format_for_depth(pixmap->drawable.depth);
 	tmp.dst.bo = priv->gpu_bo;
 
 	tmp.src.bo = frame->bo;
@@ -2163,6 +2119,7 @@ gen7_composite_solid_init(struct sna *sna,
 	channel->repeat = RepeatNormal;
 	channel->is_affine = TRUE;
 	channel->is_solid  = TRUE;
+	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -2366,7 +2323,7 @@ gen7_composite_picture(struct sna *sna,
 		channel->transform = picture->transform;
 
 	channel->card_format = gen7_get_card_format(picture->format);
-	if (channel->card_format == -1)
+	if (channel->card_format == (unsigned)-1)
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
 						  x, y, w, h, dst_x, dst_y);
 
@@ -2411,9 +2368,6 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 {
 	struct sna_pixmap *priv;
 
-	if (!gen7_check_dst_format(dst->format))
-		return FALSE;
-
 	op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	op->dst.width  = op->dst.pixmap->drawable.width;
 	op->dst.height = op->dst.pixmap->drawable.height;
@@ -2453,12 +2407,22 @@ gen7_composite_set_target(struct sna *sna, struct sna_composite_op *op, PictureP
 	return TRUE;
 }
 
+static bool prefer_blt_ring(struct sna *sna)
+{
+	return sna->kgem.ring != KGEM_RENDER;
+}
+
+static bool can_switch_rings(struct sna *sna)
+{
+	return sna->kgem.has_semaphores && !NO_RING_SWITCH;
+}
+
 static Bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
-	if (sna->kgem.ring != KGEM_RENDER) {
+	if (prefer_blt_ring(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
 		return TRUE;
 	}
@@ -2484,11 +2448,16 @@ try_blt(struct sna *sna,
 		return TRUE;
 	}
 
+	if (can_switch_rings(sna)) {
+		if (sna_picture_is_solid(src, NULL))
+			return TRUE;
+	}
+
 	return FALSE;
 }
 
 static bool
-is_gradient(PicturePtr picture)
+check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
 		return FALSE;
@@ -2542,9 +2511,10 @@ source_fallback(PicturePtr p, PixmapPtr pixmap)
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	if (is_gradient(p) ||
-	    !gen7_check_repeat(p) ||
-	    !gen7_check_format(p->format))
+	if (p->pSourcePict)
+		return check_gradient(p);
+
+	if (!gen7_check_repeat(p) || !gen7_check_format(p->format))
 		return true;
 
 	if (pixmap && source_is_busy(pixmap))
@@ -2578,7 +2548,7 @@ gen7_composite_fallback(struct sna *sna,
 
 	if (mask) {
 		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
-		mask_fallback = source_fallback(src, mask_pixmap);
+		mask_fallback = source_fallback(mask, mask_pixmap);
 	} else {
 		mask_pixmap = NULL;
 		mask_fallback = false;
@@ -2743,6 +2713,8 @@ gen7_render_composite(struct sna *sna,
 					    width, height,
 					    tmp);
 
+	if (op == PictOpClear)
+		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen7_composite_set_target(sna, tmp, dst))
 		return FALSE;
@@ -2842,12 +2814,21 @@ gen7_render_composite(struct sna *sna,
 
 		tmp->floats_per_vertex = 5 + 2 * !tmp->is_affine;
 	} else {
-		if (tmp->src.is_solid)
+		if (tmp->src.is_solid) {
 			tmp->prim_emit = gen7_emit_composite_primitive_solid;
-		else if (tmp->src.transform == NULL)
+			if (tmp->src.is_opaque && op == PictOpOver)
+				tmp->op = PictOpSrc;
+		} else if (tmp->src.transform == NULL)
 			tmp->prim_emit = gen7_emit_composite_primitive_identity_source;
-		else if (tmp->src.is_affine)
-			tmp->prim_emit = gen7_emit_composite_primitive_affine_source;
+		else if (tmp->src.is_affine) {
+			if (tmp->src.transform->matrix[0][1] == 0 &&
+			    tmp->src.transform->matrix[1][0] == 0) {
+				tmp->src.scale[0] /= tmp->src.transform->matrix[2][2];
+				tmp->src.scale[1] /= tmp->src.transform->matrix[2][2];
+				tmp->prim_emit = gen7_emit_composite_primitive_simple_source;
+			} else
+				tmp->prim_emit = gen7_emit_composite_primitive_affine_source;
+		}
 
 		tmp->floats_per_vertex = 3 + !tmp->is_affine;
 	}
@@ -2920,32 +2901,6 @@ gen7_composite_alpha_gradient_init(struct sna *sna,
 }
 
 inline static void
-gen7_emit_composite_texcoord(struct sna *sna,
-			     const struct sna_composite_channel *channel,
-			     int16_t x, int16_t y)
-{
-	float t[3];
-
-	if (channel->is_affine) {
-		sna_get_transformed_coordinates(x + channel->offset[0],
-						y + channel->offset[1],
-						channel->transform,
-						&t[0], &t[1]);
-		OUT_VERTEX_F(t[0] * channel->scale[0]);
-		OUT_VERTEX_F(t[1] * channel->scale[1]);
-	} else {
-		t[0] = t[1] = 0; t[2] = 1;
-		sna_get_transformed_coordinates_3d(x + channel->offset[0],
-						   y + channel->offset[1],
-						   channel->transform,
-						   &t[0], &t[1], &t[2]);
-		OUT_VERTEX_F(t[0] * channel->scale[0]);
-		OUT_VERTEX_F(t[1] * channel->scale[1]);
-		OUT_VERTEX_F(t[2]);
-	}
-}
-
-inline static void
 gen7_emit_composite_texcoord_affine(struct sna *sna,
 				    const struct sna_composite_channel *channel,
 				    int16_t x, int16_t y)
@@ -3344,7 +3299,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
 				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
-	return (sna->kgem.ring != KGEM_RENDER ||
+	return (sna->kgem.ring == KGEM_BLT ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
@@ -3667,7 +3622,7 @@ fallback:
 	if (!gen7_check_format(op->base.src.pict_format))
 		goto fallback;
 
-	op->base.op = alu == GXcopy ? PictOpSrc : PictOpClear;
+	op->base.op = PictOpSrc;
 
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
@@ -3751,7 +3706,9 @@ gen7_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
-	return sna->kgem.ring != KGEM_RENDER || untiled_tlb_miss(bo);
+	return (can_switch_rings(sna) ||
+		prefer_blt_ring(sna) ||
+		untiled_tlb_miss(bo));
 }
 
 static Bool
@@ -3810,9 +3767,10 @@ gen7_render_fill_boxes(struct sna *sna,
 	return FALSE;
 #endif
 
-	if (op == PictOpClear)
+	if (op == PictOpClear) {
 		pixel = 0;
-	else if (!sna_get_pixel_from_rgba(&pixel,
+		op = PictOpSrc;
+	} else if (!sna_get_pixel_from_rgba(&pixel,
 				     color->red,
 				     color->green,
 				     color->blue,
@@ -3824,8 +3782,6 @@ gen7_render_fill_boxes(struct sna *sna,
 	     __FUNCTION__, pixel, n,
 	     box[0].x1, box[0].y1, box[0].x2, box[0].y2));
 
-	memset(&tmp, 0, sizeof(tmp));
-
 	tmp.op = op;
 
 	tmp.dst.pixmap = dst;
@@ -3833,14 +3789,21 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.format = format;
 	tmp.dst.bo = dst_bo;
+	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
+	tmp.mask.bo = NULL;
+	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
+	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
+
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
+	tmp.has_component_alpha = FALSE;
+	tmp.need_magic_ca_pass = FALSE;
 
 	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
@@ -4004,7 +3967,7 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	if (alu == GXclear)
 		color = 0;
 
-	op->base.op = color == 0 ? PictOpClear : PictOpSrc;
+	op->base.op = PictOpSrc;
 
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
@@ -4097,7 +4060,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (alu == GXclear)
 		color = 0;
 
-	tmp.op = color == 0 ? PictOpClear : PictOpSrc;
+	tmp.op = PictOpSrc;
 
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
@@ -4195,7 +4158,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	if (too_large(dst->drawable.width, dst->drawable.height))
 		return gen7_render_clear_try_blt(sna, dst, bo);
 
-	tmp.op = PictOpClear;
+	tmp.op = PictOpSrc;
 
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
commit a3371613c9bf577a69cdf811ca1bebaea46bbe95
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 25 11:09:35 2012 +0100

    sna: Do not automagically convert GTT mappings on untiled scanout to CPU
    
    The likelihood of an untiled mapping of the scanout is slim, except for
    gen3 with large desktops, and there it should never be in the CPU
    domain...
    
    The issue is that we may perform an operation "inplace", yet incoherent
    with the display engine, and never flush the CPU cache, resulting in
    render corruption. In theory at least!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d97f559..4c4aa7c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3167,7 +3167,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
 
-	if (bo->tiling == I915_TILING_NONE &&
+	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
 	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
 		DBG(("%s: converting request for GTT map into CPU map\n",
 		     __FUNCTION__));
@@ -3274,6 +3274,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bytes(bo)));
 	assert(!bo->purged);
 	assert(list_is_empty(&bo->list));
+	assert(!bo->scanout);
 
 	if (IS_CPU_MAP(bo->map))
 		return MAP(bo->map);
commit 1abd92cd012ee46d44ed4873a5e750d56ae6668f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 25 11:16:30 2012 +0100

    sna: Clear the domain tracking after attaching the bo to scanout
    
    This is basically to make sure we don't continue treating it as CPU
    coherent.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index cb051b0..5275d4a 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -436,6 +436,7 @@ sna_crtc_restore(struct sna *sna)
 
 	assert(bo->tiling != I915_TILING_Y);
 	bo->scanout = true;
+	bo->domain = DOMAIN_NONE;
 
 	DBG(("%s: create fb %dx%d@%d/%d\n",
 	     __FUNCTION__,
@@ -673,6 +674,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		DBG(("%s: handle %d attached to fb %d\n",
 		     __FUNCTION__, bo->handle, sna_mode->fb_id));
 		bo->scanout = true;
+		bo->domain = DOMAIN_NONE;
 		sna_mode->fb_pixmap = sna->front->drawable.serialNumber;
 	}
 
@@ -787,6 +789,7 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 	DBG(("%s: attached handle %d to fb %d\n",
 	     __FUNCTION__, bo->handle, sna_crtc->shadow_fb_id));
 	bo->scanout = true;
+	bo->domain = DOMAIN_NONE;
 	return sna_crtc->shadow = shadow;
 }
 
@@ -1725,6 +1728,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		if (!sna_crtc_apply(crtc))
 			goto fail;
 	}
+	bo->domain = DOMAIN_NONE;
 
 	scrn->virtualX = width;
 	scrn->virtualY = height;
@@ -1869,6 +1873,7 @@ sna_page_flip(struct sna *sna,
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
 	if (count) {
 		bo->scanout = true;
+		bo->domain = DOMAIN_NONE;
 	} else {
 		drmModeRmFB(sna->kgem.fd, mode->fb_id);
 		mode->fb_id = *old_fb;
commit 8c58c840b1ba579a5601804fc710c58e1e00213f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 23 11:09:37 2012 +0100

    sna/dri: Always clear the scanout when destroying dri2 buffers
    
    As we may end up holding onto and releasing the Screen pixmap last, we
    may also be responsible for flushing the last reference to the scanout.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index f4d55e0..1a7b6bd 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -333,6 +333,7 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 		}
 
 		private->bo->flush = 0;
+		kgem_bo_clear_scanout(&sna->kgem, private->bo); /* paranoia */
 		kgem_bo_destroy(&sna->kgem, private->bo);
 
 		free(buffer);
@@ -388,6 +389,7 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 	sna_damage_destroy(&priv->cpu_damage);
 	priv->undamaged = false;
 
+	kgem_bo_clear_scanout(&sna->kgem, priv->gpu_bo); /* paranoia */
 	kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 	priv->gpu_bo = ref(bo);
 }
commit caf9144271a10f90ea580c246b2df3f69a10b7a0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 20 17:15:37 2012 +0100

    sna: Remove the assertions that the cached upload buffers are active
    
    These were added to track down some corruption, but the assertions
    themselves are incorrect, just very rare. The upload buffer may
    genuinely be cached if we abort the render operation after uploading the
    source data, leaving the proxy not coupled to any request.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48400
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 06bd2c0..382c671 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1943,7 +1943,6 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
-		assert(priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(&to_sna_from_pixmap(pixmap)->kgem,
 				priv->gpu_bo);
 		priv->gpu_bo = NULL;
@@ -2290,7 +2289,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
-		assert(priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = NULL;
 	}
@@ -2744,7 +2742,6 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
-		assert(priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = NULL;
 	}
@@ -3540,7 +3537,6 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload\n", __FUNCTION__));
-		assert(dst_priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
 		dst_priv->gpu_bo = NULL;
 	}
@@ -8383,9 +8379,10 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 	if (data.flags == 0)
 		return;
 
-	DBG(("%s: extents=(%d, %d), (%d, %d)\n", __FUNCTION__,
+	DBG(("%s: extents=(%d, %d), (%d, %d), flags=%x\n", __FUNCTION__,
 	     data.region.extents.x1, data.region.extents.y1,
-	     data.region.extents.x2, data.region.extents.y2));
+	     data.region.extents.x2, data.region.extents.y2,
+	     data.flags));
 
 	data.region.data = NULL;
 
commit aff3614efd5c12e658fa5723934e5bd50a83a316
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 20 13:21:40 2012 +0100

    sna: Always clear the mmapped domains when reusing partial upload buffers
    
    As we need to make sure that we do invalidate the caches appropriately
    on reuse. Mildly paranoid, but strictly required by the spec.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 72b6ad7..d97f559 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3414,6 +3414,29 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(bo->proxy == NULL);
+	kgem_bo_submit(kgem, bo);
+
+	if (bo->domain != DOMAIN_GTT) {
+		struct drm_i915_gem_set_domain set_domain;
+
+		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+		     bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
+
+		VG_CLEAR(set_domain);
+		set_domain.handle = bo->handle;
+		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_GTT;
+		}
+	}
+}
+
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
@@ -3424,7 +3447,6 @@ void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 
 void kgem_sync(struct kgem *kgem)
 {
-	struct drm_i915_gem_set_domain set_domain;
 	struct kgem_request *rq;
 	struct kgem_bo *bo;
 
@@ -3437,14 +3459,7 @@ void kgem_sync(struct kgem *kgem)
 	if (rq == kgem->next_request)
 		_kgem_submit(kgem);
 
-	VG_CLEAR(set_domain);
-	set_domain.handle = rq->bo->handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-
-	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-	kgem_retire(kgem);
-
+	kgem_bo_sync__gtt(kgem, rq->bo);
 	list_for_each_entry(bo, &kgem->sync_list, list)
 		kgem_bo_sync__cpu(kgem, bo);
 
@@ -3599,8 +3614,12 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->used = size;
 			list_move(&bo->base.list, &kgem->active_partials);
 
-			if (bo->base.vmap)
-				kgem_bo_sync__cpu(kgem, &bo->base);
+			if (bo->mmapped) {
+				if (IS_CPU_MAP(bo->base.map))
+					kgem_bo_sync__cpu(kgem, &bo->base);
+				else
+					kgem_bo_sync__gtt(kgem, &bo->base);
+			}
 
 			goto done;
 		} while (kgem_retire(kgem));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 913e1a9..1235b83 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -365,6 +365,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
commit cb6a3dc2edf3cd612f833bc9a4656166735ee856
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Apr 19 10:34:23 2012 +0100

    sna: Discard proxy upload buffer if we choose to render to it
    
    Even if we try to avoid treating an upload buffer as a real GPU target,
    we may still choose to migrate the buffer to the GPU in order to keep
    other buffers on the GPU. In that case, we do want to create a real GPU
    bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1c43fb7..06bd2c0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2288,6 +2288,13 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		goto active;
 	}
 
+	if (flags & MOVE_WRITE && priv->gpu_bo && priv->gpu_bo->proxy) {
+		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
+		assert(priv->gpu_bo->proxy->rq);
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+		priv->gpu_bo = NULL;
+	}
+
 	if ((flags & MOVE_READ) == 0)
 		sna_damage_destroy(&priv->cpu_damage);
 
commit 4cf74d409ca63c6a479c1ee2187908c04f3b830b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Apr 19 09:09:32 2012 +0100

    sna: Don't consider upload proxies as being on the GPU for render targets
    
    The upload proxy is a fake buffer that we do not want to render to as
    then the damage tracking becomes extremely confused and the buffer
    itself is not optimised for persistent rendering. We assert that we do not
    use it as a render target, and this patch adds the check so that we
    avoid treating the proxy as a valid target when choosing the render
    path.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 03e1969..b9acea1 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -75,7 +75,7 @@ is_gpu(DrawablePtr drawable)
 	if (priv == NULL || priv->clear)
 		return false;
 
-	if (priv->gpu_damage || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)))
+	if (priv->gpu_damage || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo) && !priv->gpu_bo->proxy))
 		return true;
 
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
commit fd81408b978c9b57c046ee43d2d32e1370e83a7d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 18 11:39:43 2012 +0100

    sna: Increase the render target alignment to 4 pixels on gen4+ as well
    
    Reported-and-tested-by: Toralf Förster <toralf.foerster at gmx.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48865
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3d722d0..72b6ad7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -838,13 +838,8 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		if (kgem->gen < 40) {
-			tile_width = scanout ? 64 : 4 * bpp >> 3;
-			tile_height = 4;
-		} else {
-			tile_width = scanout ? 64 : 2 * bpp >> 3;
-			tile_height = 2;
-		}
+		tile_width = scanout ? 64 : 4 * bpp >> 3;
+		tile_height = 2;
 		break;
 	case I915_TILING_X:
 		tile_width = 512;
@@ -898,7 +893,7 @@ static uint32_t kgem_aligned_height(struct kgem *kgem,
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		tile_height = kgem->gen < 40 ? 4 : 2;
+		tile_height = 2;
 		break;
 	case I915_TILING_X:
 		tile_height = 8;
@@ -2881,7 +2876,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 		stride = ALIGN(width, 2) * bpp >> 3;
 		stride = ALIGN(stride, 4);
-		size = ALIGN(height, kgem->gen < 40 ? 4 : 2) * stride;
+		size = ALIGN(height, 2) * stride;
 
 		assert(size >= PAGE_SIZE);
 
commit 11599e52b842b5db76798879b0fbb57762fe6002
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 17 20:08:14 2012 +0100

    sna/dri: Decouple the frame event info after attaching along error paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index afec831..f4d55e0 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1372,15 +1372,15 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			return FALSE;
 		}
 
+		sna_dri_reference_buffer(front);
+		sna_dri_reference_buffer(back);
+
 		if (!sna_dri_page_flip(sna, info)) {
 			DBG(("%s: failed to queue page flip\n", __FUNCTION__));
-			free(info);
+			sna_dri_frame_event_info_free(info);
 			return FALSE;
 		}
 
-		sna_dri_reference_buffer(front);
-		sna_dri_reference_buffer(back);
-
 		get_private(info->back)->bo =
 			kgem_create_2d(&sna->kgem,
 				       draw->width,
@@ -1426,7 +1426,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			vbl.request.type |= DRM_VBLANK_SECONDARY;
 		vbl.request.sequence = 0;
 		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-			free(info);
+			sna_dri_frame_event_info_free(info);
 			return FALSE;
 		}
 
@@ -1482,7 +1482,7 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence -= 1;
 		vbl.request.signal = (unsigned long)info;
 		if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
-			free(info);
+			sna_dri_frame_event_info_free(info);
 			return FALSE;
 		}
 
@@ -1610,9 +1610,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	if (pipe > 0)
 		vbl.request.type |= DRM_VBLANK_SECONDARY;
 	vbl.request.sequence = 0;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
+	if (drmWaitVBlank(sna->kgem.fd, &vbl))
 		goto blit_fallback;
-	}
 
 	current_msc = vbl.reply.sequence;
 
@@ -1677,9 +1676,8 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 		vbl.request.sequence += divisor;
 
 	vbl.request.signal = (unsigned long)info;
-	if (drmWaitVBlank(sna->kgem.fd, &vbl)) {
+	if (drmWaitVBlank(sna->kgem.fd, &vbl))
 		goto blit_fallback;
-	}
 
 	*target_msc = vbl.reply.sequence;
 	info->frame = *target_msc;
@@ -1762,12 +1760,11 @@ blit:
 		if (!sna_dri_add_frame_event(info)) {
 			DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 			free(info);
-			info = NULL;
 			goto blit;
 		}
 
 		if (!sna_dri_page_flip(sna, info)) {
-			free(info);
+			sna_dri_frame_event_info_free(info);
 			goto blit;
 		}
 
@@ -1935,7 +1932,6 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 	if (!sna_dri_add_frame_event(info)) {
 		DBG(("%s: failed to hook up frame event\n", __FUNCTION__));
 		free(info);
-		info = NULL;
 		goto out_complete;
 	}
 
@@ -1959,7 +1955,7 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 					   strerror(errno));
 				limit--;
 			}
-			goto out_complete;
+			goto out_free_info;
 		}
 
 		info->frame = vbl.reply.sequence;
@@ -1996,15 +1992,16 @@ sna_dri_schedule_wait_msc(ClientPtr client, DrawablePtr draw, CARD64 target_msc,
 				   strerror(errno));
 			limit--;
 		}
-		goto out_complete;
+		goto out_free_info;
 	}
 
 	info->frame = vbl.reply.sequence;
 	DRI2BlockClient(client, draw);
 	return TRUE;
 
+out_free_info:
+	sna_dri_frame_event_info_free(info);
 out_complete:
-	free(info);
 	DRI2WaitMSCComplete(client, draw, target_msc, 0, 0);
 	return TRUE;
 }
commit b817200371bfe16f44b879a793cf4a75ad17bc5c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 17 17:54:58 2012 +0100

    Don't issue a scanline wait while VT switched
    
    Be paranoid and check that we own the VT before emitting a scanline
    wait. If we attempt to wait on a fb/pipe that we do not own, we may
    issue an illegal command and cause a lockup.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index f6f0c86..a5ed545 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -553,7 +553,8 @@ I830DRI2CopyRegion(DrawablePtr drawable, RegionPtr pRegion,
 	ValidateGC(dst, gc);
 
 	/* Wait for the scanline to be outside the region to be copied */
-	if (pixmap_is_scanout(get_drawable_pixmap(dst)) &&
+	if (scrn->vtSema &&
+	    pixmap_is_scanout(get_drawable_pixmap(dst)) &&
 	    intel->swapbuffers_wait && INTEL_INFO(intel)->gen < 60) {
 		BoxPtr box;
 		BoxRec crtcbox;
diff --git a/src/intel_video.c b/src/intel_video.c
index 25f6483..0834bb2 100644
--- a/src/intel_video.c
+++ b/src/intel_video.c
@@ -1285,7 +1285,7 @@ intel_wait_for_scanline(ScrnInfoPtr scrn, PixmapPtr pixmap,
 	int y1, y2;
 
 	pipe = -1;
-	if (pixmap_is_scanout(pixmap))
+	if (scrn->vtSema && pixmap_is_scanout(pixmap))
 		pipe = intel_crtc_to_pipe(crtc);
 	if (pipe < 0)
 		return;
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ef3b0f9..cb051b0 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2018,6 +2018,10 @@ sna_covering_crtc(ScrnInfoPtr scrn,
 	int best_coverage, c;
 	BoxRec best_crtc_box;
 
+	/* If we do not own the VT, we do not own the CRTC either */
+	if (!scrn->vtSema)
+		return NULL;
+
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
 
commit af4a6e8cb52ace594934446e6d8a7aaa1945a9b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 16 20:57:03 2012 +0100

    sna: Don't assert exported buffers are not busy
    
    As we do not fully control these buffers, we cannot truly say when they
    are idle, we can only trust that the split between us and the compositor
    doesn't lead to much corruption.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 9b080f1..3d722d0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -312,7 +312,7 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 {
 	DBG(("%s: handle=%d, domain=%d\n",
 	     __FUNCTION__, bo->handle, bo->domain));
-	assert(!kgem_busy(kgem, bo->handle));
+	assert(bo->flush || !kgem_busy(kgem, bo->handle));
 
 	if (bo->rq)
 		kgem_retire(kgem);
@@ -332,7 +332,7 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 {
 	assert(bo->refcnt);
 	assert(!bo->purged);
-	assert(!kgem_busy(kgem, bo->handle));
+	assert(bo->flush || !kgem_busy(kgem, bo->handle));
 	assert(bo->proxy == NULL);
 
 	assert(length <= bytes(bo));
@@ -1033,13 +1033,13 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 					    struct kgem_bo *bo)
 {
+	assert(bo->reusable);
+	assert(bo->rq == NULL);
+	assert(bo->domain != DOMAIN_GPU);
 	assert(!kgem_busy(kgem, bo->handle));
 	assert(!bo->proxy);
 	assert(!bo->io);
 	assert(!bo->needs_flush);
-	assert(bo->rq == NULL);
-	assert(bo->domain != DOMAIN_GPU);
-	assert(bo->reusable);
 	assert(list_is_empty(&bo->vma));
 
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
@@ -1098,6 +1098,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		goto destroy;
 
 	if (bo->vmap) {
+		assert(!bo->flush);
 		DBG(("%s: handle=%d is vmapped, tracking until free\n",
 		     __FUNCTION__, bo->handle));
 		if (bo->rq == NULL) {
@@ -2279,6 +2280,7 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 	}
 
 	bo->reusable = false;
+	bo->flush = true;
 	return bo;
 }
 
@@ -2807,8 +2809,7 @@ search_inactive:
 		assert(bo->refcnt == 0);
 		assert(bo->reusable);
 		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
-		assert((flags & CREATE_INACTIVE) == 0 ||
-		       !kgem_busy(kgem, bo->handle));
+		assert((flags & CREATE_INACTIVE) == 0 || !kgem_busy(kgem, bo->handle));
 		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 		bo->refcnt = 1;
 		return bo;
@@ -3206,8 +3207,8 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->domain != DOMAIN_GTT) {
 		struct drm_i915_gem_set_domain set_domain;
 
-		DBG(("%s: sync: needs_flush? %d, domain? %d\n", __FUNCTION__,
-		     bo->needs_flush, bo->domain));
+		DBG(("%s: sync: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+		     bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
 
 		/* XXX use PROT_READ to avoid the write flush? */
 
commit a16616209bb2dcb7aaa859b38e154f0a10faa82b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 14 19:03:25 2012 +0100

    uxa: Fix leak of glyph mask for unhandled glyph composition
    
    ==1401== 7,344 bytes in 34 blocks are possibly lost in loss record 570 of 587
    ==1401==    at 0x4027034: calloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
    ==1401==    by 0x8BE5150: drm_intel_gem_bo_alloc_internal (intel_bufmgr_gem.c:689)
    ==1401==    by 0x899FC04: intel_uxa_create_pixmap (intel_uxa.c:1077)
    ==1401==    by 0x89C2C41: uxa_glyphs (uxa-glyphs.c:254)
    ==1401==    by 0x21F05E: damageGlyphs (damage.c:647)
    ==1401==    by 0x218E06: ProcRenderCompositeGlyphs (render.c:1434)
    ==1401==    by 0x15AA40: Dispatch (dispatch.c:439)
    ==1401==    by 0x1499E9: main (main.c:287)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index b754f4e..921b99c 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -812,8 +812,10 @@ uxa_glyphs_via_mask(CARD8 op,
 				if (!uxa_pixmap_is_offscreen(src_pixmap) ||
 				    !uxa_screen->info->prepare_composite(PictOpAdd,
 									 this_atlas, NULL, mask,
-									 src_pixmap, NULL, pixmap))
+									 src_pixmap, NULL, pixmap)) {
+					FreePicture(mask, 0);
 					return -1;
+				}
 
 				glyph_atlas = this_atlas;
 			}
commit ae145c21e9fd3a12164f8b4720d059f9c158249e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 14 18:42:23 2012 +0100

    sna: Avoid leaking the plane resources when determining sprite planes
    
    Fixes the tiny, one-off leak:
    
    ==1407== 8 bytes in 1 blocks are definitely lost in loss record 48 of 527
    ==1407==    at 0x402894D: malloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
    ==1407==    by 0x8580BE8: drmMalloc (xf86drm.c:147)
    ==1407==    by 0x8583D54: drmAllocCpy (xf86drmMode.c:73)
    ==1407==    by 0x8585265: drmModeGetPlaneResources (xf86drmMode.c:955)
    ==1407==    by 0x8A1BCE9: sna_video_sprite_setup (sna_video_sprite.c:367)
    ==1407==    by 0x8A1A0A3: sna_video_init (sna_video.c:523)
    ==1407==    by 0x89FD4E0: sna_screen_init (sna_driver.c:935)
    ==1407==    by 0x15AD80: AddScreen (dispatch.c:3909)
    ==1407==    by 0x19A2DB: InitOutput (xf86Init.c:817)
    ==1407==    by 0x14981C: main (main.c:204)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
index 82db122..0e5f3ab 100644
--- a/src/sna/sna_video_sprite.c
+++ b/src/sna/sna_video_sprite.c
@@ -361,21 +361,21 @@ XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna,
 					   ScreenPtr screen)
 {
 	XF86VideoAdaptorPtr adaptor;
+	struct drm_mode_get_plane_res r;
 	struct sna_video *video;
-	drmModePlaneRes *plane_resources;
 
-	plane_resources = drmModeGetPlaneResources(sna->kgem.fd);
-	if (!plane_resources)
+	memset(&r, 0, sizeof(struct drm_mode_get_plane_res));
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &r))
+		return NULL;
+	if (r.count_planes == 0)
 		return NULL;
 
 	adaptor = calloc(1,
 			 sizeof(XF86VideoAdaptorRec) +
 			 sizeof(struct sna_video) +
 			 sizeof(DevUnion));
-	if (!adaptor) {
-		free(plane_resources);
+	if (!adaptor)
 		return NULL;
-	}
 
 	adaptor->type = XvWindowMask | XvInputMask | XvImageMask;
 	adaptor->flags = VIDEO_OVERLAID_IMAGES /*| VIDEO_CLIP_TO_VIEWPORT */ ;
@@ -428,7 +428,5 @@ XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna,
 
 	xvColorKey = MAKE_ATOM("XV_COLORKEY");
 
-	free(plane_resources);
-
 	return adaptor;
 }
commit 69a7737abeded6ee923643bd8a80a5a84e6a979c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 14 12:06:51 2012 +0100

    sna: Align texture subsurfaces to 2x2 texture samples
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 66e078a..ec2aaad 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -839,8 +839,15 @@ sna_render_pixmap_partial(struct sna *sna,
 		box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
 
 		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
-	} else
+	} else {
+		box.y1 = box.y1 & ~1;
+		box.y2 = ALIGN(box.y2, 2);
+
+		box.x1 = box.x1 & ~1;
+		box.x2 = ALIGN(box.x2, 2);
+
 		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8;
+	}
 
 	if (box.x2 > pixmap->drawable.width)
 		box.x2 = pixmap->drawable.width;
commit 1d2a46e0902d82b43a5e12af36521a6a7fd6ba39
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 14 12:04:23 2012 +0100

    sna: Align redirect subsurfaces to 2x2 or 4x4 render spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 8af80f2..66e078a 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1767,8 +1767,23 @@ sna_render_composite_redirect(struct sna *sna,
 			box.x2 = ALIGN(box.x2, tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel);
 
 			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
-		} else
+		} else {
+			if (sna->kgem.gen < 40) {
+				box.y1 = box.y1 & ~3;
+				box.y2 = ALIGN(box.y2, 4);
+
+				box.x1 = box.x1 & ~3;
+				box.x2 = ALIGN(box.x2, 4);
+			} else {
+				box.y1 = box.y1 & ~1;
+				box.y2 = ALIGN(box.y2, 2);
+
+				box.x1 = box.x1 & ~1;
+				box.x2 = ALIGN(box.x2, 2);
+			}
+
 			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8;
+		}
 
 		if (box.y2 > op->dst.pixmap->drawable.height)
 			box.y2 = op->dst.pixmap->drawable.height;
commit 1ce2b65d622797000e0a4db7dc851d5b1da04f85
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 14 11:59:31 2012 +0100

    sna: Align render target sizes on gen2/3 to 4x4 render spans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f1b0376..9b080f1 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -686,12 +686,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: partial buffer size=%d [%d KiB]\n", __FUNCTION__,
 	     kgem->partial_buffer_size, kgem->partial_buffer_size / 1024));
 
-	kgem->min_alignment = 4;
-	if (gen < 60)
-		/* XXX workaround an issue where we appear to fail to
-		 * disable dual-stream mode */
-		kgem->min_alignment = 64;
-
 	kgem->max_object_size = 2 * aperture.aper_size / 3;
 	kgem->max_gpu_size = kgem->max_object_size;
 	if (!kgem->has_llc)
@@ -776,8 +770,8 @@ static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 				   uint32_t width, uint32_t bpp,
 				   bool scanout)
 {
-	width = width * bpp >> 3;
-	return ALIGN(width, scanout ? 64 : kgem->min_alignment);
+	width = ALIGN(width, 4) * bpp >> 3;
+	return ALIGN(width, scanout ? 64 : 4);
 }
 
 void kgem_get_tile_size(struct kgem *kgem, int tiling,
@@ -838,14 +832,19 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 			tile_width = 512;
 			tile_height = kgem->gen < 30 ? 16 : 8;
 		} else {
-			tile_width = scanout ? 64 : kgem->min_alignment;
-			tile_height = 2;
+			tile_width = scanout ? 64 : 4 * bpp >> 3;
+			tile_height = 4;
 		}
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		tile_width = scanout ? 64 : kgem->min_alignment;
-		tile_height = 2;
+		if (kgem->gen < 40) {
+			tile_width = scanout ? 64 : 4 * bpp >> 3;
+			tile_height = 4;
+		} else {
+			tile_width = scanout ? 64 : 2 * bpp >> 3;
+			tile_height = 2;
+		}
 		break;
 	case I915_TILING_X:
 		tile_width = 512;
@@ -899,7 +898,7 @@ static uint32_t kgem_aligned_height(struct kgem *kgem,
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
-		tile_height = 2;
+		tile_height = kgem->gen < 40 ? 4 : 2;
 		break;
 	case I915_TILING_X:
 		tile_height = 8;
@@ -2881,7 +2880,7 @@ struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
 
 		stride = ALIGN(width, 2) * bpp >> 3;
 		stride = ALIGN(stride, 4);
-		size = ALIGN(height, 2) * stride;
+		size = ALIGN(height, kgem->gen < 40 ? 4 : 2) * stride;
 
 		assert(size >= PAGE_SIZE);
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index e52645c..913e1a9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -161,7 +161,6 @@ struct kgem {
 	uint16_t half_cpu_cache_pages;
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
-	uint32_t min_alignment;
 	uint32_t max_upload_tile_size, max_copy_tile_size;
 	uint32_t max_gpu_size, max_cpu_size;
 	uint32_t large_object_size, max_object_size;
commit 89f2b09b1e5be9842747998ea4fe32a6f1ede4cc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 16:37:43 2012 +0100

    sna: Avoid using TILING_Y for large objects on gen2/3
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48636
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8fcdd14..1c43fb7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -458,6 +458,16 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 		     __FUNCTION__));
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = false;
+
+		/* Only on later generations was the render pipeline
+		 * more flexible than the BLT. So on gen2/3, prefer to
+		 * keep large objects accessible through the BLT.
+		 */
+		if (sna->kgem.gen < 40 &&
+		    (pixmap->drawable.width > sna->render.max_3d_size ||
+		     pixmap->drawable.height > sna->render.max_3d_size))
+			return I915_TILING_X;
+
 		return I915_TILING_Y;
 	}
 
commit eaadbce122059066353743f1653aa16e9d9b747f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 15:12:36 2012 +0100

    sna: Relax bogus assertion
    
    The bo may be considered unmappable due to being bound outside the
    mappable region, which we are attempting to rectify through mapping into
    the GTT domain.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2d09666..f1b0376 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3186,7 +3186,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 	ptr = bo->map;
 	if (ptr == NULL) {
-		assert(kgem_bo_is_mappable(kgem, bo));
+		assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
commit b478420740d05fa87ddbd92042b1f7f2d002f73e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 15:07:13 2012 +0100

    sna: Limit the buffer reuse for mappable uploads to only those with mmaps
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f0c971e..2d09666 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2156,6 +2156,9 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 			//assert(!kgem_busy(kgem, bo->handle));
 			return bo;
 		}
+
+		if (flags & CREATE_EXACT)
+			return NULL;
 	}
 
 	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
@@ -3683,7 +3686,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		 * devices like gen2 or with relatively slow gpu like i3.
 		 */
 		old = search_linear_cache(kgem, alloc,
-					  CREATE_INACTIVE | CREATE_GTT_MAP);
+					  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
 #if HAVE_I915_GEM_BUFFER_INFO
 		if (old) {
 			struct drm_i915_gem_buffer_info info;
@@ -3705,7 +3708,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 #endif
 		if (old == NULL)
 			old = search_linear_cache(kgem, NUM_PAGES(size),
-						  CREATE_INACTIVE | CREATE_GTT_MAP);
+						  CREATE_EXACT | CREATE_INACTIVE | CREATE_GTT_MAP);
 		if (old) {
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
commit 90e2740e7e459c56205fa65bab1ae3dbfd5d3945
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 13:51:57 2012 +0100

    sna: Remove the conflicting assertion during GTT map
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48636
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 14a0067..f0c971e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3183,7 +3183,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 	ptr = bo->map;
 	if (ptr == NULL) {
-		assert(bytes(bo) <= kgem->aperture_mappable / 4);
+		assert(kgem_bo_is_mappable(kgem, bo));
 
 		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
commit 9e6d55a8d63f10ca6a2b10e44d00c84b07724485
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 09:56:39 2012 +0100

    sna: Don't use miSpan code for wide-spans by default, too expensive
    
    Only use the fall-forward miSpans code when it prevents a readback.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b3507ad..8fcdd14 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -60,7 +60,7 @@
 #define FORCE_FLUSH 0
 
 #define USE_INPLACE 1
-#define USE_WIDE_SPANS 1 /* -1 force CPU, 1 force GPU */
+#define USE_WIDE_SPANS 0 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
 #define USE_SHM_VMAP 0
 #define PREFER_VMAP 0
commit 9becfbbf89f2b170e50f705cabfc7bbf1dcf9846
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 13 09:38:57 2012 +0100

    sna: Draw dashed PolyLines twice, once for the fgPixel, once for the bgPixel
    
    As the fast paths only setup state upfront, we were missing the state
    changes required between dash-on/off. Take advantage of that each pixel
    is only drawn once to batch the state changes and run the
    miZeroDashLines twice.
    
    A future task would be to use a custom line drawing routine...
    
    Fixes regression from ec1267df746512c2e262ef0bd9e9527bc5efe6f4.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 590cc11..b3507ad 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4275,6 +4275,18 @@ sna_fill_spans__fill(DrawablePtr drawable,
 }
 
 static void
+sna_fill_spans__dash(DrawablePtr drawable,
+		     GCPtr gc, int n,
+		     DDXPointPtr pt, int *width, int sorted)
+{
+	struct sna_fill_spans *data = sna_gc(gc)->priv;
+	struct sna_fill_op *op = data->op;
+
+	if (op->base.u.blt.pixel == gc->fgPixel)
+		sna_fill_spans__fill(drawable, gc, n, pt, width, sorted);
+}
+
+static void
 sna_fill_spans__fill_offset(DrawablePtr drawable,
 			    GCPtr gc, int n,
 			    DDXPointPtr pt, int *width, int sorted)
@@ -4306,6 +4318,18 @@ sna_fill_spans__fill_offset(DrawablePtr drawable,
 }
 
 static void
+sna_fill_spans__dash_offset(DrawablePtr drawable,
+			    GCPtr gc, int n,
+			    DDXPointPtr pt, int *width, int sorted)
+{
+	struct sna_fill_spans *data = sna_gc(gc)->priv;
+	struct sna_fill_op *op = data->op;
+
+	if (op->base.u.blt.pixel == gc->fgPixel)
+		sna_fill_spans__fill_offset(drawable, gc, n, pt, width, sorted);
+}
+
+static void
 sna_fill_spans__fill_clip_extents(DrawablePtr drawable,
 				  GCPtr gc, int n,
 				  DDXPointPtr pt, int *width, int sorted)
@@ -4349,6 +4373,18 @@ sna_fill_spans__fill_clip_extents(DrawablePtr drawable,
 }
 
 static void
+sna_fill_spans__dash_clip_extents(DrawablePtr drawable,
+				  GCPtr gc, int n,
+				  DDXPointPtr pt, int *width, int sorted)
+{
+	struct sna_fill_spans *data = sna_gc(gc)->priv;
+	struct sna_fill_op *op = data->op;
+
+	if (op->base.u.blt.pixel == gc->fgPixel)
+		sna_fill_spans__fill_clip_extents(drawable, gc, n, pt, width, sorted);
+}
+
+static void
 sna_fill_spans__fill_clip_boxes(DrawablePtr drawable,
 				GCPtr gc, int n,
 				DDXPointPtr pt, int *width, int sorted)
@@ -4422,6 +4458,18 @@ sna_fill_spans__fill_clip_boxes(DrawablePtr drawable,
 		op->boxes(data->sna, op, box, b - box);
 }
 
+static void
+sna_fill_spans__dash_clip_boxes(DrawablePtr drawable,
+				GCPtr gc, int n,
+				DDXPointPtr pt, int *width, int sorted)
+{
+	struct sna_fill_spans *data = sna_gc(gc)->priv;
+	struct sna_fill_op *op = data->op;
+
+	if (op->base.u.blt.pixel == gc->fgPixel)
+		sna_fill_spans__fill_clip_boxes(drawable, gc, n, pt, width, sorted);
+}
+
 static Bool
 sna_fill_spans_blt(DrawablePtr drawable,
 		   struct kgem_bo *bo, struct sna_damage **damage,
@@ -6646,44 +6694,78 @@ spans_fallback:
 		get_drawable_deltas(drawable, data.pixmap, &data.dx, &data.dy);
 		sna_gc(gc)->priv = &data;
 
-		if (gc->lineWidth == 0 &&
-		    gc_is_solid(gc, &color)) {
+		if (gc->lineWidth == 0 && gc_is_solid(gc, &color)) {
 			struct sna_fill_op fill;
 
-			if (!sna_fill_init_blt(&fill,
-					       data.sna, data.pixmap,
-					       data.bo, gc->alu, color))
-				goto fallback;
+			if (gc->lineStyle == LineSolid) {
+				if (!sna_fill_init_blt(&fill,
+						       data.sna, data.pixmap,
+						       data.bo, gc->alu, color))
+					goto fallback;
 
-			data.op = &fill;
+				data.op = &fill;
 
-			if ((data.flags & 2) == 0) {
-				if (data.dx | data.dy)
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_offset;
-				else
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill;
-			} else {
-				region_maybe_clip(&data.region,
-						  gc->pCompositeClip);
-				if (!RegionNotEmpty(&data.region))
-					return;
+				if ((data.flags & 2) == 0) {
+					if (data.dx | data.dy)
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_offset;
+					else
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill;
+				} else {
+					region_maybe_clip(&data.region,
+							  gc->pCompositeClip);
+					if (!RegionNotEmpty(&data.region))
+						return;
 
-				if (region_is_singular(&data.region))
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_extents;
-				else
-					sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_boxes;
-			}
-			assert(gc->miTranslate);
+					if (region_is_singular(&data.region))
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_extents;
+					else
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__fill_clip_boxes;
+				}
+				assert(gc->miTranslate);
 
-			gc->ops = &sna_gc_ops__tmp;
-			if (gc->lineStyle == LineSolid) {
+				gc->ops = &sna_gc_ops__tmp;
 				DBG(("%s: miZeroLine (solid fill)\n", __FUNCTION__));
 				miZeroLine(drawable, gc, mode, n, pt);
+				fill.done(data.sna, &fill);
 			} else {
-				DBG(("%s: miZeroDashLine (solid fill)\n", __FUNCTION__));
+				data.op = &fill;
+
+				if ((data.flags & 2) == 0) {
+					if (data.dx | data.dy)
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__dash_offset;
+					else
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__dash;
+				} else {
+					region_maybe_clip(&data.region,
+							  gc->pCompositeClip);
+					if (!RegionNotEmpty(&data.region))
+						return;
+
+					if (region_is_singular(&data.region))
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__dash_clip_extents;
+					else
+						sna_gc_ops__tmp.FillSpans = sna_fill_spans__dash_clip_boxes;
+				}
+				assert(gc->miTranslate);
+
+				gc->ops = &sna_gc_ops__tmp;
+				DBG(("%s: miZeroLine (solid dash)\n", __FUNCTION__));
+				if (!sna_fill_init_blt(&fill,
+						       data.sna, data.pixmap,
+						       data.bo, gc->alu, color))
+					goto fallback;
+
 				miZeroDashLine(drawable, gc, mode, n, pt);
+				fill.done(data.sna, &fill);
+
+				if (sna_fill_init_blt(&fill,
+						       data.sna, data.pixmap,
+						       data.bo, gc->alu,
+						       gc->bgPixel)) {
+					miZeroDashLine(drawable, gc, mode, n, pt);
+					fill.done(data.sna, &fill);
+				}
 			}
-			fill.done(data.sna, &fill);
 		} else {
 			/* Note that the WideDash functions alternate
 			 * between filling using fgPixel and bgPixel
commit e269ed5d4e1b9d758aeb9a85ed0fa631f0aff0b1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Apr 12 22:46:22 2012 +0100

    sna: Restore CPU domain for vmapped buffers when reusing
    
    For a vmapped upload buffer, we need to notify the kernel (and thereby
    the GPU) to invalidate the sampler and flush its caches when we reuse an
    idle buffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6ea4d48..14a0067 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3600,6 +3600,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			offset = 0;
 			bo->used = size;
 			list_move(&bo->base.list, &kgem->active_partials);
+
+			if (bo->base.vmap)
+				kgem_bo_sync__cpu(kgem, &bo->base);
+
 			goto done;
 		} while (kgem_retire(kgem));
 	}
commit d29b8650c40c673e6ddddaf52db9247e9836cba8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Apr 12 22:23:12 2012 +0100

    sna: Revert use of mmap64()
    
    As this just causes mayhem on a 64-bit platform. Doomed if you do, doomed
    if you don't. :(
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 109f3b7..6ea4d48 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -220,7 +220,7 @@ static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
 		return NULL;
 	}
 
-	ptr = mmap64(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
+	ptr = mmap(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
 	if (ptr == MAP_FAILED) {
 		assert(0);
 		ptr = NULL;
commit a78b1d71a39ae29d5f85bd82c09202ebec3e6539
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Apr 12 15:37:25 2012 +0100

    sna: Declare AC_SYS_LARGEFILE for mmap64
    
    In order to use the full 32-bits of mmap address space on small
    platforms we need to use mmap64().
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index b5919bf..5124100 100644
--- a/configure.ac
+++ b/configure.ac
@@ -52,6 +52,7 @@ m4_ifndef([XORG_DRIVER_CHECK_EXT],
 # Initialize libtool
 AC_DISABLE_STATIC
 AC_PROG_LIBTOOL
+AC_SYS_LARGEFILE
 
 # Are we in a git checkout?
 dot_git=no
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6ea4d48..109f3b7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -220,7 +220,7 @@ static void *gem_mmap(int fd, uint32_t handle, int size, int prot)
 		return NULL;
 	}
 
-	ptr = mmap(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
+	ptr = mmap64(0, size, prot, MAP_SHARED, fd, mmap_arg.offset);
 	if (ptr == MAP_FAILED) {
 		assert(0);
 		ptr = NULL;
commit 09deba927daa96be6230b1c3e1b425622512d8a2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 20:59:36 2012 +0100

    sna: Check ioctl return from set-domain
    
    Let's not assume it succeeds and so avoid altering our bookkeeping along
    failure paths.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2a8b3cd..6ea4d48 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3213,10 +3213,10 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 		set_domain.handle = bo->handle;
 		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
 		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-
-		kgem_bo_retire(kgem, bo);
-		bo->domain = DOMAIN_GTT;
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_GTT;
+		}
 	}
 
 	return ptr;
@@ -3409,10 +3409,10 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 		set_domain.read_domains = I915_GEM_DOMAIN_CPU;
 		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
 
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-
-		kgem_bo_retire(kgem, bo);
-		bo->domain = DOMAIN_CPU;
+		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+			kgem_bo_retire(kgem, bo);
+			bo->domain = DOMAIN_CPU;
+		}
 	}
 }
 
@@ -4033,11 +4033,15 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		set_domain.read_domains =
 			IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
 
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+		if (drmIoctl(kgem->fd,
+			     DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain))
+			return;
 	} else {
-		gem_read(kgem->fd,
-			 bo->base.handle, (char *)bo->mem+offset,
-			 offset, length);
+		if (gem_read(kgem->fd,
+			     bo->base.handle, (char *)bo->mem+offset,
+			     offset, length))
+			return;
+
 		kgem_bo_map__cpu(kgem, &bo->base);
 	}
 	kgem_bo_retire(kgem, &bo->base);
commit 0b12f1d8e4d0a4fafac9553f144535efc4ebe0be
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 15:23:54 2012 +0100

    sna: Fix shadowed variable
    
    sna_accel.c: In function 'sna_pixmap_move_area_to_gpu':
    sna_accel.c:1751:12: warning: declaration of 'flags' shadows a parameter
    [-Wshadow]
    sna_accel.c:1731:72: warning: shadowed declaration is here [-Wshadow]
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b6adc60..590cc11 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1748,20 +1748,20 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 	if (priv->gpu_bo == NULL) {
-		unsigned flags;
+		unsigned create;
 
-		flags = 0;
+		create = 0;
 		if (priv->cpu_damage)
-			flags |= CREATE_INACTIVE;
+			create |= CREATE_INACTIVE;
 		if (pixmap->usage_hint == SNA_CREATE_FB)
-			flags |= CREATE_EXACT | CREATE_SCANOUT;
+			create |= CREATE_EXACT | CREATE_SCANOUT;
 
 		priv->gpu_bo = kgem_create_2d(&sna->kgem,
 					      pixmap->drawable.width,
 					      pixmap->drawable.height,
 					      pixmap->drawable.bitsPerPixel,
 					      sna_pixmap_choose_tiling(pixmap),
-					      flags);
+					      create);
 		if (priv->gpu_bo == NULL)
 			return false;
 
commit 755a7107aed268d87c5cc0feb1ba388b0cb7fc59
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 15:19:19 2012 +0100

    sna: Fix typo and use the right pointer for kgem_bo_destroy
    
    Useless warnings in xorg headers ftl.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48400
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ab2997d..b6adc60 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1934,7 +1934,8 @@ sna_drawable_use_bo(DrawablePtr drawable,
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
 		assert(priv->gpu_bo->proxy->rq);
-		kgem_bo_destroy(to_sna_from_pixmap(pixmap), priv->gpu_bo);
+		kgem_bo_destroy(&to_sna_from_pixmap(pixmap)->kgem,
+				priv->gpu_bo);
 		priv->gpu_bo = NULL;
 		goto use_cpu_bo;
 	}
@@ -2727,7 +2728,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
 		assert(priv->gpu_bo->proxy->rq);
-		kgem_bo_destroy(sna, priv->gpu_bo);
+		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 		priv->gpu_bo = NULL;
 	}
 
commit c8502e350cb18f6f5d821d237ffcee453f347eba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 14:43:28 2012 +0100

    sna: Add missing alloc failure check for creating tile source
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 083cd3c..ab2997d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -8767,6 +8767,9 @@ sna_pixmap_get_source_bo(PixmapPtr pixmap)
 					       pixmap->drawable.bitsPerPixel,
 					       KGEM_BUFFER_WRITE_INPLACE,
 					       &ptr);
+		if (upload == NULL)
+			return NULL;
+
 		memcpy_blt(pixmap->devPrivate.ptr, ptr,
 			   pixmap->drawable.bitsPerPixel,
 			   pixmap->devKind, upload->pitch,
commit 9cc6f7ccc55cc11f47b3b7d626c9f5a7c1327d57
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 12:06:56 2012 +0100

    sna: Release the freed bo cache upon expire
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 581e3c8..2a8b3cd 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1968,6 +1968,12 @@ bool kgem_expire_cache(struct kgem *kgem)
 	bool idle;
 	unsigned int i;
 
+	while (__kgem_freed_bo) {
+		bo = __kgem_freed_bo;
+		__kgem_freed_bo = *(struct kgem_bo **)bo;
+		free(bo);
+	}
+
 	kgem_retire(kgem);
 	if (kgem->wedged)
 		kgem_cleanup(kgem);
commit 102d11906a672140bac099e7bd1b35345d13a2fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 22:54:51 2012 +0100

    sna: Check for an inactive partial buffer to reuse after retiring requests
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 470cefb..581e3c8 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3575,7 +3575,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	}
 
 	if (flags & KGEM_BUFFER_WRITE) {
-		list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
+		do list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
 			assert(bo->base.io);
 			assert(bo->base.refcnt == 1);
 
@@ -3595,7 +3595,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->used = size;
 			list_move(&bo->base.list, &kgem->active_partials);
 			goto done;
-		}
+		} while (kgem_retire(kgem));
 	}
 
 #if !DBG_NO_MAP_UPLOAD
commit e2fb2421d8f5c07925d1699673aa4b1dd6c6b22c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 22:54:15 2012 +0100

    sna: Release partial buffers during cache expiration
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6dd1871..470cefb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1925,6 +1925,7 @@ void kgem_throttle(struct kgem *kgem)
 
 static void kgem_expire_partial(struct kgem *kgem)
 {
+	kgem_retire_partials(kgem);
 	while (!list_is_empty(&kgem->inactive_partials)) {
 		struct kgem_partial_bo *bo =
 			list_first_entry(&kgem->inactive_partials,
commit 333fdcad8677675a4758223c1a980c90d970ee42
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 22:53:31 2012 +0100

    sna: Repeat expire whilst there remain outstanding requests
    
    Do not allow the cache expiration to finish if we are still running
    requests.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index dde9d1d..6dd1871 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1979,7 +1979,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 	time(&now);
 	expire = 0;
 
-	idle = true;
+	idle = !kgem->need_retire;
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
 		idle &= list_is_empty(&kgem->inactive[i]);
 		list_for_each_entry(bo, &kgem->inactive[i], list) {
@@ -1999,7 +1999,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 	if (expire == 0)
 		return true;
 
-	idle = true;
+	idle = !kgem->need_retire;
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
 		struct list preserve;
 
commit 778232e3d2fb5340a3092014801dc00a56c56d42
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 10 11:14:50 2012 +0100

    sna: Only move the bo into the read domain for readback
    
    And mark it as currently in no domain afterwards, so that if we reuse
    it, it will be appropriately moved later.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c8be7c8..dde9d1d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3997,7 +3997,6 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
 	struct kgem_partial_bo *bo;
 	uint32_t offset = _bo->delta, length = _bo->size.bytes;
-	int domain;
 
 	assert(_bo->io);
 	assert(_bo->exec == &_kgem_dummy_exec);
@@ -4023,15 +4022,9 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 
 		VG_CLEAR(set_domain);
 		set_domain.handle = bo->base.handle;
-		if (IS_CPU_MAP(bo->base.map)) {
-			set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-			set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-			domain = DOMAIN_CPU;
-		} else {
-			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-			domain = DOMAIN_GTT;
-		}
+		set_domain.write_domain = 0;
+		set_domain.read_domains =
+			IS_CPU_MAP(bo->base.map) ? I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
 
 		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 	} else {
@@ -4039,11 +4032,9 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 			 bo->base.handle, (char *)bo->mem+offset,
 			 offset, length);
 		kgem_bo_map__cpu(kgem, &bo->base);
-		domain = DOMAIN_NONE;
 	}
-	kgem_retire(kgem);
-	assert(bo->base.rq == NULL);
-	bo->base.domain = domain;
+	kgem_bo_retire(kgem, &bo->base);
+	bo->base.domain = DOMAIN_NONE;
 }
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
commit dd093eafb9b94b8e4cd8853d74078c3aa7e72f57
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 14:09:42 2012 +0100

    sna: Add assertions around proxy list handling
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48400
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f23bf0c..c8be7c8 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -320,6 +320,7 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->exec == NULL) {
 		DBG(("%s: retiring bo handle=%d (needed flush? %d), rq? %d\n",
 		     __FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL));
+		assert(list_is_empty(&bo->vma));
 		bo->rq = NULL;
 		list_del(&bo->request);
 		bo->needs_flush = bo->flush;
@@ -1040,6 +1041,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(bo->rq == NULL);
 	assert(bo->domain != DOMAIN_GPU);
 	assert(bo->reusable);
+	assert(list_is_empty(&bo->vma));
 
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
 		kgem_bo_free(kgem, bo);
@@ -1051,12 +1053,11 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 		int type = IS_CPU_MAP(bo->map);
 		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
 		    (!type && !kgem_bo_is_mappable(kgem, bo))) {
-			list_del(&bo->vma);
 			munmap(MAP(bo->map), bytes(bo));
 			bo->map = NULL;
 		}
 		if (bo->map) {
-			list_move(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
+			list_add(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
 			kgem->vma[type].count++;
 		}
 	}
commit 7f0bede3e7e3f92a637d1c886304b16afc0e34f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 10:48:08 2012 +0100

    sna/traps: Use a temporary variable for the write pointer
    
    To avoid accumulating the write offset for wide spans, we need to reset
    the destination pointer between spans.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48332
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f7a2111..120e755 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1476,55 +1476,61 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 		} else
 			FAST_SAMPLES_X_TO_INT_FRAC(right->x.quo, rix, rfx);
 		if (lix == rix) {
-			if (rfx != lfx)
+			if (rfx != lfx) {
+				assert(lix < width);
 				row[lix] += (rfx-lfx) * 256 / FAST_SAMPLES_X;
+			}
 		} else {
+			assert(lix < width);
 			if (lfx == 0)
 				row[lix] = 0xff;
 			else
 				row[lix] += 256 - lfx * 256 / FAST_SAMPLES_X;
 
-			if (rfx)
+			assert(rix <= width);
+			if (rfx) {
+				assert(rix < width);
 				row[rix] += rfx * 256 / FAST_SAMPLES_X;
+			}
 
 			if (rix > ++lix) {
+				uint8_t *r = row + lix;
 				rix -= lix;
-				row += lix;
 #if 0
 				if (rix == 1)
 					*row = 0xff;
 				else
 					memset(row, 0xff, rix);
 #else
-				if ((uintptr_t)row & 1 && rix) {
-					*row++ = 0xff;
+				if ((uintptr_t)r & 1 && rix) {
+					*r++ = 0xff;
 					rix--;
 				}
-				if ((uintptr_t)row & 2 && rix >= 2) {
-					*(uint16_t *)row = 0xffff;
-					row += 2;
+				if ((uintptr_t)r & 2 && rix >= 2) {
+					*(uint16_t *)r = 0xffff;
+					r += 2;
 					rix -= 2;
 				}
-				if ((uintptr_t)row & 4 && rix >= 4) {
-					*(uint32_t *)row = 0xffffffff;
-					row += 4;
+				if ((uintptr_t)r & 4 && rix >= 4) {
+					*(uint32_t *)r = 0xffffffff;
+					r += 4;
 					rix -= 4;
 				}
 				while (rix >= 8) {
-					*(uint64_t *)row = 0xffffffffffffffff;
-					row += 8;
+					*(uint64_t *)r = 0xffffffffffffffff;
+					r += 8;
 					rix -= 8;
 				}
 				if (rix & 4) {
-					*(uint32_t *)row = 0xffffffff;
-					row += 4;
+					*(uint32_t *)r = 0xffffffff;
+					r += 4;
 				}
 				if (rix & 2) {
-					*(uint16_t *)row = 0xffff;
-					row += 2;
+					*(uint16_t *)r = 0xffff;
+					r += 2;
 				}
 				if (rix & 1)
-					*row = 0xff;
+					*r = 0xff;
 #endif
 			}
 		}
commit 2e4da00e3e03b873f5cad0cc5b1f6cc791852ca5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 9 10:42:18 2012 +0100

    sna/traps: Assert that the inplace row is contained before writing
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48332
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index dbf581e..f7a2111 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1786,6 +1786,7 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 
 				inplace_subrow(active, ptr, width, &min, &max);
 			}
+			assert(min >= 0 && max <= width);
 			memset(row, 0, min);
 			if (max > min)
 				inplace_end_subrows(active, row+min, (int8_t*)ptr+min, max-min);
commit 0464e93a088a9e8bc29ad8b36b6e12c3dda32ec6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 17:16:03 2012 +0100

    sna: Add some assertions for misuse of proxies
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48400
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index de02e9d..f23bf0c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -332,6 +332,7 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 	assert(bo->refcnt);
 	assert(!bo->purged);
 	assert(!kgem_busy(kgem, bo->handle));
+	assert(bo->proxy == NULL);
 
 	assert(length <= bytes(bo));
 	if (gem_write(kgem->fd, bo->handle, 0, length, data))
@@ -3156,6 +3157,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
 
 	assert(!bo->purged);
+	assert(bo->proxy == NULL);
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
 
@@ -3385,6 +3387,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
+	assert(bo->proxy == NULL);
 	kgem_bo_submit(kgem, bo);
 
 	if (bo->domain != DOMAIN_CPU) {
@@ -4037,7 +4040,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		kgem_bo_map__cpu(kgem, &bo->base);
 		domain = DOMAIN_NONE;
 	}
-	kgem_bo_retire(kgem, &bo->base);
+	kgem_retire(kgem);
+	assert(bo->base.rq == NULL);
 	bo->base.domain = domain;
 }
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7425a51..083cd3c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1933,6 +1933,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 	}
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
+		assert(priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(to_sna_from_pixmap(pixmap), priv->gpu_bo);
 		priv->gpu_bo = NULL;
 		goto use_cpu_bo;
@@ -2725,6 +2726,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 
 	if (priv->gpu_bo && priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
+		assert(priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(sna, priv->gpu_bo);
 		priv->gpu_bo = NULL;
 	}
@@ -3520,6 +3522,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
 		DBG(("%s: discarding cached upload\n", __FUNCTION__));
+		assert(dst_priv->gpu_bo->proxy->rq);
 		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
 		dst_priv->gpu_bo = NULL;
 	}
commit 479cb6ba71038fe44f66fb31fad90d0d454fea7a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 14:25:49 2012 +0100

    sna: Compress adjoining spans during FillSpans
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8f02380..7425a51 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4255,8 +4255,15 @@ sna_fill_spans__fill(DrawablePtr drawable,
 			b->y2 = b->y1 + 1;
 			DBG(("%s: (%d, %d), (%d, %d)\n",
 			     __FUNCTION__, b->x1, b->y1, b->x2, b->y2));
-			if (b->x2 > b->x1)
-				b++;
+			if (b->x2 > b->x1) {
+				if (b != box &&
+				    b->y1 == b[-1].y2 &&
+				    b->x1 == b[-1].x1 &&
+				    b->x2 == b[-1].x2)
+					b[-1].y2 = b->y2;
+				else
+					b++;
+			}
 		} while (--nbox);
 		if (b != box)
 			op->boxes(data->sna, op, box, b - box);
@@ -4322,7 +4329,12 @@ sna_fill_spans__fill_clip_extents(DrawablePtr drawable,
 				b->x1 += data->dx; b->x2 += data->dx;
 				b->y1 += data->dy; b->y2 += data->dy;
 			}
-			if (++b == last_box) {
+			if (b != box &&
+			    b->y1 == b[-1].y2 &&
+			    b->x1 == b[-1].x1 &&
+			    b->x2 == b[-1].x2) {
+				b[-1].y2 = b->y2;
+			} else if (++b == last_box) {
 				op->boxes(data->sna, op, box, last_box - box);
 				b = box;
 			}
commit c5c01c13badeb7c2ead0c848b746d8d474277a77
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 13:51:13 2012 +0100

    sna: Remove the duplicated check for use-bo? in PolySegments
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 61710ad..8f02380 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7458,6 +7458,13 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 	     data.flags & 4));
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
+
+	data.bo = sna_drawable_use_bo(drawable,
+				      &data.region.extents,
+				      &data.damage);
+	if (data.bo == NULL)
+		goto fallback;
+
 	if (gc->lineStyle != LineSolid || gc->lineWidth > 1)
 		goto spans_fallback;
 	if (gc_is_solid(gc, &color)) {
@@ -7465,10 +7472,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		     __FUNCTION__, (unsigned)color, data.flags));
 
 		if (data.flags & 4) {
-			if ((data.bo = sna_drawable_use_bo(drawable,
-							   &data.region.extents,
-							   &data.damage)) &&
-			    sna_poly_segment_blt(drawable,
+			if (sna_poly_segment_blt(drawable,
 						 data.bo, data.damage,
 						 gc, color, n, seg,
 						 &data.region.extents,
@@ -7476,9 +7480,6 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 				return;
 		} else {
 			if (use_zero_spans(drawable, gc, &data.region.extents) &&
-			    (data.bo = sna_drawable_use_bo(drawable,
-							   &data.region.extents,
-							   &data.damage)) &&
 			    sna_poly_zero_segment_blt(drawable,
 						      data.bo, data.damage,
 						      gc, n, seg,
@@ -7488,70 +7489,67 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		}
 	} else if (data.flags & 4) {
 		/* Try converting these to a set of rectangles instead */
-		if ((data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
-			xRectangle *rect;
-			int i;
-
-			DBG(("%s: converting to rectagnles\n", __FUNCTION__));
+		xRectangle *rect;
+		int i;
 
-			rect = malloc (n * sizeof (xRectangle));
-			if (rect == NULL)
-				return;
+		DBG(("%s: converting to rectagnles\n", __FUNCTION__));
 
-			for (i = 0; i < n; i++) {
-				if (seg[i].x1 < seg[i].x2) {
-					rect[i].x = seg[i].x1;
-					rect[i].width = seg[i].x2 - seg[i].x1 + 1;
-				} else if (seg[i].x1 > seg[i].x2) {
-					rect[i].x = seg[i].x2;
-					rect[i].width = seg[i].x1 - seg[i].x2 + 1;
-				} else {
-					rect[i].x = seg[i].x1;
-					rect[i].width = 1;
-				}
-				if (seg[i].y1 < seg[i].y2) {
-					rect[i].y = seg[i].y1;
-					rect[i].height = seg[i].y2 - seg[i].y1 + 1;
-				} else if (seg[i].x1 > seg[i].y2) {
-					rect[i].y = seg[i].y2;
-					rect[i].height = seg[i].y1 - seg[i].y2 + 1;
-				} else {
-					rect[i].y = seg[i].y1;
-					rect[i].height = 1;
-				}
+		rect = malloc (n * sizeof (xRectangle));
+		if (rect == NULL)
+			return;
 
-				/* don't paint last pixel */
-				if (gc->capStyle == CapNotLast) {
-					if (seg[i].x1 == seg[i].x2)
-						rect[i].height--;
-					else
-						rect[i].width--;
-				}
+		for (i = 0; i < n; i++) {
+			if (seg[i].x1 < seg[i].x2) {
+				rect[i].x = seg[i].x1;
+				rect[i].width = seg[i].x2 - seg[i].x1 + 1;
+			} else if (seg[i].x1 > seg[i].x2) {
+				rect[i].x = seg[i].x2;
+				rect[i].width = seg[i].x1 - seg[i].x2 + 1;
+			} else {
+				rect[i].x = seg[i].x1;
+				rect[i].width = 1;
 			}
-
-			if (gc->fillStyle == FillTiled) {
-				i = sna_poly_fill_rect_tiled_blt(drawable,
-								 data.bo, data.damage,
-								 gc, n, rect,
-								 &data.region.extents,
-								 data.flags & 2);
+			if (seg[i].y1 < seg[i].y2) {
+				rect[i].y = seg[i].y1;
+				rect[i].height = seg[i].y2 - seg[i].y1 + 1;
+			} else if (seg[i].x1 > seg[i].y2) {
+				rect[i].y = seg[i].y2;
+				rect[i].height = seg[i].y1 - seg[i].y2 + 1;
 			} else {
-				i = sna_poly_fill_rect_stippled_blt(drawable,
-								    data.bo, data.damage,
-								    gc, n, rect,
-								    &data.region.extents,
-								    data.flags & 2);
+				rect[i].y = seg[i].y1;
+				rect[i].height = 1;
 			}
-			free (rect);
 
-			if (i)
-				return;
+			/* don't paint last pixel */
+			if (gc->capStyle == CapNotLast) {
+				if (seg[i].x1 == seg[i].x2)
+					rect[i].height--;
+				else
+					rect[i].width--;
+			}
+		}
+
+		if (gc->fillStyle == FillTiled) {
+			i = sna_poly_fill_rect_tiled_blt(drawable,
+							 data.bo, data.damage,
+							 gc, n, rect,
+							 &data.region.extents,
+							 data.flags & 2);
+		} else {
+			i = sna_poly_fill_rect_stippled_blt(drawable,
+							    data.bo, data.damage,
+							    gc, n, rect,
+							    &data.region.extents,
+							    data.flags & 2);
 		}
+		free (rect);
+
+		if (i)
+			return;
 	}
 
 spans_fallback:
-	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    (data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
+	if (use_wide_spans(drawable, gc, &data.region.extents)) {
 		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
 		int i;
 
commit f5deea4f60433ee2b0c2d02fba682fff8b7829e8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 13:39:47 2012 +0100

    sna: Correct partial-write flag for PolySegments fallback
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6afd582..61710ad 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7642,7 +7642,7 @@ fallback:
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable, gc,
-							       !(data.flags & 4 && n > 1))))
+							       !(data.flags & 4 && n == 1))))
 		goto out;
 
 	/* Install FillSpans in case we hit a fallback path in fbPolySegment */
commit c3d7f4c1cf7d052163b7c4e74bb202c618f0eb76
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 13:18:56 2012 +0100

    sna/gen3: Reset accumulated constants for each composite
    
    In particular the glyph routines require the composite setup to
    reinitialise state between glyph runs. This affects anything trying to
    use glyphs without a mask with a gradient source, for example.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 529884c..ed2eaf1 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2437,7 +2437,7 @@ try_blt(struct sna *sna,
 
 static void
 gen3_align_vertex(struct sna *sna,
-		  struct sna_composite_op *op)
+		  const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen3.last_floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
@@ -2450,6 +2450,7 @@ gen3_align_vertex(struct sna *sna,
 		     (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex));
 		sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex;
 		sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex;
+		assert(sna->render.vertex_used < sna->render.vertex_size - op->floats_per_rect);
 		sna->render_state.gen3.last_floats_per_vertex = op->floats_per_vertex;
 	}
 }
@@ -2824,6 +2825,7 @@ gen3_render_composite(struct sna *sna,
 			return FALSE;
 	}
 
+	tmp->u.gen3.num_constants = 0;
 	tmp->src.u.gen3.type = SHADER_TEXTURE;
 	tmp->src.is_affine = TRUE;
 	DBG(("%s: preparing source\n", __FUNCTION__));
commit 701473d20485a0557b4fb36efcbfbb8656e2f619
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 8 10:09:42 2012 +0100

    sna: Release cached upload buffers when reusing a write buffer for readback
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=48400
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 27fa165..de02e9d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1234,6 +1234,24 @@ static void bubble_sort_partial(struct list *head, struct kgem_partial_bo *bo)
 	}
 }
 
+static void kgem_partial_buffer_release(struct kgem *kgem,
+					struct kgem_partial_bo *bo)
+{
+	while (!list_is_empty(&bo->base.vma)) {
+		struct kgem_bo *cached;
+
+		cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
+		assert(cached->proxy == &bo->base);
+		list_del(&cached->vma);
+
+		assert(*(struct kgem_bo **)cached->map == cached);
+		*(struct kgem_bo **)cached->map = NULL;
+		cached->map = NULL;
+
+		kgem_bo_destroy(kgem, cached);
+	}
+}
+
 static void kgem_retire_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
@@ -1247,19 +1265,7 @@ static void kgem_retire_partials(struct kgem *kgem)
 
 		DBG(("%s: releasing upload cache for handle=%d? %d\n",
 		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
-		while (!list_is_empty(&bo->base.vma)) {
-			struct kgem_bo *cached;
-
-			cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
-			assert(cached->proxy == &bo->base);
-			list_del(&cached->vma);
-
-			assert(*(struct kgem_bo **)cached->map == cached);
-			*(struct kgem_bo **)cached->map = NULL;
-			cached->map = NULL;
-
-			kgem_bo_destroy(kgem, cached);
-		}
+		kgem_partial_buffer_release(kgem, bo);
 
 		assert(bo->base.refcnt > 0);
 		if (bo->base.refcnt != 1)
@@ -3519,6 +3525,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 			gem_write(kgem->fd, bo->base.handle,
 				  0, bo->used, bo->mem);
+			kgem_partial_buffer_release(kgem, bo);
 			bo->need_io = 0;
 			bo->write = 0;
 			offset = 0;
@@ -3991,8 +3998,9 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	assert(_bo->io);
 	assert(_bo->exec == &_kgem_dummy_exec);
 	assert(_bo->rq == NULL);
-	if (_bo->proxy)
-		_bo = _bo->proxy;
+	assert(_bo->proxy);
+
+	_bo = _bo->proxy;
 	assert(_bo->exec == NULL);
 
 	bo = (struct kgem_partial_bo *)_bo;
commit 1ecf17b2507f95e1fefea15833fa9f57ec256a2e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Apr 7 10:01:01 2012 +0100

    sna/gradient: Compute the absolute delta between color stops
    
    Otherwise we do not detect gradients that start from white!
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48407
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 943cbf9..32d26c8 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -44,39 +44,48 @@ sna_gradient_sample_width(PictGradient *gradient)
 {
 	int n, width;
 
-	width = 2;
+	width = 0;
 	for (n = 1; n < gradient->nstops; n++) {
 		xFixed dx = gradient->stops[n].x - gradient->stops[n-1].x;
-		uint16_t delta, max;
-		int ramp;
+		int delta, max, ramp;
 
 		if (dx == 0)
 			return 1024;
 
 		max = gradient->stops[n].color.red -
 			gradient->stops[n-1].color.red;
+		if (max < 0)
+			max = -max;
 
 		delta = gradient->stops[n].color.green -
 			gradient->stops[n-1].color.green;
+		if (delta < 0)
+			delta = -delta;
 		if (delta > max)
 			max = delta;
 
 		delta = gradient->stops[n].color.blue -
 			gradient->stops[n-1].color.blue;
+		if (delta < 0)
+			delta = -delta;
 		if (delta > max)
 			max = delta;
 
 		delta = gradient->stops[n].color.alpha -
 			gradient->stops[n-1].color.alpha;
+		if (delta < 0)
+			delta = -delta;
 		if (delta > max)
 			max = delta;
 
-		ramp = 128 * max / dx;
+		ramp = 256 * max / dx;
 		if (ramp > width)
 			width = ramp;
 	}
 
-	width *= gradient->nstops-1;
+	if (width == 0)
+		return 1;
+
 	width = (width + 7) & -8;
 	return min(width, 1024);
 }
commit 4356fae72db3a33935b575edf95c84fbb48072a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 21:10:50 2012 +0100

    sna/video: Only wait upon the scanout pixmap
    
    Caught by the addition of the assertion.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index b740b6a..4975f55 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -287,7 +287,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		}
 	}
 
-	if (crtc && video->SyncToVblank != 0)
+	if (crtc && video->SyncToVblank != 0 &&
+	    pixmap == sna->front && !sna->shadow)
 		flush = sna_wait_for_scanline(sna, pixmap, crtc,
 					      &clip->extents);
 
commit b790ba2ec9ead51227d85fc8630bc7505eb7d7b3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 15:38:02 2012 +0100

    sna: Correct the damage offset for redirected rendering
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48385
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 9c646d8..f2d682d 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -945,8 +945,11 @@ struct sna_damage *_sna_damage_is_all(struct sna_damage *damage,
 {
 	DBG(("%s(%d, %d)%s?\n", __FUNCTION__, width, height,
 	     damage->dirty ? "*" : ""));
-	assert(damage->mode == DAMAGE_ADD);
+	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
+	     damage->extents.x1, damage->extents.y1,
+	     damage->extents.x2, damage->extents.y2));
 
+	assert(damage->mode == DAMAGE_ADD);
 	assert(damage->extents.x1 == 0 &&
 	       damage->extents.y1 == 0 &&
 	       damage->extents.x2 == width &&
@@ -962,10 +965,6 @@ struct sna_damage *_sna_damage_is_all(struct sna_damage *damage,
 		return damage;
 	}
 
-	DBG(("%s: (%d, %d), (%d, %d)\n", __FUNCTION__,
-	     damage->extents.x1, damage->extents.y1,
-	     damage->extents.x2, damage->extents.y2));
-
 	assert(damage->extents.x1 == 0 &&
 	       damage->extents.y1 == 0 &&
 	       damage->extents.x2 == width &&
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index d774a34..8af80f2 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1878,8 +1878,7 @@ sna_render_composite_redirect_done(struct sna *sna,
 		}
 		if (t->damage) {
 			sna_damage_combine(t->real_damage, t->damage,
-					   t->box.x1 - op->dst.x,
-					   t->box.y1 - op->dst.y);
+					   -t->box.x1, -t->box.y1);
 			__sna_damage_destroy(t->damage);
 		}
 
commit 0b81bafb802bb86454739ed46cf45571bccef735
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 15:14:45 2012 +0100

    sna/glyphs: Prefer a temporary upload mask for large glyph masks
    
    If the required temporary mask is larger than the 3D pipeline can
    handle, just render to a CPU buffer rather than redirect every glyph
    composition.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 235528c..87371aa 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -662,6 +662,13 @@ clear_pixmap(struct sna *sna, PixmapPtr pixmap)
 	return sna->render.clear(sna, pixmap, priv->gpu_bo);
 }
 
+static bool
+too_large(struct sna *sna, int width, int height)
+{
+	return (width > sna->render.max_3d_size ||
+		height > sna->render.max_3d_size);
+}
+
 static Bool
 glyphs_via_mask(struct sna *sna,
 		CARD8 op,
@@ -724,7 +731,8 @@ glyphs_via_mask(struct sna *sna,
 
 	component_alpha = NeedsComponent(format->format);
 	if (!NO_SMALL_MASK &&
-	    (uint32_t)width * height * format->depth < 8 * 4096) {
+	    ((uint32_t)width * height * format->depth < 8 * 4096 ||
+	     too_large(sna, width, height))) {
 		pixman_image_t *mask_image;
 		int s;
 
commit 42a84613e34522af885b4b50d6c68ef77e81ffc3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 14:27:15 2012 +0100

    sna: Release the upload cache when overwriting with PutImage
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48359
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 0d229cd..529884c 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2499,6 +2499,7 @@ gen3_composite_set_target(struct sna *sna,
 			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
 			priv->gpu_bo = bo;
 		}
+		assert(priv->gpu_bo->proxy == NULL);
 
 		op->dst.bo = priv->gpu_bo;
 		op->damage = &priv->gpu_damage;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f1d4d00..6afd582 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1032,6 +1032,8 @@ skip_inplace_map:
 		goto done;
 	}
 
+	assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
 	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
@@ -1772,6 +1774,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 			goto done;
 		}
 	}
+	assert(priv->gpu_bo->proxy == NULL);
 
 	if ((flags & MOVE_READ) == 0)
 		sna_damage_subtract_box(&priv->cpu_damage, box);
@@ -1929,6 +1932,12 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		return NULL;
 	}
 
+	if (priv->gpu_bo && priv->gpu_bo->proxy) {
+		kgem_bo_destroy(to_sna_from_pixmap(pixmap), priv->gpu_bo);
+		priv->gpu_bo = NULL;
+		goto use_cpu_bo;
+	}
+
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		goto use_gpu_bo;
 
@@ -2320,7 +2329,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		goto done;
 
 	if (priv->gpu_bo->proxy) {
-		assert((flags & MOVE_WRITE) ==0);
+		DBG(("%s: reusing cached upload\n", __FUNCTION__));
+		assert((flags & MOVE_WRITE) == 0);
 		goto done;
 	}
 
@@ -2604,6 +2614,7 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		return FALSE;
 
 	assert(priv->gpu_bo);
+	assert(priv->gpu_bo->proxy == NULL);
 
 	if (!priv->pinned && nbox == 1 &&
 	    box->x1 <= 0 && box->y1 <= 0 &&
@@ -2712,6 +2723,12 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		return true;
 	}
 
+	if (priv->gpu_bo && priv->gpu_bo->proxy) {
+		DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
+		kgem_bo_destroy(sna, priv->gpu_bo);
+		priv->gpu_bo = NULL;
+	}
+
 	if (priv->cpu_bo) {
 		/* If the GPU is currently accessing the CPU pixmap, then
 		 * we will need to wait for that to finish before we can
@@ -3307,6 +3324,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		goto fallback;
 
 	if (priv->gpu_bo) {
+		assert(priv->gpu_bo->proxy == NULL);
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ)) {
 			DBG(("%s: fallback - not a pure copy and failed to move dst to GPU\n",
 			     __FUNCTION__));
@@ -3501,6 +3519,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	}
 
 	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
+		DBG(("%s: discarding cached upload\n", __FUNCTION__));
 		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
 		dst_priv->gpu_bo = NULL;
 	}
commit 999aa210ff87919945c673bdd34bae76ac097681
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 09:24:36 2012 +0100

    sna: Use a sentinel value to prevent accessing beyond the end of the y_buckets
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 9000f82..dbf581e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -650,12 +650,13 @@ polygon_init(struct polygon *polygon,
 			goto bail_no_mem;
 	}
 
-	if (num_buckets > ARRAY_SIZE(polygon->y_buckets_embedded)) {
-		polygon->y_buckets = malloc(num_buckets*sizeof(struct edge *));
+	if (num_buckets >= ARRAY_SIZE(polygon->y_buckets_embedded)) {
+		polygon->y_buckets = malloc((1+num_buckets)*sizeof(struct edge *));
 		if (unlikely(NULL == polygon->y_buckets))
 			goto bail_no_mem;
 	}
 	memset(polygon->y_buckets, 0, num_buckets * sizeof(struct edge *));
+	polygon->y_buckets[num_buckets] = (void *)-1;
 
 	polygon->ymin = ymin;
 	polygon->ymax = ymax;
@@ -1363,7 +1364,7 @@ tor_render(struct sna *sna,
 			if (active->head.next == &active->tail) {
 				active->min_height = INT_MAX;
 				active->is_vertical = 1;
-				for (; j < h && !polygon->y_buckets[j]; j++)
+				for (; !polygon->y_buckets[j]; j++)
 					;
 				__DBG(("%s: no new edges and no exisiting edges, skipping, %d -> %d\n",
 				       __FUNCTION__, i, j));
@@ -1386,8 +1387,7 @@ tor_render(struct sna *sna,
 			assert(active->is_vertical);
 			nonzero_row(active, coverages);
 
-			while (j < h &&
-			       polygon->y_buckets[j] == NULL &&
+			while (polygon->y_buckets[j] == NULL &&
 			       active->min_height >= 2*FAST_SAMPLES_Y)
 			{
 				active->min_height -= FAST_SAMPLES_Y;
@@ -1713,6 +1713,9 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 
 	__DBG(("%s: mono=%d, buf=%d\n", __FUNCTION__, mono, buf));
 	assert(!mono);
+	assert(converter->ymin == 0);
+	assert(converter->xmin == 0);
+	assert(scratch->drawable.depth == 8);
 
 	/* Render each pixel row. */
 	for (i = 0; i < h; i = j) {
@@ -1727,7 +1730,7 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 			if (active->head.next == &active->tail) {
 				active->min_height = INT_MAX;
 				active->is_vertical = 1;
-				for (; j < h && !polygon->y_buckets[j]; j++)
+				for (; !polygon->y_buckets[j]; j++)
 					;
 				__DBG(("%s: no new edges and no exisiting edges, skipping, %d -> %d\n",
 				       __FUNCTION__, i, j));
@@ -1754,8 +1757,7 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 			if (row != ptr)
 				memcpy(row, ptr, width);
 
-			while (j < h &&
-			       polygon->y_buckets[j] == NULL &&
+			while (polygon->y_buckets[j] == NULL &&
 			       active->min_height >= 2*FAST_SAMPLES_Y)
 			{
 				active->min_height -= FAST_SAMPLES_Y;
commit 1ae6328c57eb496072f0d0e27440f5d0901633b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Apr 6 09:12:08 2012 +0100

    sna: Remove redundant check from tor_inplace()
    
    We only execute full-steps for vertical edges so we do not need the
    second check.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2b4b2db..9000f82 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1383,22 +1383,21 @@ tor_render(struct sna *sna,
 		       active->min_height,
 		       active->is_vertical));
 		if (do_full_step) {
+			assert(active->is_vertical);
 			nonzero_row(active, coverages);
 
-			if (active->is_vertical) {
-				while (j < h &&
-				       polygon->y_buckets[j] == NULL &&
-				       active->min_height >= 2*FAST_SAMPLES_Y)
-				{
-					active->min_height -= FAST_SAMPLES_Y;
-					j++;
-				}
-				if (j != i + 1)
-					step_edges(active, j - (i + 1));
-
-				__DBG(("%s: vertical edges, full step (%d, %d)\n",
-				       __FUNCTION__,  i, j));
+			while (j < h &&
+			       polygon->y_buckets[j] == NULL &&
+			       active->min_height >= 2*FAST_SAMPLES_Y)
+			{
+				active->min_height -= FAST_SAMPLES_Y;
+				j++;
 			}
+			if (j != i + 1)
+				step_edges(active, j - (i + 1));
+
+			__DBG(("%s: vertical edges, full step (%d, %d)\n",
+			       __FUNCTION__,  i, j));
 		} else {
 			grid_scaled_y_t suby;
 
@@ -1748,27 +1747,27 @@ tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
 		       active->min_height,
 		       active->is_vertical));
 		if (do_full_step) {
+			assert(active->is_vertical);
+
 			memset(ptr, 0, width);
 			inplace_row(active, ptr, width);
 			if (row != ptr)
 				memcpy(row, ptr, width);
 
-			if (active->is_vertical) {
-				while (j < h &&
-				       polygon->y_buckets[j] == NULL &&
-				       active->min_height >= 2*FAST_SAMPLES_Y)
-				{
-					active->min_height -= FAST_SAMPLES_Y;
-					row += stride;
-					memcpy(row, ptr, width);
-					j++;
-				}
-				if (j != i + 1)
-					step_edges(active, j - (i + 1));
-
-				__DBG(("%s: vertical edges, full step (%d, %d)\n",
-				       __FUNCTION__,  i, j));
+			while (j < h &&
+			       polygon->y_buckets[j] == NULL &&
+			       active->min_height >= 2*FAST_SAMPLES_Y)
+			{
+				active->min_height -= FAST_SAMPLES_Y;
+				row += stride;
+				memcpy(row, ptr, width);
+				j++;
 			}
+			if (j != i + 1)
+				step_edges(active, j - (i + 1));
+
+			__DBG(("%s: vertical edges, full step (%d, %d)\n",
+			       __FUNCTION__,  i, j));
 		} else {
 			grid_scaled_y_t suby;
 			int min = width, max = 0;
commit 51b9202d27db3d98c6d82ba224bd8eb218533dd9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Apr 4 11:13:27 2012 +0100

    sna: Only engage the GPU detiler for multiple rows
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 4f5a634..02a5c75 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -178,7 +178,7 @@ fallback:
 	}
 	if (kgem_bo_is_mappable(kgem, src_bo)) {
 		/* Is it worth detiling? */
-		if ((extents.y2 - extents.y1) * src_bo->pitch < 4096)
+		if ((extents.y2 - extents.y1 - 1) * src_bo->pitch < 4096)
 			goto fallback;
 	}
 
commit 98ad4c3cd8647ba3ec90fb45157773c8e85e886c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 19:47:15 2012 +0100

    sna/gen3: Don't force use of the render pipeline just for vmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index f77521f..0d229cd 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2404,10 +2404,6 @@ source_use_blt(struct sna *sna, PicturePtr picture)
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
 		return true;
 
-	/* If we can sample directly from user-space, do so */
-	if (sna->kgem.has_vmap)
-		return false;
-
 	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
commit 0915d414f55a1bff4171981feb87bae212f29f23
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 19:04:29 2012 +0100

    sna/gen3: Fix pre-multiplication of mask value
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index ca3d141..f77521f 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2364,7 +2364,6 @@ gen3_composite_picture(struct sna *sna,
 	x += dx + picture->pDrawable->x;
 	y += dy + picture->pDrawable->y;
 
-	channel->is_affine = sna_transform_is_affine(picture->transform);
 	if (sna_transform_is_integer_translation(picture->transform, &dx, &dy)) {
 		DBG(("%s: integer translation (%d, %d), removing\n",
 		     __FUNCTION__, dx, dy));
@@ -2372,8 +2371,10 @@ gen3_composite_picture(struct sna *sna,
 		y += dy;
 		channel->transform = NULL;
 		channel->filter = PictFilterNearest;
-	} else
+	} else {
 		channel->transform = picture->transform;
+		channel->is_affine = sna_transform_is_affine(picture->transform);
+	}
 
 	if (!gen3_composite_channel_set_format(channel, picture->format) &&
 	    !gen3_composite_channel_set_xformat(picture, channel, x, y, w, h))
@@ -2912,13 +2913,13 @@ gen3_render_composite(struct sna *sna,
 					v = multa(tmp->src.u.gen3.mode,
 						  tmp->mask.u.gen3.mode,
 						  24);
-					v != multa(tmp->src.u.gen3.mode,
+					v |= multa(tmp->src.u.gen3.mode,
 						   tmp->mask.u.gen3.mode,
 						   16);
-					v != multa(tmp->src.u.gen3.mode,
+					v |= multa(tmp->src.u.gen3.mode,
 						   tmp->mask.u.gen3.mode,
 						   8);
-					v != multa(tmp->src.u.gen3.mode,
+					v |= multa(tmp->src.u.gen3.mode,
 						   tmp->mask.u.gen3.mode,
 						   0);
 
@@ -2986,10 +2987,11 @@ gen3_render_composite(struct sna *sna,
 		tmp->floats_per_vertex += tmp->src.is_affine ? 2 : 4;
 	if (!is_constant_ps(tmp->mask.u.gen3.type))
 		tmp->floats_per_vertex += tmp->mask.is_affine ? 2 : 4;
-	DBG(("%s: floats_per_vertex = 2 + %d + %d = %d\n", __FUNCTION__,
+	DBG(("%s: floats_per_vertex = 2 + %d + %d = %d [specialised emitter? %d]\n", __FUNCTION__,
 	     !is_constant_ps(tmp->src.u.gen3.type) ? tmp->src.is_affine ? 2 : 4 : 0,
 	     !is_constant_ps(tmp->mask.u.gen3.type) ? tmp->mask.is_affine ? 2 : 4 : 0,
-	     tmp->floats_per_vertex));
+	     tmp->floats_per_vertex,
+	     tmp->prim_emit != gen3_emit_composite_primitive));
 	tmp->floats_per_rect = 3 * tmp->floats_per_vertex;
 
 	tmp->blt   = gen3_render_composite_blt;
commit 04851e4210d2d71542359c14d4b68d0851b36326
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 12:34:24 2012 +0100

    sna/gen3: Convert the clear-color from picture->format to a8r8g8b8
    
    The shaders treat colours as an argb value, however the clear color is
    stored in the pixmap's native format (a8, r5g6b5, x8r8g8b8 etc). So
    before using the value of the clear color as a solid we need to convert
    it into the a8r8g8b8 format.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48204
    Reported-by: Paul Neumann <paul104x at yahoo.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47308
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 0478709..ca3d141 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2080,7 +2080,7 @@ gen3_init_solid(struct sna_composite_channel *channel, uint32_t color)
 		channel->u.gen3.type = SHADER_WHITE;
 
 	channel->bo = NULL;
-	channel->is_opaque = (color & 0xff000000) == 0xff000000;
+	channel->is_opaque = (color >> 24) == 0xff;
 	channel->is_affine = 1;
 	channel->alpha_fixup = 0;
 	channel->rb_reversed = 0;
@@ -2303,6 +2303,8 @@ gen3_composite_picture(struct sna *sna,
 
 		switch (source->type) {
 		case SourcePictTypeSolidFill:
+			DBG(("%s: solid fill [%08x], format %x\n",
+			     __FUNCTION__, source->solidFill.color, picture->format));
 			ret = gen3_init_solid(channel, source->solidFill.color);
 			break;
 
@@ -2334,11 +2336,15 @@ gen3_composite_picture(struct sna *sna,
 						x, y, w, h, dst_x, dst_y);
 	}
 
-	if (sna_picture_is_solid(picture, &color))
+	if (sna_picture_is_solid(picture, &color)) {
+		DBG(("%s: solid drawable [%08x]\n", __FUNCTION__, color));
 		return gen3_init_solid(channel, color);
+	}
 
-	if (sna_picture_is_clear(picture, x, y, w, h, &color))
-		return gen3_init_solid(channel, color);
+	if (sna_picture_is_clear(picture, x, y, w, h, &color)) {
+		DBG(("%s: clear drawable [%08x]\n", __FUNCTION__, color));
+		return gen3_init_solid(channel, color_convert(color, picture->format, PICT_a8r8g8b8));
+	}
 
 	if (!gen3_check_repeat(picture))
 		return sna_render_picture_fixup(sna, picture, channel,
@@ -2517,11 +2523,16 @@ gen3_composite_set_target(struct sna *sna,
 	return TRUE;
 }
 
+static inline uint8_t
+mul_8_8(uint8_t a, uint8_t b)
+{
+    uint16_t t = a * (uint16_t)b + 0x7f;
+    return ((t >> 8) + t) >> 8;
+}
+
 static inline uint8_t multa(uint32_t s, uint32_t m, int shift)
 {
-	s = (s >> shift) & 0xff;
-	m >>= 24;
-	return (s * m) >> 8;
+	return mul_8_8((s >> shift) & 0xff, m >> 24) << shift;
 }
 
 static inline bool is_constant_ps(uint32_t type)
@@ -2894,33 +2905,31 @@ gen3_render_composite(struct sna *sna,
 			} else {
 				if (tmp->mask.is_opaque) {
 					tmp->mask.u.gen3.type = SHADER_NONE;
-					tmp->has_component_alpha = FALSE;
 				} else if (is_constant_ps(tmp->src.u.gen3.type) &&
 					   is_constant_ps(tmp->mask.u.gen3.type)) {
-					uint32_t a,r,g,b;
+					uint32_t v;
 
-					a = multa(tmp->src.u.gen3.mode,
+					v = multa(tmp->src.u.gen3.mode,
 						  tmp->mask.u.gen3.mode,
 						  24);
-					r = multa(tmp->src.u.gen3.mode,
-						  tmp->mask.u.gen3.mode,
-						  16);
-					g = multa(tmp->src.u.gen3.mode,
-						  tmp->mask.u.gen3.mode,
-						  8);
-					b = multa(tmp->src.u.gen3.mode,
-						  tmp->mask.u.gen3.mode,
-						  0);
+					v != multa(tmp->src.u.gen3.mode,
+						   tmp->mask.u.gen3.mode,
+						   16);
+					v != multa(tmp->src.u.gen3.mode,
+						   tmp->mask.u.gen3.mode,
+						   8);
+					v != multa(tmp->src.u.gen3.mode,
+						   tmp->mask.u.gen3.mode,
+						   0);
 
 					DBG(("%s: combining constant source/mask: %x x %x -> %x\n",
 					     __FUNCTION__,
 					     tmp->src.u.gen3.mode,
 					     tmp->mask.u.gen3.mode,
-					     a << 24 | r << 16 | g << 8 | b));
+					     v));
 
 					tmp->src.u.gen3.type = SHADER_CONSTANT;
-					tmp->src.u.gen3.mode =
-						a << 24 | r << 16 | g << 8 | b;
+					tmp->src.u.gen3.mode = v;
 
 					tmp->mask.u.gen3.type = SHADER_NONE;
 				}
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index e7a6182..a81a145 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -476,13 +476,13 @@ static void sna_blt_copy_one(struct sna *sna,
 	kgem->nbatch += 8;
 }
 
-static Bool
-get_rgba_from_pixel(uint32_t pixel,
-		    uint16_t *red,
-		    uint16_t *green,
-		    uint16_t *blue,
-		    uint16_t *alpha,
-		    uint32_t format)
+Bool
+sna_get_rgba_from_pixel(uint32_t pixel,
+			uint16_t *red,
+			uint16_t *green,
+			uint16_t *blue,
+			uint16_t *alpha,
+			uint32_t format)
 {
 	int rbits, bbits, gbits, abits;
 	int rshift, bshift, gshift, ashift;
@@ -607,31 +607,6 @@ _sna_get_pixel_from_rgba(uint32_t * pixel,
 	return TRUE;
 }
 
-static uint32_t
-color_convert(uint32_t pixel,
-	      uint32_t src_format,
-	      uint32_t dst_format)
-{
-	DBG(("%s: src=%08x [%08x]\n", __FUNCTION__, pixel, src_format));
-
-	if (src_format != dst_format) {
-		uint16_t red, green, blue, alpha;
-
-		if (!get_rgba_from_pixel(pixel,
-					 &red, &green, &blue, &alpha,
-					 src_format))
-			return 0;
-
-		if (!sna_get_pixel_from_rgba(&pixel,
-					     red, green, blue, alpha,
-					     dst_format))
-			return 0;
-	}
-
-	DBG(("%s: dst=%08x [%08x]\n", __FUNCTION__, pixel, dst_format));
-	return pixel;
-}
-
 uint32_t
 sna_rgba_for_color(uint32_t color, int depth)
 {
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 73ef568..9c49281 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -485,6 +485,12 @@ sna_render_get_gradient(struct sna *sna,
 
 uint32_t sna_rgba_for_color(uint32_t color, int depth);
 uint32_t sna_rgba_to_color(uint32_t rgba, uint32_t format);
+Bool sna_get_rgba_from_pixel(uint32_t pixel,
+			     uint16_t *red,
+			     uint16_t *green,
+			     uint16_t *blue,
+			     uint16_t *alpha,
+			     uint32_t format);
 Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
 
 void no_render_init(struct sna *sna);
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 956e2aa..03e1969 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -190,4 +190,29 @@ sna_render_reduce_damage(struct sna_composite_op *op,
 	}
 }
 
+inline static uint32_t
+color_convert(uint32_t pixel,
+	      uint32_t src_format,
+	      uint32_t dst_format)
+{
+	DBG(("%s: src=%08x [%08x]\n", __FUNCTION__, pixel, src_format));
+
+	if (src_format != dst_format) {
+		uint16_t red, green, blue, alpha;
+
+		if (!sna_get_rgba_from_pixel(pixel,
+					     &red, &green, &blue, &alpha,
+					     src_format))
+			return 0;
+
+		if (!sna_get_pixel_from_rgba(&pixel,
+					     red, green, blue, alpha,
+					     dst_format))
+			return 0;
+	}
+
+	DBG(("%s: dst=%08x [%08x]\n", __FUNCTION__, pixel, dst_format));
+	return pixel;
+}
+
 #endif /* SNA_RENDER_INLINE_H */
commit 87a672dafd9d6f47f31b77b406b7f0fb2b4030ac
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 09:24:06 2012 +0100

    sna: Apply CoordMode when computing point extents
    
    Reported-by: Patrick Truebe <eko-priv at gmx.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48220
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6eee977..f1d4d00 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5556,9 +5556,19 @@ sna_poly_point_extents(DrawablePtr drawable, GCPtr gc,
 
 	box.x2 = box.x1 = pt->x;
 	box.y2 = box.y1 = pt->y;
-	while (--n) {
-		pt++;
-		box_add_pt(&box, pt->x, pt->y);
+	if (mode == CoordModePrevious) {
+		DDXPointRec last = *pt++;
+		while (--n) {
+			last.x += pt->x;
+			last.y += pt->y;
+			pt++;
+			box_add_pt(&box, last.x, last.y);
+		}
+	} else {
+		while (--n) {
+			++pt;
+			box_add_pt(&box, pt->x, pt->y);
+		}
 	}
 	box.x2++;
 	box.y2++;
commit 0a0ee491ea18dc59748ff4419ae73bd1a369ae79
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Apr 3 09:23:49 2012 +0100

    sna: Debugging fill spans and their clipping
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 32eae52..6eee977 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4291,14 +4291,18 @@ sna_fill_spans__fill_clip_extents(DrawablePtr drawable,
 	     extents->x2, extents->y2));
 
 	while (n--) {
+		DBG(("%s: [%d] pt=(%d, %d), width=%d\n",
+		     __FUNCTION__, n, pt->x, pt->y, *width));
 		*(DDXPointRec *)b = *pt++;
 		b->x2 = b->x1 + (int)*width++;
 		b->y2 = b->y1 + 1;
 		if (box_intersect(b, extents)) {
-			b->x1 += data->dx;
-			b->x2 += data->dx;
-			b->y1 += data->dy;
-			b->y2 += data->dy;
+			DBG(("%s: [%d] clipped=(%d, %d), (%d, %d)\n",
+			     __FUNCTION__, n, b->x1, b->y1, b->x2, b->y2));
+			if (data->dx|data->dy) {
+				b->x1 += data->dx; b->x2 += data->dx;
+				b->y1 += data->dy; b->y2 += data->dy;
+			}
 			if (++b == last_box) {
 				op->boxes(data->sna, op, box, last_box - box);
 				b = box;
@@ -8578,6 +8582,9 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 	      (gc->fillStyle == FillTiled && gc->tileIsPixel)),
 	     gc->fillStyle, gc->tileIsPixel,
 	     gc->alu));
+	DBG(("%s: draw=%d, offset=(%d, %d), size=%dx%d\n",
+	     __FUNCTION__, draw->serialNumber,
+	     draw->x, draw->y, draw->width, draw->height));
 
 	data.flags = sna_poly_point_extents(draw, gc, mode, n, pt,
 					    &data.region.extents);
commit ec1267df746512c2e262ef0bd9e9527bc5efe6f4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 2 16:16:24 2012 +0100

    sna: Use the solid spans fast paths for dashed zero-width lines as well
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9829ada..32eae52 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4499,10 +4499,10 @@ no_damage_clipped:
 				b->y2 = b->y1 + 1;
 
 				if (box_intersect(b, &clip.extents)) {
-					b->x1 += dx;
-					b->x2 += dx;
-					b->y1 += dy;
-					b->y2 += dy;
+					if (dx|dy) {
+						b->x1 += dx; b->x2 += dx;
+						b->y1 += dy; b->y2 += dy;
+					}
 					if (++b == last_box) {
 						fill.boxes(sna, &fill, box, last_box - box);
 						b = box;
@@ -6598,7 +6598,6 @@ spans_fallback:
 		sna_gc(gc)->priv = &data;
 
 		if (gc->lineWidth == 0 &&
-		    gc->lineStyle == LineSolid &&
 		    gc_is_solid(gc, &color)) {
 			struct sna_fill_op fill;
 
@@ -6628,12 +6627,19 @@ spans_fallback:
 			assert(gc->miTranslate);
 
 			gc->ops = &sna_gc_ops__tmp;
-			miZeroLine(drawable, gc, mode, n, pt);
+			if (gc->lineStyle == LineSolid) {
+				DBG(("%s: miZeroLine (solid fill)\n", __FUNCTION__));
+				miZeroLine(drawable, gc, mode, n, pt);
+			} else {
+				DBG(("%s: miZeroDashLine (solid fill)\n", __FUNCTION__));
+				miZeroDashLine(drawable, gc, mode, n, pt);
+			}
 			fill.done(data.sna, &fill);
 		} else {
-			/* Note that the WideDash functions alternate between filling
-			 * using fgPixel and bgPixel so we need to reset state between
-			 * FillSpans.
+			/* Note that the WideDash functions alternate
+			 * between filling using fgPixel and bgPixel
+			 * so we need to reset state between FillSpans and
+			 * cannot use the fill fast paths.
 			 */
 			sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
 			gc->ops = &sna_gc_ops__tmp;
commit 2d1f3cb198f2fe9602356a334a076abf3c68a9c7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 2 13:51:37 2012 +0100

    sna/gen4: Remove the accidental debugging hack from the last commit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 1868e2f..b8a2459 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1932,7 +1932,7 @@ gen4_composite_linear_init(struct sna *sna,
 
 	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
 	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
-	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y) - .5/sf);
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
 
 	channel->embedded_transform.matrix[1][0] = 0;
 	channel->embedded_transform.matrix[1][1] = 0;
commit 5c4dc9c5db7b2a5b936bdbc15536c5cf0f7f5f23
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Apr 2 13:40:22 2012 +0100

    sna/gen3+: Fix sampling of borders around gradients
    
    Incurs a slight loss of precision for the internal gradient, but much
    more preferable to the artefacts around the borders with RepeatNone.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45016
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index e798096..0478709 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2137,7 +2137,7 @@ static Bool gen3_gradient_setup(struct sna *sna,
 
 	channel->pict_format = PICT_a8r8g8b8;
 	channel->card_format = MAPSURF_32BIT | MT_32BIT_ARGB8888;
-	channel->filter = PictFilterBilinear;
+	channel->filter = PictFilterNearest;
 	channel->is_affine = sna_transform_is_affine(picture->transform);
 	if (sna_transform_is_integer_translation(picture->transform, &dx, &dy)) {
 		DBG(("%s: integer translation (%d, %d), removing\n",
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 2e78a92..1868e2f 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1871,7 +1871,7 @@ gen4_composite_linear_init(struct sna *sna,
 	if (!channel->bo)
 		return 0;
 
-	channel->filter = PictFilterBilinear;
+	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
@@ -1932,7 +1932,7 @@ gen4_composite_linear_init(struct sna *sna,
 
 	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
 	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
-	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y) - .5/sf);
 
 	channel->embedded_transform.matrix[1][0] = 0;
 	channel->embedded_transform.matrix[1][1] = 0;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index c27accd..1fb7f65 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1904,7 +1904,7 @@ gen5_composite_linear_init(struct sna *sna,
 	if (!channel->bo)
 		return 0;
 
-	channel->filter = PictFilterBilinear;
+	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 3be9195..5bbe5e3 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2080,7 +2080,7 @@ gen6_composite_linear_init(struct sna *sna,
 	if (!channel->bo)
 		return 0;
 
-	channel->filter = PictFilterBilinear;
+	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 6917c21..7dff02f 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2209,7 +2209,7 @@ gen7_composite_linear_init(struct sna *sna,
 	if (!channel->bo)
 		return 0;
 
-	channel->filter = PictFilterBilinear;
+	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
commit 0b2651dc04cef8f9692b2557684f044b4980700f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 1 19:43:15 2012 +0100

    sna: Apply composite offset to damage for spans fast paths
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 20d9166..9829ada 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6665,8 +6665,12 @@ spans_fallback:
 
 		gc->ops = (GCOps *)&sna_gc_ops;
 		assert_pixmap_contains_box(data.pixmap, &data.region.extents);
-		if (data.damage)
+		if (data.damage) {
+			if (data.dx | data.dy)
+				pixman_region_translate(&data.region, data.dx, data.dy);
+			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
+		}
 		RegionUninit(&data.region);
 		return;
 	}
@@ -7579,8 +7583,12 @@ spans_fallback:
 		}
 
 		gc->ops = (GCOps *)&sna_gc_ops;
-		if (data.damage)
+		if (data.damage) {
+			if (data.dx | data.dy)
+				pixman_region_translate(&data.region, data.dx, data.dy);
+			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
+		}
 		RegionUninit(&data.region);
 		return;
 	}
@@ -8299,8 +8307,12 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 			gc->ops = (GCOps *)&sna_gc_ops;
 
 			fill.done(data.sna, &fill);
-			if (data.damage)
+			if (data.damage) {
+				if (data.dx | data.dy)
+					pixman_region_translate(&data.region, data.dx, data.dy);
+				assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 				sna_damage_add(data.damage, &data.region);
+			}
 			RegionUninit(&data.region);
 			return;
 		}
@@ -8645,8 +8657,12 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 		}
 
 		gc->ops = (GCOps *)&sna_gc_ops;
-		if (data.damage)
+		if (data.damage) {
+			if (data.dx | data.dy)
+				pixman_region_translate(&data.region, data.dx, data.dy);
+			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
+		}
 		RegionUninit(&data.region);
 		return;
 	}
@@ -10207,8 +10223,12 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 		}
 
 		gc->ops = (GCOps *)&sna_gc_ops;
-		if (data.damage)
+		if (data.damage) {
+			if (data.dx | data.dy)
+				pixman_region_translate(&data.region, data.dx, data.dy);
+			assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 			sna_damage_add(data.damage, &data.region);
+		}
 		RegionUninit(&data.region);
 		return;
 	}
commit 4ea9ab9303d21a62683055b75eaed66c97a5f289
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 1 16:36:16 2012 +0100

    sna: Fix assertion to look at bbox of all boxes/points
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 47a627a..20d9166 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -233,6 +233,8 @@ static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, BoxPtr box, int n, i
 
 	extents = *box;
 	while (--n) {
+		++box;
+
 		if (box->x1 < extents.x1)
 			extents.x1 = box->x1;
 		if (box->x2 > extents.x2)
@@ -258,6 +260,8 @@ static void _assert_pixmap_contains_points(PixmapPtr pixmap, DDXPointRec *pt, in
 	extents.x2 = extents.x1 = pt->x;
 	extents.y2 = extents.y1 = pt->y;
 	while (--n) {
+		++pt;
+
 		if (pt->x < extents.x1)
 			extents.x1 = pt->x;
 		else if (pt->x > extents.x2)
commit 932743bb333e35d5f6529a701137aad4d7490555
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Apr 1 09:54:43 2012 +0100

    sna: Assert that drawing boxes are within bounds
    
    More sanity checks required.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index bf76948..47a627a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -227,6 +227,54 @@ static void _assert_pixmap_contains_box(PixmapPtr pixmap, BoxPtr box, const char
 	}
 }
 
+static void _assert_pixmap_contains_boxes(PixmapPtr pixmap, BoxPtr box, int n, int dx, int dy, const char *function)
+{
+	BoxRec extents;
+
+	extents = *box;
+	while (--n) {
+		if (box->x1 < extents.x1)
+			extents.x1 = box->x1;
+		if (box->x2 > extents.x2)
+			extents.x2 = box->x2;
+
+		if (box->y1 < extents.y1)
+			extents.y1 = box->y1;
+		if (box->y2 > extents.y2)
+			extents.y2 = box->y2;
+	}
+	extents.x1 += dx;
+	extents.x2 += dx;
+	extents.y1 += dy;
+	extents.y2 += dy;
+	_assert_pixmap_contains_box(pixmap, &extents, function);
+}
+
+
+static void _assert_pixmap_contains_points(PixmapPtr pixmap, DDXPointRec *pt, int n, int dx, int dy, const char *function)
+{
+	BoxRec extents;
+
+	extents.x2 = extents.x1 = pt->x;
+	extents.y2 = extents.y1 = pt->y;
+	while (--n) {
+		if (pt->x < extents.x1)
+			extents.x1 = pt->x;
+		else if (pt->x > extents.x2)
+			extents.x2 = pt->x;
+
+		if (pt->y < extents.y1)
+			extents.y1 = pt->y;
+		else if (pt->y > extents.y2)
+			extents.y2 = pt->y;
+	}
+	extents.x1 += dx;
+	extents.x2 += dx + 1;
+	extents.y1 += dy;
+	extents.y2 += dy + 1;
+	_assert_pixmap_contains_box(pixmap, &extents, function);
+}
+
 static void _assert_drawable_contains_box(DrawablePtr drawable, const BoxRec *box, const char *function)
 {
 	if (box->x1 < drawable->x ||
@@ -244,8 +292,12 @@ static void _assert_drawable_contains_box(DrawablePtr drawable, const BoxRec *bo
 }
 #define assert_pixmap_contains_box(p, b) _assert_pixmap_contains_box(p, b, __FUNCTION__)
 #define assert_drawable_contains_box(d, b) _assert_drawable_contains_box(d, b, __FUNCTION__)
+#define assert_pixmap_contains_boxes(p, b, n, x, y) _assert_pixmap_contains_boxes(p, b, n, x, y, __FUNCTION__)
+#define assert_pixmap_contains_points(p, pt, n, x, y) _assert_pixmap_contains_points(p, pt, n, x, y, __FUNCTION__)
 #else
 #define assert_pixmap_contains_box(p, b)
+#define assert_pixmap_contains_boxes(p, b, n, x, y)
+#define assert_pixmap_contains_points(p, pt, n, x, y)
 #define assert_drawable_contains_box(d, b)
 #endif
 
@@ -4403,12 +4455,14 @@ damage:
 		b->y2 = b->y1 + 1;
 
 		if (++b == last_box) {
+			assert_pixmap_contains_boxes(pixmap, box, last_box-box, 0, 0);
 			fill.boxes(sna, &fill, box, last_box - box);
 			sna_damage_add_boxes(damage, box, last_box - box, 0, 0);
 			b = box;
 		}
 	} while (--n);
 	if (b != box) {
+		assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 		fill.boxes(sna, &fill, box, b - box);
 		sna_damage_add_boxes(damage, box, b - box, 0, 0);
 	}
@@ -4549,6 +4603,7 @@ damage_clipped:
 					b->y1 += dy;
 					b->y2 += dy;
 					if (++b == last_box) {
+						assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 						fill.boxes(sna, &fill, box, last_box - box);
 						sna_damage_add_boxes(damage, box, b - box, 0, 0);
 						b = box;
@@ -4609,6 +4664,7 @@ damage_clipped:
 					b->y1 = y + dy;
 					b->y2 = b->y1 + 1;
 					if (++b == last_box) {
+						assert_pixmap_contains_boxes(pixmap, box, last_box-box, 0, 0);
 						fill.boxes(sna, &fill, box, last_box - box);
 						sna_damage_add_boxes(damage, box, last_box - box, 0, 0);
 						b = box;
@@ -4618,6 +4674,7 @@ damage_clipped:
 			RegionUninit(&clip);
 		}
 		if (b != box) {
+			assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 			fill.boxes(sna, &fill, box, b - box);
 			sna_damage_add_boxes(damage, box, b - box, 0, 0);
 		}
@@ -4922,6 +4979,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		return;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
+	assert_pixmap_contains_boxes(pixmap, box, n, dx, dy);
 	if (arg->damage)
 		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
 
@@ -5082,6 +5140,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	sy += dy;
 
 	get_drawable_deltas(drawable, dst_pixmap, &dx, &dy);
+	assert_pixmap_contains_boxes(dst_pixmap, box, n, dx, dy);
 	if (arg->damage)
 		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
 
@@ -5411,6 +5470,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 		last.x += dx;
 		last.y += dy;
 
+		assert_pixmap_contains_points(pixmap, pt, n, last.x, last.y);
 		sna_damage_add_points(damage, pt, n, last.x, last.y);
 		do {
 			unsigned nbox = n;
@@ -5457,6 +5517,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 				b->x2 = b->x1 + 1;
 				b->y2 = b->y1 + 1;
 				if (++b == last_box){
+					assert_pixmap_contains_boxes(pixmap, box, last_box-box, 0, 0);
 					fill.boxes(sna, &fill, box, last_box - box);
 					if (damage)
 						sna_damage_add_boxes(damage, box, last_box-box, 0, 0);
@@ -5465,6 +5526,7 @@ sna_poly_point_blt(DrawablePtr drawable,
 			}
 		}
 		if (b != box){
+			assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 			fill.boxes(sna, &fill, box, b - box);
 			if (damage)
 				sna_damage_add_boxes(damage, box, b-box, 0, 0);
@@ -5916,6 +5978,7 @@ done:
 	return true;
 
 damage:
+	assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 	sna_damage_add_boxes(damage, box, b-box, 0, 0);
 no_damage:
 	fill.boxes(sna, &fill, box, b-box);
@@ -5930,6 +5993,7 @@ no_damage_offset:
 			bb->y1 += dy;
 			bb->y2 += dy;
 		} while (++bb != b);
+		assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 		fill.boxes(sna, &fill, box, b - box);
 	}
 	goto *ret;
@@ -5943,6 +6007,7 @@ damage_offset:
 			bb->y1 += dy;
 			bb->y2 += dy;
 		} while (++bb != b);
+		assert_pixmap_contains_boxes(pixmap, box, b-box, 0, 0);
 		fill.boxes(sna, &fill, box, b - box);
 		sna_damage_add_boxes(damage, box, b - box, 0, 0);
 	}
@@ -6014,6 +6079,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 			     __FUNCTION__,
 			     b->x1, b->y1, b->x2, b->y2));
 			if (++b == last_box) {
+				assert_pixmap_contains_boxes(pixmap, boxes, last_box-boxes, 0, 0);
 				fill.boxes(sna, &fill, boxes, last_box - boxes);
 				if (damage)
 					sna_damage_add_boxes(damage, boxes, last_box - boxes, 0, 0);
@@ -6075,6 +6141,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 					b->y1 += dy;
 					b->y2 += dy;
 					if (++b == last_box) {
+						assert_pixmap_contains_boxes(pixmap, boxes, last_box-boxes, 0, 0);
 						fill.boxes(sna, &fill, boxes, last_box - boxes);
 						if (damage)
 							sna_damage_add_boxes(damage, boxes, last_box - boxes, 0, 0);
@@ -6139,6 +6206,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 						b->y1 += dy;
 						b->y2 += dy;
 						if (++b == last_box) {
+							assert_pixmap_contains_boxes(pixmap, boxes, last_box-boxes, 0, 0);
 							fill.boxes(sna, &fill, boxes, last_box-boxes);
 							if (damage)
 								sna_damage_add_boxes(damage, boxes, last_box-boxes, 0, 0);
@@ -6153,6 +6221,7 @@ sna_poly_line_blt(DrawablePtr drawable,
 		RegionUninit(&clip);
 	}
 	if (b != boxes) {
+		assert_pixmap_contains_boxes(pixmap, boxes, b-boxes, 0, 0);
 		fill.boxes(sna, &fill, boxes, b - boxes);
 		if (damage)
 			sna_damage_add_boxes(damage, boxes, b - boxes, 0, 0);
@@ -6591,6 +6660,7 @@ spans_fallback:
 		}
 
 		gc->ops = (GCOps *)&sna_gc_ops;
+		assert_pixmap_contains_box(data.pixmap, &data.region.extents);
 		if (data.damage)
 			sna_damage_add(data.damage, &data.region);
 		RegionUninit(&data.region);
commit cc20c45aa0ca15720510668d6918bf3c99104626
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 22:51:21 2012 +0100

    sna: Minimise the risk of hotplug hangs by checking fb before vsync
    
    Every time we issue a MI_WAIT_FOR_EVENT on a scan-line from userspace we
    run the risk of that pipe being disabled before we submit a batch. As the
    pipe is then disabled or configured differently, we encounter an
    indefinite wait and trigger a GPU hang.
    
    To minimise the risk of a hotplug event being detected and submitting a
    vsynced batch prior to noticing the removal of the pipe, perform an
    explicit query of the current CRTC and delete the wait if we spot that
    our framebuffer is no longer attached. This is about as good as we can
    achieve without extra help from the kernel.
    
    Reported-by: Francis Leblanc <Francis.Leblanc-Lebeau at verint.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45413 (and others)
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 27e0e04..e52645c 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -141,6 +141,7 @@ struct kgem {
 	uint16_t nexec;
 	uint16_t nreloc;
 	uint16_t nfence;
+	uint16_t wait;
 	uint16_t max_batch_size;
 
 	uint32_t flush:1;
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 7a1e2f6..308e329 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -363,6 +363,7 @@ extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 
 extern bool sna_wait_for_scanline(struct sna *sna, PixmapPtr pixmap,
 				  xf86CrtcPtr crtc, const BoxRec *clip);
+extern bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc);
 
 Bool sna_dri_open(struct sna *sna, ScreenPtr pScreen);
 void sna_dri_wakeup(struct sna *sna);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index f413ac1..ef3b0f9 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2102,6 +2102,7 @@ static void sna_emit_wait_for_scanline_gen6(struct sna *sna,
 	b[1] = pipe;
 	b[2] = y2 - 1;
 	b[3] = MI_WAIT_FOR_EVENT | event;
+	sna->kgem.wait = sna->kgem.nbatch + 3;
 	kgem_advance_batch(&sna->kgem, 4);
 }
 
@@ -2131,6 +2132,7 @@ static void sna_emit_wait_for_scanline_gen4(struct sna *sna,
 	b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
 	b[3] = b[1] = (y1 << 16) | (y2-1);
 	b[4] = MI_WAIT_FOR_EVENT | event;
+	sna->kgem.wait = sna->kgem.nbatch + 4;
 	kgem_advance_batch(&sna->kgem, 5);
 }
 
@@ -2158,6 +2160,7 @@ static void sna_emit_wait_for_scanline_gen2(struct sna *sna,
 		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
 	else
 		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
+	sna->kgem.wait = sna->kgem.nbatch + 4;
 	kgem_advance_batch(&sna->kgem, 5);
 }
 
@@ -2171,21 +2174,15 @@ sna_wait_for_scanline(struct sna *sna,
 	Bool full_height;
 	int y1, y2, pipe;
 
+	assert(crtc);
+	assert(sna_crtc_on(crtc));
+	assert(pixmap_is_scanout(pixmap));
+
 	/* XXX WAIT_EVENT is still causing hangs on SNB */
 	if (sna->kgem.gen >= 60)
 		return false;
 
-	if (!pixmap_is_scanout(pixmap))
-		return false;
-
-	if (crtc == NULL) {
-		crtc = sna_covering_crtc(sna->scrn, clip, NULL, &crtc_box);
-		if (crtc == NULL)
-			return false;
-	} else
-		sna_crtc_box(crtc, &crtc_box);
-	assert(sna_crtc_on(crtc));
-
+	sna_crtc_box(crtc, &crtc_box);
 	if (crtc->transform_in_use) {
 		box = *clip;
 		pixman_f_transform_bounds(&crtc->f_framebuffer_to_crtc, &box);
@@ -2227,3 +2224,14 @@ sna_wait_for_scanline(struct sna *sna,
 
 	return true;
 }
+
+bool sna_crtc_is_bound(struct sna *sna, xf86CrtcPtr crtc)
+{
+	struct drm_mode_crtc mode;
+
+	mode.crtc_id = crtc_id(crtc->driver_private);
+	if (drmIoctl(sna->kgem.fd, DRM_IOCTL_MODE_GETCRTC, &mode))
+		return false;
+
+	return mode.mode_valid && sna->mode.fb_id == mode.fb_id;
+}
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 95ec07e..afec831 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -400,6 +400,7 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 	PixmapPtr pixmap = get_drawable_pixmap(draw);
 	pixman_region16_t clip;
 	bool flush = false;
+	xf86CrtcPtr crtc;
 	BoxRec box, *boxes;
 	int16_t dx, dy;
 	int n;
@@ -442,9 +443,15 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 			return;
 		}
 
-		if (pixmap == sna->front && sync)
-			flush = sna_wait_for_scanline(sna, pixmap, NULL,
-						      &region->extents);
+		if (pixmap == sna->front && sync) {
+			BoxRec crtc_box;
+
+			crtc = sna_covering_crtc(sna->scrn, &region->extents,
+						 NULL, &crtc_box);
+			if (crtc)
+				flush = sna_wait_for_scanline(sna, pixmap, crtc,
+							      &region->extents);
+		}
 
 		get_drawable_deltas(draw, pixmap, &dx, &dy);
 	}
@@ -482,8 +489,11 @@ sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
 			       boxes, n);
 
 	DBG(("%s: flushing? %d\n", __FUNCTION__, flush));
-	if (flush) /* STAT! */
+	if (flush) { /* STAT! */
+		if (!sna_crtc_is_bound(sna, crtc))
+			sna->kgem.batch[sna->kgem.wait] = 0;
 		kgem_submit(&sna->kgem);
+	}
 
 	pixman_region_translate(region, dx, dy);
 	DamageRegionAppend(&pixmap->drawable, region);
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index a71751c..b740b6a 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -306,8 +306,11 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	/* Push the frame to the GPU as soon as possible so
 	 * we can hit the next vsync.
 	 */
-	if (flush)
+	if (flush) {
+		if (!sna_crtc_is_bound(sna, crtc))
+			sna->kgem.batch[sna->kgem.wait] = 0;
 		kgem_submit(&sna->kgem);
+	}
 
 	return ret;
 }
commit 305734ebdf3d51c084cfbee8804b6c60b1f03a98
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 21:27:29 2012 +0100

    sna: Separate out scanline waiting for gen4
    
    So that we do not set a gen4 bit on gen2 and apply the old workaround of
    trimming y2 instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ecfe670..f413ac1 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -2105,44 +2105,62 @@ static void sna_emit_wait_for_scanline_gen6(struct sna *sna,
 	kgem_advance_batch(&sna->kgem, 4);
 }
 
-static void sna_emit_wait_for_scanline_gen2(struct sna *sna,
+static void sna_emit_wait_for_scanline_gen4(struct sna *sna,
 					    int pipe, int y1, int y2,
 					    bool full_height)
 {
 	uint32_t event;
 	uint32_t *b;
 
-	/*
-	 * Pre-965 doesn't have SVBLANK, so we need a bit
-	 * of extra time for the blitter to start up and
-	 * do its job for a full height blit
-	 */
 	if (pipe == 0) {
-		pipe = MI_LOAD_SCAN_LINES_DISPLAY_PIPEA;
-		event = MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
 		if (full_height)
 			event = MI_WAIT_FOR_PIPEA_SVBLANK;
+		else
+			event = MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
 	} else {
-		pipe = MI_LOAD_SCAN_LINES_DISPLAY_PIPEB;
-		event = MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
 		if (full_height)
 			event = MI_WAIT_FOR_PIPEB_SVBLANK;
+		else
+			event = MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
 	}
 
-	if (sna->kgem.mode == KGEM_NONE)
-		kgem_set_mode(&sna->kgem, KGEM_BLT);
-
+	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	b = kgem_get_batch(&sna->kgem, 5);
 	/* The documentation says that the LOAD_SCAN_LINES command
 	 * always comes in pairs. Don't ask me why. */
-	b[0] = MI_LOAD_SCAN_LINES_INCL | pipe;
-	b[1] = (y1 << 16) | (y2-1);
-	b[2] = MI_LOAD_SCAN_LINES_INCL | pipe;
-	b[3] = (y1 << 16) | (y2-1);
+	b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
+	b[3] = b[1] = (y1 << 16) | (y2-1);
 	b[4] = MI_WAIT_FOR_EVENT | event;
 	kgem_advance_batch(&sna->kgem, 5);
 }
 
+static void sna_emit_wait_for_scanline_gen2(struct sna *sna,
+					    int pipe, int y1, int y2,
+					    bool full_height)
+{
+	uint32_t *b;
+
+	/*
+	 * Pre-965 doesn't have SVBLANK, so we need a bit
+	 * of extra time for the blitter to start up and
+	 * do its job for a full height blit
+	 */
+	if (full_height)
+		y2 -= 2;
+
+	kgem_set_mode(&sna->kgem, KGEM_BLT);
+	b = kgem_get_batch(&sna->kgem, 5);
+	/* The documentation says that the LOAD_SCAN_LINES command
+	 * always comes in pairs. Don't ask me why. */
+	b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
+	b[3] = b[1] = (y1 << 16) | (y2-1);
+	if (pipe == 0)
+		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
+	else
+		b[4] = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
+	kgem_advance_batch(&sna->kgem, 5);
+}
+
 bool
 sna_wait_for_scanline(struct sna *sna,
 		      PixmapPtr pixmap,
@@ -2202,6 +2220,8 @@ sna_wait_for_scanline(struct sna *sna,
 
 	if (sna->kgem.gen >= 60)
 		sna_emit_wait_for_scanline_gen6(sna, pipe, y1, y2, full_height);
+	else if (sna->kgem.gen >= 40)
+		sna_emit_wait_for_scanline_gen4(sna, pipe, y1, y2, full_height);
 	else
 		sna_emit_wait_for_scanline_gen2(sna, pipe, y1, y2, full_height);
 
commit 6f2814db6f7b89e94e54b8d73c7e176ab7d1c469
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 20:45:55 2012 +0100

    sna/traps: Align the pointer+index
    
    It's the location of the pixels within the row that matter for
    alignment!
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47418
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
    Tested-by: Magnus Kessler <Magnus.Kessler at gmx.net>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index b6c5b65..2b4b2db 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1490,42 +1490,42 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 
 			if (rix > ++lix) {
 				rix -= lix;
+				row += lix;
 #if 0
 				if (rix == 1)
-					row[lix] = 0xff;
+					*row = 0xff;
 				else
-					memset(row+lix, 0xff, rix);
+					memset(row, 0xff, rix);
 #else
 				if ((uintptr_t)row & 1 && rix) {
-					row[lix] = 0xff;
-					lix++;
+					*row++ = 0xff;
 					rix--;
 				}
 				if ((uintptr_t)row & 2 && rix >= 2) {
-					*(uint16_t *)(row+lix) = 0xffff;
-					lix += 2;
+					*(uint16_t *)row = 0xffff;
+					row += 2;
 					rix -= 2;
 				}
 				if ((uintptr_t)row & 4 && rix >= 4) {
-					*(uint32_t *)(row+lix) = 0xffffffff;
-					lix += 4;
+					*(uint32_t *)row = 0xffffffff;
+					row += 4;
 					rix -= 4;
 				}
 				while (rix >= 8) {
-					*(uint64_t *)(row+lix) = 0xffffffffffffffff;
-					lix += 8;
+					*(uint64_t *)row = 0xffffffffffffffff;
+					row += 8;
 					rix -= 8;
 				}
 				if (rix & 4) {
-					*(uint32_t *)(row+lix) = 0xffffffff;
-					lix += 4;
+					*(uint32_t *)row = 0xffffffff;
+					row += 4;
 				}
 				if (rix & 2) {
-					*(uint16_t *)(row+lix) = 0xffff;
-					lix += 2;
+					*(uint16_t *)row = 0xffff;
+					row += 2;
 				}
 				if (rix & 1)
-					row[lix] = 0xff;
+					*row = 0xff;
 #endif
 			}
 		}
commit ee075ced844350785685a0f93f88f1dc310bcc73
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 19:09:30 2012 +0100

    sna/traps: Align the pointer not the indices
    
    Magnus found that inplace_row was still crashing on his setup when it
    tried to perform an 8-byte aligned write to an unaligned pointer. This
    time it looks like the row pointer itself was not 8-byte aligned, so
    instead of assuming that and fixing up the indices, ensure that the
    (index+row) results in an 8-byte aligned value.
    
    Reported-by: Magnus Kessler <Magnus.Kessler at gmx.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47418
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 4067757..b6c5b65 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1496,17 +1496,17 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 				else
 					memset(row+lix, 0xff, rix);
 #else
-				if (lix & 1 && rix) {
+				if ((uintptr_t)row & 1 && rix) {
 					row[lix] = 0xff;
 					lix++;
 					rix--;
 				}
-				if (lix & 2 && rix >= 2) {
+				if ((uintptr_t)row & 2 && rix >= 2) {
 					*(uint16_t *)(row+lix) = 0xffff;
 					lix += 2;
 					rix -= 2;
 				}
-				if (lix & 4 && rix >= 4) {
+				if ((uintptr_t)row & 4 && rix >= 4) {
 					*(uint32_t *)(row+lix) = 0xffffffff;
 					lix += 4;
 					rix -= 4;
commit fde8a010b3d9406c2f65ee99978360a6ca54e006
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 12:47:21 2012 +0100

    uxa: Remove broken render glyphs-to-dst
    
    Reported-by: Vincent Untz <vuntz at gnome.org>
    Reported-by: Robert Bradford <robert.bradford at intel.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=48045
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-glyphs.c b/uxa/uxa-glyphs.c
index 6172f2f..b754f4e 100644
--- a/uxa/uxa-glyphs.c
+++ b/uxa/uxa-glyphs.c
@@ -663,190 +663,6 @@ uxa_glyph_cache(ScreenPtr screen, GlyphPtr glyph, int *out_x, int *out_y)
 	return cache->picture;
 }
 
-static int
-uxa_glyphs_to_dst(CARD8 op,
-		  PicturePtr pSrc,
-		  PicturePtr pDst,
-		  INT16 src_x, INT16 src_y,
-		  INT16 xDst, INT16 yDst,
-		  int nlist, GlyphListPtr list, GlyphPtr * glyphs,
-		  BoxPtr extents)
-{
-	ScreenPtr screen = pDst->pDrawable->pScreen;
-	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
-	PixmapPtr src_pixmap, dst_pixmap;
-	PicturePtr localSrc, glyph_atlas;
-	int x, y, n;
-	BoxRec box;
-
-	if (uxa_screen->info->check_composite_texture &&
-	    uxa_screen->info->check_composite_texture(screen, pSrc)) {
-		if (pSrc->pDrawable) {
-			int src_off_x, src_off_y;
-
-			src_pixmap = uxa_get_offscreen_pixmap(pSrc->pDrawable, &src_off_x, &src_off_y);
-			if (src_pixmap == NULL)
-				return -1;
-
-			src_x += pSrc->pDrawable->x + src_off_x;
-			src_y += pSrc->pDrawable->y + src_off_y;
-		} else {
-			src_pixmap = NULL;
-		}
-		localSrc = pSrc;
-	} else {
-		int width, height;
-
-		if (extents == NULL) {
-			uxa_glyph_extents(nlist, list, glyphs, &box);
-			extents = &box;
-		}
-
-		width  = extents->x2 - extents->x1;
-		height = extents->y2 - extents->y1;
-		if (width == 0 || height == 0)
-			return 0;
-
-		if (pSrc->pDrawable) {
-			int src_off_x, src_off_y;
-
-			src_off_x = extents->x1 - xDst;
-			src_off_y = extents->y1 - yDst;
-			localSrc = uxa_acquire_drawable(screen, pSrc,
-							src_x + src_off_x, src_y + src_off_y,
-							width, height,
-							&src_x, &src_y);
-			if (uxa_screen->info->check_composite_texture &&
-			    !uxa_screen->info->check_composite_texture(screen, localSrc)) {
-				if (localSrc != pSrc)
-					FreePicture(localSrc, 0);
-				return -1;
-			}
-
-			src_pixmap = uxa_get_offscreen_pixmap(localSrc->pDrawable, &src_off_x, &src_off_y);
-			if (src_pixmap == NULL) {
-				if (localSrc != pSrc)
-					FreePicture(localSrc, 0);
-				return -1;
-			}
-
-			src_x += localSrc->pDrawable->x + src_off_x;
-			src_y += localSrc->pDrawable->y + src_off_y;
-		} else {
-			localSrc = uxa_acquire_pattern(screen, pSrc,
-						       PICT_a8r8g8b8, x, y, width, height);
-			if (!localSrc)
-				return 1;
-
-			src_pixmap = uxa_get_drawable_pixmap(localSrc->pDrawable);
-			if (src_pixmap == NULL) {
-				FreePicture(localSrc, 0);
-				return -1;
-			}
-
-			src_x = src_y = 0;
-		}
-	}
-
-	dst_pixmap = uxa_get_offscreen_pixmap(pDst->pDrawable, &x, &y);
-	x += xDst + pDst->pDrawable->x - list->xOff;
-	y += yDst + pDst->pDrawable->y - list->yOff;
-
-	glyph_atlas = NULL;
-	while (nlist--) {
-		x += list->xOff;
-		y += list->yOff;
-		n = list->len;
-		while (n--) {
-			GlyphPtr glyph = *glyphs++;
-			PicturePtr this_atlas;
-			int mask_x, mask_y, nrect;
-			struct uxa_glyph *priv;
-			BoxPtr rects;
-
-			if (glyph->info.width == 0 || glyph->info.height == 0)
-				goto next_glyph;
-
-			priv = uxa_glyph_get_private(glyph);
-			if (priv != NULL) {
-				mask_x = priv->x;
-				mask_y = priv->y;
-				this_atlas = priv->cache->picture;
-			} else {
-				if (glyph_atlas) {
-					uxa_screen->info->done_composite(dst_pixmap);
-					glyph_atlas = NULL;
-				}
-				this_atlas = uxa_glyph_cache(screen, glyph, &mask_x, &mask_y);
-				if (this_atlas == NULL) {
-					/* no cache for this glyph */
-					this_atlas = GlyphPicture(glyph)[screen->myNum];
-					mask_x = mask_y = 0;
-				}
-			}
-
-			if (this_atlas != glyph_atlas) {
-				PixmapPtr mask_pixmap;
-
-				if (glyph_atlas)
-					uxa_screen->info->done_composite(dst_pixmap);
-
-				mask_pixmap =
-					uxa_get_drawable_pixmap(this_atlas->pDrawable);
-				if (!uxa_pixmap_is_offscreen(mask_pixmap) ||
-				    !uxa_screen->info->prepare_composite(op,
-									 localSrc, this_atlas, pDst,
-									 src_pixmap, mask_pixmap, dst_pixmap))
-					return -1;
-
-				glyph_atlas = this_atlas;
-			}
-
-			rects = REGION_RECTS(pDst->pCompositeClip);
-			nrect = REGION_NUM_RECTS(pDst->pCompositeClip);
-			while (nrect--) {
-				int x1 = x - glyph->info.x, dx = 0;
-				int y1 = y - glyph->info.y, dy = 0;
-				int x2 = x1 + glyph->info.width;
-				int y2 = y1 + glyph->info.height;
-
-				if (rects->y1 >= y2)
-					break;
-
-				if (x1 < rects->x1)
-					dx = rects->x1 - x1, x1 = rects->x1;
-				if (x2 > rects->x2)
-					x2 = rects->x2;
-				if (y1 < rects->y1)
-					dy = rects->y1 - y1, y1 = rects->y1;
-				if (y2 > rects->y2)
-					y2 = rects->y2;
-
-				if (x1 < x2 && y1 < y2) {
-					uxa_screen->info->composite(dst_pixmap,
-								    x1 + src_x,  y1 + src_y,
-								    dx + mask_x, dy + mask_y,
-								    x1, y1,
-								    x2 - x1, y2 - y1);
-				}
-				rects++;
-			}
-
-next_glyph:
-			x += glyph->info.xOff;
-			y += glyph->info.yOff;
-		}
-		list++;
-	}
-	if (glyph_atlas)
-		uxa_screen->info->done_composite(dst_pixmap);
-
-	if (localSrc != pSrc)
-		FreePicture(localSrc, 0);
-
-	return 0;
-}
-
 static void
 uxa_clear_pixmap(ScreenPtr screen,
 		 uxa_screen_t *uxa_screen,
@@ -894,37 +710,30 @@ uxa_glyphs_via_mask(CARD8 op,
 		    PicturePtr pDst,
 		    PictFormatPtr maskFormat,
 		    INT16 xSrc, INT16 ySrc,
-		    INT16 xDst, INT16 yDst,
-		    int nlist, GlyphListPtr list, GlyphPtr * glyphs,
-		    BoxPtr extents)
+		    int nlist, GlyphListPtr list, GlyphPtr * glyphs)
 {
 	ScreenPtr screen = pDst->pDrawable->pScreen;
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
 	CARD32 component_alpha;
 	PixmapPtr pixmap;
 	PicturePtr glyph_atlas, mask;
+	int xDst = list->xOff, yDst = list->yOff;
 	int x, y, width, height;
 	int dst_off_x, dst_off_y;
 	int n, error;
 	BoxRec box;
 
-	if (!extents) {
-		uxa_glyph_extents(nlist, list, glyphs, &box);
+	uxa_glyph_extents(nlist, list, glyphs, &box);
+	if (box.x2 <= box.x1 || box.y2 <= box.y1)
+		return 0;
 
-		if (box.x2 <= box.x1 || box.y2 <= box.y1)
-			return 0;
+	dst_off_x = box.x1;
+	dst_off_y = box.y1;
 
-		extents = &box;
-		dst_off_x = box.x1;
-		dst_off_y = box.y1;
-	} else {
-		dst_off_x = dst_off_y = 0;
-	}
-
-	width  = extents->x2 - extents->x1;
-	height = extents->y2 - extents->y1;
-	x = -extents->x1;
-	y = -extents->y1;
+	width  = box.x2 - box.x1;
+	height = box.y2 - box.y1;
+	x = -box.x1;
+	y = -box.y1;
 
 	if (maskFormat->depth == 1) {
 		PictFormatPtr a8Format =
@@ -1061,11 +870,6 @@ uxa_glyphs(CARD8 op,
 {
 	ScreenPtr screen = pDst->pDrawable->pScreen;
 	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
-	int xDst = list->xOff, yDst = list->yOff;
-	BoxRec extents = { 0, 0, 0, 0 };
-	Bool have_extents = FALSE;
-	int width, height, ret;
-	PicturePtr localDst = pDst;
 
 	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
 		int ok;
@@ -1128,112 +932,12 @@ fallback:
 		}
 	}
 
-	if (!maskFormat &&
-	    uxa_screen->info->check_composite_target &&
-	    !uxa_screen->info->check_composite_target(uxa_get_drawable_pixmap(pDst->pDrawable))) {
-		int depth = pDst->pDrawable->depth;
-		PixmapPtr pixmap;
-		int x, y, error;
-		GCPtr gc;
-
-		pixmap = uxa_get_drawable_pixmap(pDst->pDrawable);
-		if (uxa_screen->info->check_copy &&
-		    !uxa_screen->info->check_copy(pixmap, pixmap, GXcopy, FB_ALLONES))
-			goto fallback;
-
-		uxa_glyph_extents(nlist, list, glyphs, &extents);
-
-		/* clip against dst bounds */
-		if (extents.x1 < 0)
-			extents.x1 = 0;
-		if (extents.y1 < 0)
-			extents.y1 = 0;
-		if (extents.x2 > pDst->pDrawable->width)
-			extents.x2 = pDst->pDrawable->width;
-		if (extents.y2 > pDst->pDrawable->height)
-			extents.y2 = pDst->pDrawable->height;
-
-		if (extents.x2 <= extents.x1 || extents.y2 <= extents.y1)
-			return;
-		width  = extents.x2 - extents.x1;
-		height = extents.y2 - extents.y1;
-		x = -extents.x1;
-		y = -extents.y1;
-		have_extents = TRUE;
-
-		xDst += x;
-		yDst += y;
-
-		pixmap = screen->CreatePixmap(screen,
-					      width, height, depth,
-					      CREATE_PIXMAP_USAGE_SCRATCH);
-		if (!pixmap)
-			return;
-
-		if (!uxa_pixmap_is_offscreen(pixmap)) {
-			screen->DestroyPixmap(pixmap);
-			goto fallback;
-		}
-
-		gc = GetScratchGC(depth, screen);
-		if (!gc) {
-			screen->DestroyPixmap(pixmap);
-			return;
-		}
-
-		ValidateGC(&pixmap->drawable, gc);
-		gc->ops->CopyArea(pDst->pDrawable, &pixmap->drawable, gc,
-				  extents.x1, extents.y1,
-				  width, height,
-				  0, 0);
-		FreeScratchGC(gc);
-
-		localDst = CreatePicture(0, &pixmap->drawable,
-					 PictureMatchFormat(screen, depth, pDst->format),
-					 0, 0, serverClient, &error);
-		screen->DestroyPixmap(pixmap);
-
-		if (!localDst)
-			return;
-
-		ValidatePicture(localDst);
-	}
-
-	if (maskFormat) {
-		ret = uxa_glyphs_via_mask(op,
-					  pSrc, localDst, maskFormat,
-					  xSrc, ySrc,
-					  xDst, yDst,
-					  nlist, list, glyphs,
-					  have_extents ? &extents : NULL);
-	} else {
-		ret = uxa_glyphs_to_dst(op,
-					pSrc, localDst,
-					xSrc, ySrc,
-					xDst, yDst,
-					nlist, list, glyphs,
-					have_extents ? &extents : NULL);
-	}
-	if (ret) {
-		if (localDst != pDst)
-			FreePicture(localDst, 0);
-
+	if (!maskFormat)
 		goto fallback;
-	}
 
-	if (localDst != pDst) {
-		GCPtr gc;
-
-		gc = GetScratchGC(pDst->pDrawable->depth, screen);
-		if (gc) {
-			ValidateGC(pDst->pDrawable, gc);
-			gc->ops->CopyArea(localDst->pDrawable, pDst->pDrawable, gc,
-					  0, 0,
-					  width, height,
-					  extents.x1, extents.y1);
-			FreeScratchGC(gc);
-		}
-
-		FreePicture(localDst, 0);
-	}
+	if (uxa_glyphs_via_mask(op,
+				pSrc, pDst, maskFormat,
+				xSrc, ySrc,
+				nlist, list, glyphs))
+		goto fallback;
 }
commit 451489b49916cf5a9d27844196f9656e590d9124
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 30 10:21:26 2012 +0100

    sna/gen7: Allow per-device specific maxima
    
    As the maximum thread count and urb size differs between different
    incarnations of the GT units, be a little more flexible in programming
    those maximums.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 1491167..6917c21 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -67,6 +67,31 @@
 
 #define is_aligned(x, y) (((x) & ((y) - 1)) == 0)
 
+struct gt_info {
+	int max_vs_threads;
+	int max_gs_threads;
+	int max_wm_threads;
+	struct {
+		int size;
+		int max_vs_entries;
+		int max_gs_entries;
+	} urb;
+};
+
+static const struct gt_info gt1_info = {
+	.max_vs_threads = 36,
+	.max_gs_threads = 36,
+	.max_wm_threads = 86,
+	.urb = { 128, 512, 192 },
+};
+
+static const struct gt_info gt2_info = {
+	.max_vs_threads = 128,
+	.max_gs_threads = 128,
+	.max_wm_threads = 86,
+	.urb = { 256, 704, 320 },
+};
+
 static const uint32_t ps_kernel_nomask_affine[][4] = {
 #include "exa_wm_src_affine.g7b"
 #include "exa_wm_src_sample_argb.g7b"
@@ -427,7 +452,7 @@ gen7_emit_urb(struct sna *sna)
 
 	/* num of VS entries must be divisible by 8 if size < 9 */
 	OUT_BATCH(GEN7_3DSTATE_URB_VS | (2 - 2));
-	OUT_BATCH((32 << GEN7_URB_ENTRY_NUMBER_SHIFT) | /* at least 32 */
+	OUT_BATCH((sna->render_state.gen7.info->urb.max_vs_entries << GEN7_URB_ENTRY_NUMBER_SHIFT) |
 		  (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
 		  (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
 
@@ -802,7 +827,7 @@ gen7_emit_wm(struct sna *sna, unsigned int kernel, int nr_surfaces, int nr_input
 	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
 		  nr_surfaces << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
 	OUT_BATCH(0); /* scratch address */
-	OUT_BATCH((86 - 1) << GEN7_PS_MAX_THREADS_SHIFT |
+	OUT_BATCH((sna->render_state.gen7.info->max_wm_threads - 1) << GEN7_PS_MAX_THREADS_SHIFT |
 		  GEN7_PS_ATTRIBUTE_ENABLE |
 		  GEN7_PS_16_DISPATCH_ENABLE);
 	OUT_BATCH(6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
@@ -4292,6 +4317,10 @@ static Bool gen7_render_setup(struct sna *sna)
 	struct gen7_sampler_state *ss;
 	int i, j, k, l, m;
 
+	state->info = &gt1_info;
+	if (DEVICE_ID(sna->PciInfo) & 0x20)
+		state->info = &gt2_info;
+
 	sna_static_stream_init(&general);
 
 	/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 8ad8efc..7a1e2f6 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -257,7 +257,6 @@ struct sna {
 #define SNA_TILING_3D		0x4
 #define SNA_TILING_ALL (~0)
 
-	int Chipset;
 	EntityInfoPtr pEnt;
 	struct pci_device *PciInfo;
 	struct intel_chipset chipset;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index a83f78e..73ef568 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -428,6 +428,7 @@ enum {
 };
 
 struct gen7_render_state {
+	const struct gt_info *info;
 	struct kgem_bo *general_bo;
 
 	uint32_t vs_state;
commit ea36f2c4a3fa9afa8184eeaf944af9924c080368
Author: Eugeni Dodonov <eugeni.dodonov at intel.com>
Date:   Thu Mar 29 21:08:29 2012 -0300

    Add support for Ivy Bridge GT2 Server chipset
    
    Sometimes known as Bromlow.
    
    Signed-off-by: Eugeni Dodonov <eugeni.dodonov at intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.h b/src/intel_driver.h
index e9d6d9e..98973e5 100644
--- a/src/intel_driver.h
+++ b/src/intel_driver.h
@@ -190,6 +190,7 @@
 #define PCI_CHIP_IVYBRIDGE_D_GT1	0x0152
 #define PCI_CHIP_IVYBRIDGE_D_GT2	0x0162
 #define PCI_CHIP_IVYBRIDGE_S_GT1	0x015a
+#define PCI_CHIP_IVYBRIDGE_S_GT2	0x016a
 
 #endif
 
diff --git a/src/intel_module.c b/src/intel_module.c
index 2c0e5cc..c6f94f5 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -142,6 +142,7 @@ static const SymTabRec _intel_chipsets[] = {
 	{PCI_CHIP_IVYBRIDGE_D_GT1,		"Ivybridge Desktop (GT1)" },
 	{PCI_CHIP_IVYBRIDGE_D_GT2,		"Ivybridge Desktop (GT2)" },
 	{PCI_CHIP_IVYBRIDGE_S_GT1,		"Ivybridge Server" },
+	{PCI_CHIP_IVYBRIDGE_S_GT2,		"Ivybridge Server (GT2)" },
 	{-1,					NULL}
 };
 #define NUM_CHIPSETS (sizeof(_intel_chipsets) / sizeof(_intel_chipsets[0]))
@@ -210,6 +211,7 @@ static const struct pci_id_match intel_device_match[] = {
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
 	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
+	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),
 
 	{ 0, 0, 0 },
 };
commit 6142232fa0feeb39412cda85ca727cc770eaa042
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 28 18:59:26 2012 +0100

    sna: Add video sprite support for ILK+
    
    Based on the work by Jesse Barnes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 70afd53..911a857 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -60,6 +60,7 @@ libsna_la_SOURCES = \
 	sna_video.c \
 	sna_video.h \
 	sna_video_overlay.c \
+	sna_video_sprite.c \
 	sna_video_textured.c \
 	gen2_render.c \
 	gen2_render.h \
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 949c18f..8ad8efc 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -371,6 +371,7 @@ void sna_dri_close(struct sna *sna, ScreenPtr pScreen);
 
 extern Bool sna_crtc_on(xf86CrtcPtr crtc);
 int sna_crtc_to_pipe(xf86CrtcPtr crtc);
+int sna_crtc_to_plane(xf86CrtcPtr crtc);
 
 CARD32 sna_format_for_depth(int depth);
 CARD32 sna_render_format_for_depth(int depth);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index a5d69dd..ecfe670 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -60,6 +60,7 @@ struct sna_crtc {
 	int num;
 	int id;
 	int pipe;
+	int plane;
 	int active;
 	struct list link;
 };
@@ -144,6 +145,12 @@ int sna_crtc_to_pipe(xf86CrtcPtr crtc)
 	return sna_crtc->pipe;
 }
 
+int sna_crtc_to_plane(xf86CrtcPtr crtc)
+{
+	struct sna_crtc *sna_crtc = crtc->driver_private;
+	return sna_crtc->plane;
+}
+
 static uint32_t gem_create(int fd, int size)
 {
 	struct drm_i915_gem_create create;
@@ -852,6 +859,37 @@ static const xf86CrtcFuncsRec sna_crtc_funcs = {
 	.destroy = sna_crtc_destroy,
 };
 
+static uint32_t
+sna_crtc_find_plane(struct sna *sna, int pipe)
+{
+	drmModePlaneRes *resources;
+	uint32_t id = 0;
+	int i;
+
+	resources = drmModeGetPlaneResources(sna->kgem.fd);
+	if (!resources) {
+		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+			   "failed to get plane resources: %s\n",
+			   strerror(errno));
+		return 0;
+	}
+
+	for (i = 0; id == 0 && i < resources->count_planes; i++) {
+		drmModePlane *p;
+
+		p = drmModeGetPlane(sna->kgem.fd, resources->planes[i]);
+		if (p) {
+			if (p->possible_crtcs & (1 << pipe))
+				id = p->plane_id;
+
+			drmModeFreePlane(p);
+		}
+	}
+
+	free(resources);
+	return id;
+}
+
 static void
 sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 {
@@ -878,6 +916,7 @@ sna_crtc_init(ScrnInfoPtr scrn, struct sna_mode *mode, int num)
 		 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
 		 &get_pipe);
 	sna_crtc->pipe = get_pipe.pipe;
+	sna_crtc->plane = sna_crtc_find_plane(sna, sna_crtc->pipe);
 
 	if (xf86IsEntityShared(scrn->entityList[0]) &&
 	    scrn->confScreen->device->screen != sna_crtc->pipe) {
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 56cf260..c80ccfb 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -83,12 +83,17 @@ void sna_video_free_buffers(struct sna *sna, struct sna_video *video)
 
 	for (i = 0; i < ARRAY_SIZE(video->old_buf); i++) {
 		if (video->old_buf[i]) {
+			if (video->old_buf[i]->unique_id)
+				drmModeRmFB(sna->kgem.fd,
+						video->old_buf[i]->unique_id);
 			kgem_bo_destroy(&sna->kgem, video->old_buf[i]);
 			video->old_buf[i] = NULL;
 		}
 	}
 
 	if (video->buf) {
+		if (video->buf->unique_id)
+			drmModeRmFB(sna->kgem.fd, video->buf->unique_id);
 		kgem_bo_destroy(&sna->kgem, video->buf);
 		video->buf = NULL;
 	}
@@ -440,6 +445,11 @@ sna_video_copy_data(struct sna *sna,
 	if (frame->bo == NULL)
 		return FALSE;
 
+	DBG(("%s: handle=%d, size=%dx%d, rotation=%d\n",
+	     __FUNCTION__, frame->bo->handle, frame->width, frame->height,
+	     video->rotation));
+	DBG(("%s: top=%d, left=%d\n", __FUNCTION__, frame->top, frame->left));
+
 	/* In the common case, we can simply the upload in a single pwrite */
 	if (video->rotation == RR_Rotate_0) {
 		if (is_planar_fourcc(frame->id)) {
@@ -472,8 +482,8 @@ sna_video_copy_data(struct sna *sna,
 		}
 	}
 
-	/* copy data */
-	dst = kgem_bo_map(&sna->kgem, frame->bo);
+	/* copy data, must use GTT so that we keep the overlay uncached */
+	dst = kgem_bo_map__gtt(&sna->kgem, frame->bo);
 	if (dst == NULL)
 		return FALSE;
 
@@ -510,7 +520,9 @@ void sna_video_init(struct sna *sna, ScreenPtr screen)
 	 * supported hardware.
 	 */
 	textured = sna_video_textured_setup(sna, screen);
-	overlay = sna_video_overlay_setup(sna, screen);
+	overlay = sna_video_sprite_setup(sna, screen);
+	if (overlay == NULL)
+		overlay = sna_video_overlay_setup(sna, screen);
 
 	if (overlay && prefer_overlay)
 		adaptors[num_adaptors++] = overlay;
diff --git a/src/sna/sna_video.h b/src/sna/sna_video.h
index 47ddab0..687fbe1 100644
--- a/src/sna/sna_video.h
+++ b/src/sna/sna_video.h
@@ -51,6 +51,7 @@ struct sna_video {
 	uint32_t gamma5;
 
 	int color_key;
+	int color_key_changed;
 
 	/** YUV data buffers */
 	struct kgem_bo *old_buf[2];
@@ -58,6 +59,7 @@ struct sna_video {
 
 	Bool textured;
 	Rotation rotation;
+	int plane;
 
 	int SyncToVblank;	/* -1: auto, 0: off, 1: on */
 };
@@ -78,10 +80,9 @@ struct sna_video_frame {
 };
 
 void sna_video_init(struct sna *sna, ScreenPtr screen);
-XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna,
-					    ScreenPtr screen);
-XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna,
-					     ScreenPtr screen);
+XF86VideoAdaptorPtr sna_video_overlay_setup(struct sna *sna, ScreenPtr screen);
+XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna, ScreenPtr screen);
+XF86VideoAdaptorPtr sna_video_textured_setup(struct sna *sna, ScreenPtr screen);
 
 #define FOURCC_XVMC     (('C' << 24) + ('M' << 16) + ('V' << 8) + 'X')
 
diff --git a/src/sna/sna_video_sprite.c b/src/sna/sna_video_sprite.c
new file mode 100644
index 0000000..82db122
--- /dev/null
+++ b/src/sna/sna_video_sprite.c
@@ -0,0 +1,434 @@
+/***************************************************************************
+
+ Copyright 2000-2011 Intel Corporation.  All Rights Reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sub license, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice (including the
+ next paragraph) shall be included in all copies or substantial portions
+ of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ IN NO EVENT SHALL INTEL, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
+ THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ **************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "sna.h"
+#include "sna_video.h"
+
+#include <xf86xv.h>
+#include <X11/extensions/Xv.h>
+#include <fourcc.h>
+#include <drm_fourcc.h>
+#include <i915_drm.h>
+
+#if DEBUG_VIDEO_OVERLAY
+#undef DBG
+#define DBG(x) ErrorF x
+#endif
+
+#define IMAGE_MAX_WIDTH		2048
+#define IMAGE_MAX_HEIGHT	2048
+
+#define MAKE_ATOM(a) MakeAtom(a, sizeof(a) - 1, TRUE)
+
+static Atom xvColorKey;
+
+static XF86VideoFormatRec xv_formats[] = {
+	{15, TrueColor}, {16, TrueColor}, {24, TrueColor}
+};
+static XF86ImageRec xv_images[] = { XVIMAGE_YUY2, XVIMAGE_UYVY, };
+static const XF86VideoEncodingRec xv_dummy_encoding[] = {
+	{ 0, "XV_IMAGE", IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT, {1, 1} }
+};
+static XF86AttributeRec attribs[] = {
+	{XvSettable | XvGettable, 0, 0xffffff, "XV_COLORKEY"},
+};
+
+static void sna_video_sprite_off(struct sna *sna, struct sna_video *video)
+{
+	if (video->plane == 0)
+		return;
+
+	if (drmModeSetPlane(sna->kgem.fd,
+			    video->plane, 0, 0, 0,
+			    0, 0, 0, 0,
+			    0, 0, 0, 0))
+		xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+			   "failed to disable plane\n");
+
+	video->plane = 0;
+}
+
+static void sna_video_sprite_stop(ScrnInfoPtr scrn, pointer data, Bool shutdown)
+{
+	return sna_video_sprite_off(to_sna(scrn), data);
+}
+
+static int sna_video_sprite_set_attr(ScrnInfoPtr scrn,
+				     Atom attribute, INT32 value,
+				     pointer data)
+{
+	struct sna_video *video = data;
+
+	if (attribute == xvColorKey) {
+		video->color_key_changed = TRUE;
+		video->color_key = value;
+		DBG(("COLORKEY = %d\n", value));
+	} else
+		return BadMatch;
+
+	return Success;
+}
+
+static int sna_video_sprite_get_attr(ScrnInfoPtr scrn,
+				     Atom attribute, INT32 *value,
+				     pointer data)
+{
+	struct sna_video *video = data;
+
+	if (attribute == xvColorKey)
+		*value = video->color_key;
+	else
+		return BadMatch;
+
+	return Success;
+}
+
+static void sna_video_sprite_best_size(ScrnInfoPtr scrn, Bool motion,
+				       short vid_w, short vid_h,
+				       short drw_w, short drw_h,
+				       unsigned int *p_w, unsigned int *p_h,
+				       pointer data)
+{
+	*p_w = vid_w;
+	*p_h = vid_h;
+}
+
+static void
+update_dst_box_to_crtc_coords(struct sna *sna, xf86CrtcPtr crtc, BoxPtr dstBox)
+{
+	ScrnInfoPtr scrn = sna->scrn;
+	int tmp;
+
+	switch (crtc->rotation & 0xf) {
+	case RR_Rotate_0:
+		dstBox->x1 -= crtc->x;
+		dstBox->x2 -= crtc->x;
+		dstBox->y1 -= crtc->y;
+		dstBox->y2 -= crtc->y;
+		break;
+
+	case RR_Rotate_90:
+		tmp = dstBox->x1;
+		dstBox->x1 = dstBox->y1 - crtc->x;
+		dstBox->y1 = scrn->virtualX - tmp - crtc->y;
+		tmp = dstBox->x2;
+		dstBox->x2 = dstBox->y2 - crtc->x;
+		dstBox->y2 = scrn->virtualX - tmp - crtc->y;
+		tmp = dstBox->y1;
+		dstBox->y1 = dstBox->y2;
+		dstBox->y2 = tmp;
+		break;
+
+	case RR_Rotate_180:
+		tmp = dstBox->x1;
+		dstBox->x1 = scrn->virtualX - dstBox->x2 - crtc->x;
+		dstBox->x2 = scrn->virtualX - tmp - crtc->x;
+		tmp = dstBox->y1;
+		dstBox->y1 = scrn->virtualY - dstBox->y2 - crtc->y;
+		dstBox->y2 = scrn->virtualY - tmp - crtc->y;
+		break;
+
+	case RR_Rotate_270:
+		tmp = dstBox->x1;
+		dstBox->x1 = scrn->virtualY - dstBox->y1 - crtc->x;
+		dstBox->y1 = tmp - crtc->y;
+		tmp = dstBox->x2;
+		dstBox->x2 = scrn->virtualY - dstBox->y2 - crtc->x;
+		dstBox->y2 = tmp - crtc->y;
+		tmp = dstBox->x1;
+		dstBox->x1 = dstBox->x2;
+		dstBox->x2 = tmp;
+		break;
+	}
+}
+
+static Bool
+sna_video_sprite_show(struct sna *sna,
+		      struct sna_video *video,
+		      struct sna_video_frame *frame,
+		      xf86CrtcPtr crtc,
+		      BoxPtr dstBox)
+{
+	int plane = sna_crtc_to_plane(crtc);
+
+	update_dst_box_to_crtc_coords(sna, crtc, dstBox);
+	if (crtc->rotation & (RR_Rotate_90 | RR_Rotate_270)) {
+		int tmp = frame->width;
+		frame->width = frame->height;
+		frame->height = tmp;
+	}
+
+#if defined(DRM_I915_SET_SPRITE_DESTKEY)
+	if (video->color_key_changed || video->plane != plane) {
+		struct drm_intel_set_sprite_destkey set;
+
+		DBG(("%s: updating color key: %x\n",
+		     __FUNCTION__, video->color_key));
+
+		set.plane_id = plane;
+		set.value = video->color_key;
+
+		if (drmCommandWrite(sna->kgem.fd,
+				    DRM_I915_SET_SPRITE_DESTKEY,
+				    &set, sizeof(set)))
+			xf86DrvMsg(sna->scrn->scrnIndex, X_ERROR,
+				   "failed to update color key\n");
+
+		video->color_key_changed = FALSE;
+	}
+#endif
+
+	if (frame->bo->unique_id == 0) {
+		uint32_t offsets[4], pitches[4], handles[4];
+		uint32_t pixel_format;
+
+		switch (frame->id) {
+		case FOURCC_UYVY:
+			pixel_format = DRM_FORMAT_UYVY;
+			break;
+		case FOURCC_YUY2:
+		default:
+			pixel_format = DRM_FORMAT_YUYV;
+			break;
+		}
+
+		handles[0] = frame->bo->handle;
+		pitches[0] = frame->pitch[0];
+		offsets[0] = 0;
+
+		DBG(("%s: creating new fb for handle=%d\n",
+		     __FUNCTION__, frame->bo->handle));
+
+		if (drmModeAddFB2(sna->kgem.fd,
+				  frame->width, frame->height, pixel_format,
+				  handles, pitches, offsets,
+				  &frame->bo->unique_id, 0)) {
+			xf86DrvMsg(sna->scrn->scrnIndex,
+				   X_ERROR, "failed to add fb\n");
+			return false;
+		}
+	}
+
+	DBG(("%s: updating plane=%d, handle=%d [fb %d], dst=(%d,%d)x(%d,%d)\n",
+	     __FUNCTION__, plane, frame->bo->handle, frame->bo->unique_id,
+	     dstBox->x1, dstBox->y1,
+	     dstBox->x2 - dstBox->x1, dstBox->y2 - dstBox->y1));
+	if (drmModeSetPlane(sna->kgem.fd,
+			    plane, sna_crtc_id(crtc), frame->bo->unique_id, 0,
+			    dstBox->x1, dstBox->y1,
+			    dstBox->x2 - dstBox->x1, dstBox->y2 - dstBox->y1,
+			    0, 0, frame->width << 16, frame->height << 16))
+		return false;
+
+	video->plane = plane;
+	return true;
+}
+
+static int sna_video_sprite_put_image(ScrnInfoPtr scrn,
+				      short src_x, short src_y,
+				      short drw_x, short drw_y,
+				      short src_w, short src_h,
+				      short drw_w, short drw_h,
+				      int id, unsigned char *buf,
+				      short width, short height,
+				      Bool sync, RegionPtr clip, pointer data,
+				      DrawablePtr drawable)
+{
+	struct sna *sna = to_sna(scrn);
+	struct sna_video *video = data;
+	struct sna_video_frame frame;
+	xf86CrtcPtr crtc;
+	BoxRec dst_box;
+
+	sna_video_frame_init(sna, video, id, width, height, &frame);
+
+	if (!sna_video_clip_helper(scrn, video, &frame, &crtc, &dst_box,
+				   src_x, src_y, drw_x, drw_y,
+				   src_w, src_h, drw_w, drw_h,
+				   clip))
+		return Success;
+
+	if (!crtc || !sna_crtc_to_plane(crtc)) {
+		/* If the video isn't visible on any CRTC, turn it off */
+		sna_video_sprite_off(sna, video);
+		return Success;
+	}
+
+	/* sprites can't handle rotation natively, store it for the copy func */
+	video->rotation = crtc->rotation;
+
+	frame.bo = sna_video_buffer(sna, video, &frame);
+	if (frame.bo == NULL) {
+		DBG(("%s: failed to allocate video bo\n", __FUNCTION__));
+		return BadAlloc;
+	}
+
+	if (!sna_video_copy_data(sna, video, &frame, buf)) {
+		DBG(("%s: failed to copy video data\n", __FUNCTION__));
+		return BadAlloc;
+	}
+
+	if (!sna_video_sprite_show(sna, video, &frame, crtc, &dst_box)) {
+		DBG(("%s: failed to show video frame\n", __FUNCTION__));
+		return BadAlloc;
+	}
+
+	sna_video_buffer_fini(sna, video);
+
+	if (!REGION_EQUAL(scrn->pScreen, &video->clip, clip)) {
+		REGION_COPY(scrn->pScreen, &video->clip, clip);
+		xf86XVFillKeyHelperDrawable(drawable, video->color_key, clip);
+	}
+
+	return Success;
+}
+
+static int sna_video_sprite_query_attrs(ScrnInfoPtr scrn, int id,
+					unsigned short *w, unsigned short *h,
+					int *pitches, int *offsets)
+{
+	int size;
+
+	if (*w > IMAGE_MAX_WIDTH)
+		*w = IMAGE_MAX_WIDTH;
+	if (*h > IMAGE_MAX_HEIGHT)
+		*h = IMAGE_MAX_HEIGHT;
+
+	*w = (*w + 1) & ~1;
+	if (offsets)
+		offsets[0] = 0;
+
+	switch (id) {
+	case FOURCC_YUY2:
+	default:
+		size = *w << 1;
+		if (pitches)
+			pitches[0] = size;
+		size *= *h;
+		break;
+	}
+
+	return size;
+}
+
+static int sna_video_sprite_color_key(struct sna *sna)
+{
+	ScrnInfoPtr scrn = sna->scrn;
+	int color_key;
+
+	if (xf86GetOptValInteger(sna->Options, OPTION_VIDEO_KEY,
+				 &color_key)) {
+	} else if (xf86GetOptValInteger(sna->Options, OPTION_COLOR_KEY,
+					&color_key)) {
+	} else {
+		color_key =
+		    (1 << scrn->offset.red) |
+		    (1 << scrn->offset.green) |
+		    (((scrn->mask.blue >> scrn->offset.blue) - 1) << scrn->offset.blue);
+	}
+
+	return color_key & ((1 << scrn->depth) - 1);
+}
+
+XF86VideoAdaptorPtr sna_video_sprite_setup(struct sna *sna,
+					   ScreenPtr screen)
+{
+	XF86VideoAdaptorPtr adaptor;
+	struct sna_video *video;
+	drmModePlaneRes *plane_resources;
+
+	plane_resources = drmModeGetPlaneResources(sna->kgem.fd);
+	if (!plane_resources)
+		return NULL;
+
+	adaptor = calloc(1,
+			 sizeof(XF86VideoAdaptorRec) +
+			 sizeof(struct sna_video) +
+			 sizeof(DevUnion));
+	if (!adaptor) {
+		free(plane_resources);
+		return NULL;
+	}
+
+	adaptor->type = XvWindowMask | XvInputMask | XvImageMask;
+	adaptor->flags = VIDEO_OVERLAID_IMAGES /*| VIDEO_CLIP_TO_VIEWPORT */ ;
+	adaptor->name = "Intel(R) Video Sprite";
+	adaptor->nEncodings = ARRAY_SIZE(xv_dummy_encoding);
+	adaptor->pEncodings = xnfalloc(sizeof(xv_dummy_encoding));
+	memcpy(adaptor->pEncodings, xv_dummy_encoding, sizeof(xv_dummy_encoding));
+	adaptor->nFormats = ARRAY_SIZE(xv_formats);
+	adaptor->pFormats = xv_formats;
+	adaptor->nPorts = 1;
+	adaptor->pPortPrivates = (DevUnion *)&adaptor[1];
+
+	video = (struct sna_video *)&adaptor->pPortPrivates[1];
+	adaptor->pPortPrivates[0].ptr = video;
+
+	adaptor->nAttributes = ARRAY_SIZE(attribs);
+	adaptor->pAttributes = attribs;
+
+	adaptor->nImages = ARRAY_SIZE(xv_images);
+	adaptor->pImages = xv_images;
+
+	adaptor->PutVideo = NULL;
+	adaptor->PutStill = NULL;
+	adaptor->GetVideo = NULL;
+	adaptor->GetStill = NULL;
+	adaptor->StopVideo = sna_video_sprite_stop;
+	adaptor->SetPortAttribute = sna_video_sprite_set_attr;
+	adaptor->GetPortAttribute = sna_video_sprite_get_attr;
+	adaptor->QueryBestSize = sna_video_sprite_best_size;
+	adaptor->PutImage = sna_video_sprite_put_image;
+	adaptor->QueryImageAttributes = sna_video_sprite_query_attrs;
+
+	video->textured = FALSE;
+	video->color_key = sna_video_sprite_color_key(sna);
+	video->color_key_changed = TRUE;
+	video->brightness = -19;	/* (255/219) * -16 */
+	video->contrast = 75;	/* 255/219 * 64 */
+	video->saturation = 146;	/* 128/112 * 128 */
+	video->desired_crtc = NULL;
+	video->gamma5 = 0xc0c0c0;
+	video->gamma4 = 0x808080;
+	video->gamma3 = 0x404040;
+	video->gamma2 = 0x202020;
+	video->gamma1 = 0x101010;
+	video->gamma0 = 0x080808;
+
+	video->rotation = RR_Rotate_0;
+
+	REGION_NULL(screen, &video->clip);
+
+	xvColorKey = MAKE_ATOM("XV_COLORKEY");
+
+	free(plane_resources);
+
+	return adaptor;
+}
commit ae8aa172a7330439a8e6dda41f5e33eb257a139b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 28 22:07:10 2012 +0100

    sna: Fix up 32-bit overflow for maximum object size calculation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 964c6e9..27fa165 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -84,7 +84,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
 #define DRM_I915_GEM_VMAP       0x2c
 #define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
-#define I915_PARAM_HAS_VMAP              18
+#define I915_PARAM_HAS_VMAP              19
 struct drm_i915_gem_vmap {
 	uint64_t user_ptr;
 	uint32_t user_size;
@@ -690,7 +690,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		 * disable dual-stream mode */
 		kgem->min_alignment = 64;
 
-	kgem->max_object_size = 2 * kgem->aperture_total / 3;
+	kgem->max_object_size = 2 * aperture.aper_size / 3;
 	kgem->max_gpu_size = kgem->max_object_size;
 	if (!kgem->has_llc)
 		kgem->max_gpu_size = MAX_CACHE_SIZE;
@@ -741,9 +741,11 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	} else
 		kgem->max_cpu_size = 0;
 
+	DBG(("%s: maximum object size=%d\n",
+	     __FUNCTION__, kgem->max_object_size));
 	DBG(("%s: large object thresold=%d\n",
 	     __FUNCTION__, kgem->large_object_size));
-	DBG(("%s: max object size (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
+	DBG(("%s: max object sizes (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
 	     __FUNCTION__,
 	     kgem->max_gpu_size, kgem->max_cpu_size,
 	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));
@@ -2392,11 +2394,16 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 	uint32_t pitch, size;
 	unsigned flags = 0;
 
-	if (depth < 8)
+	if (depth < 8) {
+		DBG(("%s: unhandled depth %d\n", __FUNCTION__, depth));
 		return 0;
+	}
 
-	if (width > MAXSHORT || height > MAXSHORT)
+	if (width > MAXSHORT || height > MAXSHORT) {
+		DBG(("%s: unhandled size %dx%d\n",
+		     __FUNCTION__, width, height));
 		return 0;
+	}
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
@@ -2405,8 +2412,11 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 		flags |= KGEM_CAN_CREATE_CPU | KGEM_CAN_CREATE_GPU;
 	if (size > kgem->large_object_size)
 		flags |= KGEM_CAN_CREATE_LARGE;
-	if (size > kgem->max_object_size)
+	if (size > kgem->max_object_size) {
+		DBG(("%s: too large (untiled) %d > %d\n",
+		     __FUNCTION__, size, kgem->max_object_size));
 		return 0;
+	}
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
@@ -2417,8 +2427,11 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 		flags |= KGEM_CAN_CREATE_GPU;
 	if (size > kgem->large_object_size)
 		flags |= KGEM_CAN_CREATE_LARGE;
-	if (size > kgem->max_object_size)
+	if (size > kgem->max_object_size) {
+		DBG(("%s: too large (tiled) %d > %d\n",
+		     __FUNCTION__, size, kgem->max_object_size));
 		return 0;
+	}
 
 	return flags;
 }
commit 2e7b5f7eafbf452c781e50eba7dc8323260af59e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 27 10:42:59 2012 +0100

    sna/traps: Prefer to try mono spans on the GPU before trying inplace CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index be46789..4067757 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3092,12 +3092,14 @@ composite_unaligned_boxes(struct sna *sna,
 
 	switch (op) {
 	case PictOpAdd:
+	case PictOpOver:
 		if (priv->clear && priv->clear_color == 0)
 			op = PictOpSrc;
 		break;
 	case PictOpIn:
 		if (priv->clear && priv->clear_color == 0)
 			return true;
+		break;
 	}
 
 	memset(&tmp, 0, sizeof(tmp));
@@ -4479,7 +4481,7 @@ sna_composite_trapezoids(CARD8 op,
 
 	/* scan through for fast rectangles */
 	rectilinear = pixel_aligned = true;
-	if (maskFormat ? maskFormat->depth == 1 : dst->polyEdge == PolyEdgeSharp) {
+	if (is_mono(dst, maskFormat)) {
 		for (n = 0; n < ntrap && rectilinear; n++) {
 			int lx1 = pixman_fixed_to_int(traps[n].left.p1.x + pixman_fixed_1_minus_e/2);
 			int lx2 = pixman_fixed_to_int(traps[n].left.p2.x + pixman_fixed_1_minus_e/2);
@@ -4531,6 +4533,13 @@ sna_composite_trapezoids(CARD8 op,
 		}
 		flags |= COMPOSITE_SPANS_RECTILINEAR;
 	}
+
+	if (is_mono(dst, maskFormat) &&
+	    mono_trapezoids_span_converter(op, src, dst,
+					   xSrc, ySrc,
+					   ntrap, traps))
+		return;
+
 	if (trapezoid_spans_maybe_inplace(op, src, dst, maskFormat)) {
 		flags |= COMPOSITE_SPANS_INPLACE_HINT;
 		if (trapezoid_span_inplace(op, src, dst, maskFormat,
commit 522b41b393b979fdccf1ad62194cde8129f98c8c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 27 10:42:21 2012 +0100

    sna/traps: Fix the width of the left-hand edge of an unaligned box
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 9ab5ae2..be46789 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2759,7 +2759,7 @@ composite_unaligned_trap_row(struct sna *sna,
 	} else {
 		if (pixman_fixed_frac(trap->left.p1.x)) {
 			box.x1 = x1;
-			box.x2 = x1++;
+			box.x2 = ++x1;
 
 			opacity = covered;
 			opacity *= SAMPLES_X - grid_coverage(SAMPLES_X, trap->left.p1.x);
commit 008ad39b72eb86dc2f89789427be269c148feaf4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 27 10:41:37 2012 +0100

    sna/gen6: Reduce opaque solid OVER to SRC for render composite
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 6f1b55a..3be9195 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2033,6 +2033,7 @@ gen6_composite_solid_init(struct sna *sna,
 	channel->repeat = RepeatNormal;
 	channel->is_affine = TRUE;
 	channel->is_solid  = TRUE;
+	channel->is_opaque = (color >> 24) == 0xff;
 	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
@@ -2251,7 +2252,7 @@ gen6_composite_picture(struct sna *sna,
 				    x, y, w, h, dst_x, dst_y);
 }
 
-static void gen6_composite_channel_convert(struct sna_composite_channel *channel)
+inline static void gen6_composite_channel_convert(struct sna_composite_channel *channel)
 {
 	channel->repeat = gen6_repeat(channel->repeat);
 	channel->filter = gen6_filter(channel->filter);
@@ -2735,6 +2736,8 @@ gen6_render_composite(struct sna *sna,
 			DBG(("%s: choosing gen6_emit_composite_primitive_solid\n",
 			     __FUNCTION__));
 			tmp->prim_emit = gen6_emit_composite_primitive_solid;
+			if (tmp->src.is_opaque && op == PictOpOver)
+				tmp->op = PictOpSrc;
 		} else if (tmp->src.transform == NULL) {
 			DBG(("%s: choosing gen6_emit_composite_primitive_identity_source\n",
 			     __FUNCTION__));
commit a9e9041934b2486e5661ba6c550e6670009f69f3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 26 16:03:42 2012 +0100

    sna: Remove bogus assertion of no references to in-flight upload buffers
    
    As we may hold a cached reference to an upload buffer whilst it is
    in-flight, the assertion that there are no such references to a buffer
    being reused is no longer true. Those cached references will be released
    as soon as we retire the buffer during the readback and so we are free
    to reuse such an upload buffer for immediate readback.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 26abdd0..964c6e9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3502,7 +3502,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
 		    !bo->mmapped && size <= bytes(&bo->base)) {
-			assert(bo->base.refcnt == 1);
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 			gem_write(kgem->fd, bo->base.handle,
commit 89091dd8a3fba6520a651e3e135c87546b81df9b
Author: Paulo Zanoni <paulo.r.zanoni at intel.com>
Date:   Tue Mar 20 11:53:21 2012 -0300

    Avoid duplicated code with intel_output_create_ranged_atom
    
    Same change for intel_display.c and sna_display.c.
    
    Signed-off-by: Paulo Zanoni <paulo.r.zanoni at intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_display.c b/src/intel_display.c
index 11d0e2b..abdc372 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -992,6 +992,33 @@ intel_property_ignore(drmModePropertyPtr prop)
 	return FALSE;
 }
 
+static void
+intel_output_create_ranged_atom(xf86OutputPtr output, Atom *atom,
+				const char *name, INT32 min, INT32 max,
+				uint64_t value, Bool immutable)
+{
+	int err;
+	INT32 atom_range[2];
+
+	atom_range[0] = min;
+	atom_range[1] = max;
+
+	*atom = MakeAtom(name, strlen(name), TRUE);
+
+	err = RRConfigureOutputProperty(output->randr_output, *atom, FALSE,
+					TRUE, immutable, 2, atom_range);
+	if (err != 0)
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "RRConfigureOutputProperty error, %d\n", err);
+
+	err = RRChangeOutputProperty(output->randr_output, *atom, XA_INTEGER,
+				     32, PropModeReplace, 1, &value, FALSE,
+				     TRUE);
+	if (err != 0)
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "RRChangeOutputProperty error, %d\n", err);
+}
+
 #define BACKLIGHT_NAME             "Backlight"
 #define BACKLIGHT_DEPRECATED_NAME  "BACKLIGHT"
 static Atom backlight_atom, backlight_deprecated_atom;
@@ -1031,30 +1058,18 @@ intel_output_create_resources(xf86OutputPtr output)
 		drmModePropertyPtr drmmode_prop = p->mode_prop;
 
 		if (drmmode_prop->flags & DRM_MODE_PROP_RANGE) {
-			INT32 range[2];
-
 			p->num_atoms = 1;
 			p->atoms = calloc(p->num_atoms, sizeof(Atom));
 			if (!p->atoms)
 				continue;
 
-			p->atoms[0] = MakeAtom(drmmode_prop->name, strlen(drmmode_prop->name), TRUE);
-			range[0] = drmmode_prop->values[0];
-			range[1] = drmmode_prop->values[1];
-			err = RRConfigureOutputProperty(output->randr_output, p->atoms[0],
-							FALSE, TRUE,
-							drmmode_prop->flags & DRM_MODE_PROP_IMMUTABLE ? TRUE : FALSE,
-							2, range);
-			if (err != 0) {
-				xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-					   "RRConfigureOutputProperty error, %d\n", err);
-			}
-			err = RRChangeOutputProperty(output->randr_output, p->atoms[0],
-						     XA_INTEGER, 32, PropModeReplace, 1, &p->value, FALSE, TRUE);
-			if (err != 0) {
-				xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-					   "RRChangeOutputProperty error, %d\n", err);
-			}
+			intel_output_create_ranged_atom(output, &p->atoms[0],
+							drmmode_prop->name,
+							drmmode_prop->values[0],
+							drmmode_prop->values[1],
+							p->value,
+							drmmode_prop->flags & DRM_MODE_PROP_IMMUTABLE ? TRUE : FALSE);
+
 		} else if (drmmode_prop->flags & DRM_MODE_PROP_ENUM) {
 			p->num_atoms = drmmode_prop->count_enums + 1;
 			p->atoms = calloc(p->num_atoms, sizeof(Atom));
@@ -1090,50 +1105,21 @@ intel_output_create_resources(xf86OutputPtr output)
 	}
 
 	if (intel_output->backlight_iface) {
-		INT32 data, backlight_range[2];
-
 		/* Set up the backlight property, which takes effect
 		 * immediately and accepts values only within the
 		 * backlight_range.
 		 */
-		backlight_atom = MakeAtom(BACKLIGHT_NAME, sizeof(BACKLIGHT_NAME) - 1, TRUE);
-		backlight_deprecated_atom = MakeAtom(BACKLIGHT_DEPRECATED_NAME,
-						     sizeof(BACKLIGHT_DEPRECATED_NAME) - 1, TRUE);
-
-		backlight_range[0] = 0;
-		backlight_range[1] = intel_output->backlight_max;
-		err = RRConfigureOutputProperty(output->randr_output,
-					       	backlight_atom,
-						FALSE, TRUE, FALSE,
-					       	2, backlight_range);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRConfigureOutputProperty error, %d\n", err);
-		}
-		err = RRConfigureOutputProperty(output->randr_output,
-					       	backlight_deprecated_atom,
-						FALSE, TRUE, FALSE,
-					       	2, backlight_range);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRConfigureOutputProperty error, %d\n", err);
-		}
-		/* Set the current value of the backlight property */
-		data = intel_output->backlight_active_level;
-		err = RRChangeOutputProperty(output->randr_output, backlight_atom,
-					     XA_INTEGER, 32, PropModeReplace, 1, &data,
-					     FALSE, TRUE);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRChangeOutputProperty error, %d\n", err);
-		}
-		err = RRChangeOutputProperty(output->randr_output, backlight_deprecated_atom,
-					     XA_INTEGER, 32, PropModeReplace, 1, &data,
-					     FALSE, TRUE);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRChangeOutputProperty error, %d\n", err);
-		}
+		intel_output_create_ranged_atom(output, &backlight_atom,
+					BACKLIGHT_NAME, 0,
+					intel_output->backlight_max,
+					intel_output->backlight_active_level,
+					FALSE);
+		intel_output_create_ranged_atom(output,
+					&backlight_deprecated_atom,
+					BACKLIGHT_DEPRECATED_NAME, 0,
+					intel_output->backlight_max,
+					intel_output->backlight_active_level,
+					FALSE);
 	}
 }
 
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 9401ca4..a5d69dd 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1205,6 +1205,33 @@ sna_property_ignore(drmModePropertyPtr prop)
 	return FALSE;
 }
 
+static void
+sna_output_create_ranged_atom(xf86OutputPtr output, Atom *atom,
+			      const char *name, INT32 min, INT32 max,
+			      uint64_t value, Bool immutable)
+{
+	int err;
+	INT32 atom_range[2];
+
+	atom_range[0] = min;
+	atom_range[1] = max;
+
+	*atom = MakeAtom(name, strlen(name), TRUE);
+
+	err = RRConfigureOutputProperty(output->randr_output, *atom, FALSE,
+					TRUE, immutable, 2, atom_range);
+	if (err != 0)
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "RRConfigureOutputProperty error, %d\n", err);
+
+	err = RRChangeOutputProperty(output->randr_output, *atom, XA_INTEGER,
+				     32, PropModeReplace, 1, &value, FALSE,
+				     TRUE);
+	if (err != 0)
+		xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
+			   "RRChangeOutputProperty error, %d\n", err);
+}
+
 #define BACKLIGHT_NAME             "Backlight"
 #define BACKLIGHT_DEPRECATED_NAME  "BACKLIGHT"
 static Atom backlight_atom, backlight_deprecated_atom;
@@ -1244,30 +1271,18 @@ sna_output_create_resources(xf86OutputPtr output)
 		drmModePropertyPtr drmmode_prop = p->mode_prop;
 
 		if (drmmode_prop->flags & DRM_MODE_PROP_RANGE) {
-			INT32 range[2];
-
 			p->num_atoms = 1;
 			p->atoms = calloc(p->num_atoms, sizeof(Atom));
 			if (!p->atoms)
 				continue;
 
-			p->atoms[0] = MakeAtom(drmmode_prop->name, strlen(drmmode_prop->name), TRUE);
-			range[0] = drmmode_prop->values[0];
-			range[1] = drmmode_prop->values[1];
-			err = RRConfigureOutputProperty(output->randr_output, p->atoms[0],
-							FALSE, TRUE,
-							drmmode_prop->flags & DRM_MODE_PROP_IMMUTABLE ? TRUE : FALSE,
-							2, range);
-			if (err != 0) {
-				xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-					   "RRConfigureOutputProperty error, %d\n", err);
-			}
-			err = RRChangeOutputProperty(output->randr_output, p->atoms[0],
-						     XA_INTEGER, 32, PropModeReplace, 1, &p->value, FALSE, TRUE);
-			if (err != 0) {
-				xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-					   "RRChangeOutputProperty error, %d\n", err);
-			}
+			sna_output_create_ranged_atom(output, &p->atoms[0],
+						      drmmode_prop->name,
+						      drmmode_prop->values[0],
+						      drmmode_prop->values[1],
+						      p->value,
+						      drmmode_prop->flags & DRM_MODE_PROP_IMMUTABLE ? TRUE : FALSE);
+
 		} else if (drmmode_prop->flags & DRM_MODE_PROP_ENUM) {
 			p->num_atoms = drmmode_prop->count_enums + 1;
 			p->atoms = calloc(p->num_atoms, sizeof(Atom));
@@ -1303,50 +1318,21 @@ sna_output_create_resources(xf86OutputPtr output)
 	}
 
 	if (sna_output->backlight_iface) {
-		INT32 data, backlight_range[2];
-
 		/* Set up the backlight property, which takes effect
 		 * immediately and accepts values only within the
 		 * backlight_range.
 		 */
-		backlight_atom = MakeAtom(BACKLIGHT_NAME, sizeof(BACKLIGHT_NAME) - 1, TRUE);
-		backlight_deprecated_atom = MakeAtom(BACKLIGHT_DEPRECATED_NAME,
-						     sizeof(BACKLIGHT_DEPRECATED_NAME) - 1, TRUE);
-
-		backlight_range[0] = 0;
-		backlight_range[1] = sna_output->backlight_max;
-		err = RRConfigureOutputProperty(output->randr_output,
-					       	backlight_atom,
-						FALSE, TRUE, FALSE,
-					       	2, backlight_range);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRConfigureOutputProperty error, %d\n", err);
-		}
-		err = RRConfigureOutputProperty(output->randr_output,
-					       	backlight_deprecated_atom,
-						FALSE, TRUE, FALSE,
-					       	2, backlight_range);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRConfigureOutputProperty error, %d\n", err);
-		}
-		/* Set the current value of the backlight property */
-		data = sna_output->backlight_active_level;
-		err = RRChangeOutputProperty(output->randr_output, backlight_atom,
-					     XA_INTEGER, 32, PropModeReplace, 1, &data,
-					     FALSE, TRUE);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRChangeOutputProperty error, %d\n", err);
-		}
-		err = RRChangeOutputProperty(output->randr_output, backlight_deprecated_atom,
-					     XA_INTEGER, 32, PropModeReplace, 1, &data,
-					     FALSE, TRUE);
-		if (err != 0) {
-			xf86DrvMsg(output->scrn->scrnIndex, X_ERROR,
-				   "RRChangeOutputProperty error, %d\n", err);
-		}
+		sna_output_create_ranged_atom(output, &backlight_atom,
+					BACKLIGHT_NAME, 0,
+					sna_output->backlight_max,
+					sna_output->backlight_active_level,
+					FALSE);
+		sna_output_create_ranged_atom(output,
+					&backlight_deprecated_atom,
+					BACKLIGHT_DEPRECATED_NAME, 0,
+					sna_output->backlight_max,
+					sna_output->backlight_active_level,
+					FALSE);
 	}
 }
 
commit 62f9833298ea936eaefe973eca04b1dde858fbd8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 25 21:25:15 2012 +0100

    sna/gen2+: Approximate expensive gradients when using imprecise rendering
    
    If we lack the ability to use a shader to compute the gradients
    per-pixel, we need to use pixman to render a fallback texture. We can
    reduce the size of this texture and upsample to reduce the cost with
    hopefully imperceptible loss of quality.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 6907dd6..0ad346e 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1310,7 +1310,8 @@ gen2_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int x, int y,
 		       int w, int h,
-		       int dst_x, int dst_y)
+		       int dst_x, int dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -1343,6 +1344,8 @@ gen2_composite_picture(struct sna *sna,
 	}
 
 	if (picture->pDrawable == NULL) {
+		int ret;
+
 		if (picture->pSourcePict->type == SourcePictTypeLinear)
 			return gen2_composite_linear_init(sna, picture, channel,
 							  x, y,
@@ -1351,8 +1354,14 @@ gen2_composite_picture(struct sna *sna,
 
 		DBG(("%s -- fallback, unhandled source %d\n",
 		     __FUNCTION__, picture->pSourcePict->type));
-		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, w, h, dst_x, dst_y);
+		ret = -1;
+		if (!precise)
+			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+								      x, y, w, h, dst_x, dst_y);
+		if (ret == -1)
+			ret = sna_render_picture_fixup(sna, picture, channel,
+						       x, y, w, h, dst_x, dst_y);
+		return ret;
 	}
 
 	if (picture->alphaMap) {
@@ -1765,7 +1774,8 @@ gen2_render_composite(struct sna *sna,
 	switch (gen2_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
@@ -1781,7 +1791,8 @@ gen2_render_composite(struct sna *sna,
 			switch (gen2_composite_picture(sna, mask, &tmp->mask,
 						       mask_x, mask_y,
 						       width,  height,
-						       dst_x,  dst_y)) {
+						       dst_x,  dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				goto cleanup_src;
 			case 0:
@@ -2229,7 +2240,8 @@ gen2_render_composite_spans(struct sna *sna,
 	switch (gen2_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b236761..e798096 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2194,7 +2194,7 @@ gen3_init_linear(struct sna *sna,
 	op->u.gen3.constants[n++] = 0;
 
 	if (!gen3_gradient_setup(sna, picture, channel, ox, oy))
-		return 0;
+		return -1;
 
 	channel->u.gen3.type = SHADER_LINEAR;
 	op->u.gen3.num_constants = n;
@@ -2250,7 +2250,7 @@ gen3_init_radial(struct sna *sna,
 	}
 
 	if (!gen3_gradient_setup(sna, picture, channel, ox, oy))
-		return 0;
+		return -1;
 
 	channel->u.gen3.type = SHADER_RADIAL;
 	op->u.gen3.num_constants = n;
@@ -2285,7 +2285,8 @@ gen3_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int16_t x, int16_t y,
 		       int16_t w, int16_t h,
-		       int16_t dst_x, int16_t dst_y)
+		       int16_t dst_x, int16_t dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -2298,7 +2299,7 @@ gen3_composite_picture(struct sna *sna,
 
 	if (picture->pDrawable == NULL) {
 		SourcePict *source = picture->pSourcePict;
-		int ret = 0;
+		int ret = -1;
 
 		switch (source->type) {
 		case SourcePictTypeSolidFill:
@@ -2316,9 +2317,14 @@ gen3_composite_picture(struct sna *sna,
 			break;
 		}
 
-		if (ret == 0)
-			ret = sna_render_picture_fixup(sna, picture, channel,
-						       x, y, w, h, dst_x, dst_y);
+		if (ret == -1) {
+			if (!precise)
+				ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+									      x, y, w, h, dst_x, dst_y);
+			if (ret == -1)
+				ret = sna_render_picture_fixup(sna, picture, channel,
+							       x, y, w, h, dst_x, dst_y);
+		}
 		return ret;
 	}
 
@@ -2815,7 +2821,8 @@ gen3_render_composite(struct sna *sna,
 	switch (gen3_composite_picture(sna, src, tmp, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
@@ -2840,7 +2847,8 @@ gen3_render_composite(struct sna *sna,
 			switch (gen3_composite_picture(sna, mask, tmp, &tmp->mask,
 						       mask_x, mask_y,
 						       width,  height,
-						       dst_x,  dst_y)) {
+						       dst_x,  dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				goto cleanup_src;
 			case 0:
@@ -3379,7 +3387,8 @@ gen3_render_composite_spans(struct sna *sna,
 	switch (gen3_composite_picture(sna, src, &tmp->base, &tmp->base.src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index def5d19..2e78a92 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1957,7 +1957,8 @@ gen4_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int x, int y,
 		       int w, int h,
-		       int dst_x, int dst_y)
+		       int dst_x, int dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -1973,6 +1974,8 @@ gen4_composite_picture(struct sna *sna,
 		return gen4_composite_solid_init(sna, channel, color);
 
 	if (picture->pDrawable == NULL) {
+		int ret;
+
 		if (picture->pSourcePict->type == SourcePictTypeLinear)
 			return gen4_composite_linear_init(sna, picture, channel,
 							  x, y,
@@ -1980,8 +1983,14 @@ gen4_composite_picture(struct sna *sna,
 							  dst_x, dst_y);
 
 		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
-		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, w, h, dst_x, dst_y);
+		ret = -1;
+		if (!precise)
+			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+								      x, y, w, h, dst_x, dst_y);
+		if (ret == -1)
+			ret = sna_render_picture_fixup(sna, picture, channel,
+						       x, y, w, h, dst_x, dst_y);
+		return ret;
 	}
 
 	if (picture->alphaMap) {
@@ -2404,7 +2413,8 @@ gen4_render_composite(struct sna *sna,
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		DBG(("%s: failed to prepare source\n", __FUNCTION__));
 		goto cleanup_dst;
@@ -2449,7 +2459,8 @@ gen4_render_composite(struct sna *sna,
 			switch (gen4_composite_picture(sna, mask, &tmp->mask,
 						       msk_x, msk_y,
 						       width, height,
-						       dst_x, dst_y)) {
+						       dst_x, dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				DBG(("%s: failed to prepare mask\n", __FUNCTION__));
 				goto cleanup_src;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 565d22a..c27accd 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1990,7 +1990,8 @@ gen5_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int x, int y,
 		       int w, int h,
-		       int dst_x, int dst_y)
+		       int dst_x, int dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -2006,6 +2007,8 @@ gen5_composite_picture(struct sna *sna,
 		return gen5_composite_solid_init(sna, channel, color);
 
 	if (picture->pDrawable == NULL) {
+		int ret;
+
 		if (picture->pSourcePict->type == SourcePictTypeLinear)
 			return gen5_composite_linear_init(sna, picture, channel,
 							  x, y,
@@ -2013,8 +2016,14 @@ gen5_composite_picture(struct sna *sna,
 							  dst_x, dst_y);
 
 		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
-		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, w, h, dst_x, dst_y);
+		ret = -1;
+		if (!precise)
+			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+								      x, y, w, h, dst_x, dst_y);
+		if (ret == -1)
+			ret = sna_render_picture_fixup(sna, picture, channel,
+						       x, y, w, h, dst_x, dst_y);
+		return ret;
 	}
 
 	if (picture->alphaMap) {
@@ -2444,7 +2453,8 @@ gen5_render_composite(struct sna *sna,
 	switch (gen5_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		DBG(("%s: failed to prepare source picture\n", __FUNCTION__));
 		goto cleanup_dst;
@@ -2488,7 +2498,8 @@ gen5_render_composite(struct sna *sna,
 			switch (gen5_composite_picture(sna, mask, &tmp->mask,
 						       msk_x, msk_y,
 						       width, height,
-						       dst_x, dst_y)) {
+						       dst_x, dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				DBG(("%s: failed to prepare mask picture\n", __FUNCTION__));
 				goto cleanup_src;
@@ -2800,7 +2811,8 @@ gen5_render_composite_spans(struct sna *sna,
 	switch (gen5_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index fde0776..6f1b55a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2165,7 +2165,8 @@ gen6_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int x, int y,
 		       int w, int h,
-		       int dst_x, int dst_y)
+		       int dst_x, int dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -2181,6 +2182,8 @@ gen6_composite_picture(struct sna *sna,
 		return gen6_composite_solid_init(sna, channel, color);
 
 	if (picture->pDrawable == NULL) {
+		int ret;
+
 		if (picture->pSourcePict->type == SourcePictTypeLinear)
 			return gen6_composite_linear_init(sna, picture, channel,
 							  x, y,
@@ -2188,8 +2191,14 @@ gen6_composite_picture(struct sna *sna,
 							  dst_x, dst_y);
 
 		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
-		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, w, h, dst_x, dst_y);
+		ret = -1;
+		if (!precise)
+			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+								      x, y, w, h, dst_x, dst_y);
+		if (ret == -1)
+			ret = sna_render_picture_fixup(sna, picture, channel,
+						       x, y, w, h, dst_x, dst_y);
+		return ret;
 	}
 
 	if (picture->alphaMap) {
@@ -2646,7 +2655,8 @@ gen6_render_composite(struct sna *sna,
 	switch (gen6_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
@@ -2702,7 +2712,8 @@ gen6_render_composite(struct sna *sna,
 			switch (gen6_composite_picture(sna, mask, &tmp->mask,
 						       msk_x, msk_y,
 						       width, height,
-						       dst_x, dst_y)) {
+						       dst_x, dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				goto cleanup_src;
 			case 0:
@@ -3089,7 +3100,8 @@ gen6_render_composite_spans(struct sna *sna,
 	switch (gen6_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 36ea8a1..1491167 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2270,7 +2270,8 @@ gen7_composite_picture(struct sna *sna,
 		       struct sna_composite_channel *channel,
 		       int x, int y,
 		       int w, int h,
-		       int dst_x, int dst_y)
+		       int dst_x, int dst_y,
+		       bool precise)
 {
 	PixmapPtr pixmap;
 	uint32_t color;
@@ -2286,6 +2287,8 @@ gen7_composite_picture(struct sna *sna,
 		return gen7_composite_solid_init(sna, channel, color);
 
 	if (picture->pDrawable == NULL) {
+		int ret;
+
 		if (picture->pSourcePict->type == SourcePictTypeLinear)
 			return gen7_composite_linear_init(sna, picture, channel,
 							  x, y,
@@ -2293,8 +2296,14 @@ gen7_composite_picture(struct sna *sna,
 							  dst_x, dst_y);
 
 		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
-		return sna_render_picture_fixup(sna, picture, channel,
-						x, y, w, h, dst_x, dst_y);
+		ret = -1;
+		if (!precise)
+			ret = sna_render_picture_approximate_gradient(sna, picture, channel,
+								      x, y, w, h, dst_x, dst_y);
+		if (ret == -1)
+			ret = sna_render_picture_fixup(sna, picture, channel,
+						       x, y, w, h, dst_x, dst_y);
+		return ret;
 	}
 
 	if (picture->alphaMap) {
@@ -2732,7 +2741,8 @@ gen7_render_composite(struct sna *sna,
 	switch (gen7_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
@@ -2788,7 +2798,8 @@ gen7_render_composite(struct sna *sna,
 			switch (gen7_composite_picture(sna, mask, &tmp->mask,
 						       msk_x, msk_y,
 						       width, height,
-						       dst_x, dst_y)) {
+						       dst_x, dst_y,
+						       dst->polyMode == PolyModePrecise)) {
 			case -1:
 				goto cleanup_src;
 			case 0:
@@ -3185,7 +3196,8 @@ gen7_render_composite_spans(struct sna *sna,
 	switch (gen7_composite_picture(sna, src, &tmp->base.src,
 				       src_x, src_y,
 				       width, height,
-				       dst_x, dst_y)) {
+				       dst_x, dst_y,
+				       dst->polyMode == PolyModePrecise)) {
 	case -1:
 		goto cleanup_dst;
 	case 0:
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 542cdb9..d774a34 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1369,6 +1369,92 @@ sna_render_picture_flatten(struct sna *sna,
 }
 
 int
+sna_render_picture_approximate_gradient(struct sna *sna,
+					PicturePtr picture,
+					struct sna_composite_channel *channel,
+					int16_t x, int16_t y,
+					int16_t w, int16_t h,
+					int16_t dst_x, int16_t dst_y)
+{
+	pixman_image_t *dst, *src;
+	pixman_transform_t t;
+	int w2 = w/2, h2 = h/2;
+	int dx, dy;
+	void *ptr;
+
+#if NO_FIXUP
+	return -1;
+#endif
+
+	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
+
+	if (w2 == 0 || h2 == 0) {
+		DBG(("%s: fallback - unknown bounds\n", __FUNCTION__));
+		return -1;
+	}
+	if (w2 > sna->render.max_3d_size || h2 > sna->render.max_3d_size) {
+		DBG(("%s: fallback - too large (%dx%d)\n", __FUNCTION__, w, h));
+		return -1;
+	}
+
+	channel->pict_format = PIXMAN_a8r8g8b8;
+	channel->bo = kgem_create_buffer_2d(&sna->kgem,
+					    w2, h2, 32,
+					    KGEM_BUFFER_WRITE_INPLACE,
+					    &ptr);
+	if (!channel->bo) {
+		DBG(("%s: failed to create upload buffer, using clear\n",
+		     __FUNCTION__));
+		return 0;
+	}
+
+	dst = pixman_image_create_bits(PIXMAN_a8r8g8b8,
+				       w2, h2, ptr, channel->bo->pitch);
+	if (!dst) {
+		kgem_bo_destroy(&sna->kgem, channel->bo);
+		return 0;
+	}
+
+	src = image_from_pict(picture, FALSE, &dx, &dy);
+	if (src == NULL) {
+		pixman_image_unref(dst);
+		kgem_bo_destroy(&sna->kgem, channel->bo);
+		return 0;
+	}
+
+	memset(&t, 0, sizeof(t));
+	t.matrix[0][0] = (w << 16) / w2;
+	t.matrix[1][1] = (h << 16) / h2;
+	t.matrix[2][2] = 1 << 16;
+	if (picture->transform)
+		pixman_transform_multiply(&t, picture->transform, &t);
+	pixman_image_set_transform(src, &t);
+
+	pixman_image_composite(PictOpSrc, src, NULL, dst,
+			       x + dx, y + dy,
+			       0, 0,
+			       0, 0,
+			       w2, h2);
+	free_pixman_pict(picture, src);
+	pixman_image_unref(dst);
+
+	channel->width  = w2;
+	channel->height = h2;
+
+	channel->filter = PictFilterNearest;
+	channel->repeat = RepeatNone;
+	channel->is_affine = TRUE;
+
+	channel->scale[0] = 1.f/w;
+	channel->scale[1] = 1.f/h;
+	channel->offset[0] = -dst_x;
+	channel->offset[1] = -dst_y;
+	channel->transform = NULL;
+
+	return 1;
+}
+
+int
 sna_render_picture_fixup(struct sna *sna,
 			 PicturePtr picture,
 			 struct sna_composite_channel *channel,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 8d3d9e4..a83f78e 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -615,6 +615,14 @@ sna_render_picture_extract(struct sna *sna,
 			   int16_t dst_x, int16_t dst_y);
 
 int
+sna_render_picture_approximate_gradient(struct sna *sna,
+					PicturePtr picture,
+					struct sna_composite_channel *channel,
+					int16_t x, int16_t y,
+					int16_t w, int16_t h,
+					int16_t dst_x, int16_t dst_y);
+
+int
 sna_render_picture_fixup(struct sna *sna,
 			 PicturePtr picture,
 			 struct sna_composite_channel *channel,
commit 25807f472d051163ed96556a409110fa405c24d1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 23 14:56:06 2012 +0000

    uxa: Remove hook for CompositeRectangles
    
    It was broken and not flushing damage correctly. With the
    improvements made to the kernel, it is no longer a significant advantage
    per se and not worth its additional complexity.
    
    Reported-by: Tilman Sauerbeck <tilman at code-monkey.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32547
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/uxa/uxa-priv.h b/uxa/uxa-priv.h
index 0de45f5..b24ec4f 100644
--- a/uxa/uxa-priv.h
+++ b/uxa/uxa-priv.h
@@ -123,7 +123,6 @@ typedef struct {
 	BitmapToRegionProcPtr SavedBitmapToRegion;
 #ifdef RENDER
 	CompositeProcPtr SavedComposite;
-	CompositeRectsProcPtr SavedCompositeRects;
 	TrianglesProcPtr SavedTriangles;
 	GlyphsProcPtr SavedGlyphs;
 	TrapezoidsProcPtr SavedTrapezoids;
diff --git a/uxa/uxa-render.c b/uxa/uxa-render.c
index 877b286..1e88c5d 100644
--- a/uxa/uxa-render.c
+++ b/uxa/uxa-render.c
@@ -947,195 +947,6 @@ uxa_acquire_mask(ScreenPtr screen,
 				    out_x, out_y);
 }
 
-static Bool
-_pixman_region_init_rectangles(pixman_region16_t *region,
-			       int num_rects,
-			       xRectangle *rects,
-			       int tx, int ty)
-{
-	pixman_box16_t stack_boxes[64], *boxes = stack_boxes;
-	pixman_bool_t ret;
-	int i;
-
-	if (num_rects > sizeof(stack_boxes) / sizeof(stack_boxes[0])) {
-		boxes = malloc(sizeof(pixman_box16_t) * num_rects);
-		if (boxes == NULL)
-			return FALSE;
-	}
-
-	for (i = 0; i < num_rects; i++) {
-		boxes[i].x1 = rects[i].x + tx;
-		boxes[i].y1 = rects[i].y + ty;
-		boxes[i].x2 = rects[i].x + tx + rects[i].width;
-		boxes[i].y2 = rects[i].y + ty + rects[i].height;
-	}
-
-	ret = pixman_region_init_rects(region, boxes, num_rects);
-
-	if (boxes != stack_boxes)
-		free(boxes);
-
-	return ret;
-}
-
-void
-uxa_solid_rects (CARD8		op,
-		 PicturePtr	dst,
-		 xRenderColor  *color,
-		 int		num_rects,
-		 xRectangle    *rects)
-{
-	ScreenPtr screen = dst->pDrawable->pScreen;
-	uxa_screen_t *uxa_screen = uxa_get_screen(screen);
-	PixmapPtr dst_pixmap, src_pixmap = NULL;
-	pixman_region16_t region;
-	pixman_box16_t *boxes, *extents;
-	PicturePtr src;
-	int dst_x, dst_y;
-	int num_boxes;
-
-	if (!pixman_region_not_empty(dst->pCompositeClip))
-		return;
-
-	if (uxa_screen->info->flags & UXA_USE_GLAMOR) {
-		int ok;
-
-		uxa_picture_prepare_access(dst, UXA_GLAMOR_ACCESS_RW);
-		ok = glamor_composite_rects_nf(op, dst, color,
-					       num_rects, rects);
-		uxa_picture_finish_access(dst, UXA_GLAMOR_ACCESS_RW);
-
-		if (!ok)
-			goto fallback;
-
-		return;
-	}
-
-	if (dst->alphaMap)
-		goto fallback;
-
-	dst_pixmap = uxa_get_offscreen_pixmap(dst->pDrawable, &dst_x, &dst_y);
-	if (!dst_pixmap)
-		goto fallback;
-
-	if (!_pixman_region_init_rectangles(&region,
-					    num_rects, rects,
-					    dst->pDrawable->x, dst->pDrawable->y))
-		goto fallback;
-
-	if (!pixman_region_intersect(&region, &region, dst->pCompositeClip)) {
-		pixman_region_fini(&region);
-		return;
-	}
-
-	pixman_region_translate(&region, dst_x, dst_y);
-	boxes = pixman_region_rectangles(&region, &num_boxes);
-	extents = pixman_region_extents (&region);
-
-	if (op == PictOpClear)
-		color->red = color->green = color->blue = color->alpha = 0;
-	if (color->alpha >= 0xff00 && op == PictOpOver) {
-		color->alpha = 0xffff;
-		op = PictOpSrc;
-	}
-
-	/* Using GEM, the relocation costs outweigh the advantages of the blitter */
-	if (num_boxes == 1 && (op == PictOpSrc || op == PictOpClear)) {
-		CARD32 pixel;
-
-try_solid:
-		if (uxa_screen->info->check_solid &&
-		    !uxa_screen->info->check_solid(&dst_pixmap->drawable, GXcopy, FB_ALLONES))
-			goto err_region;
-
-		if (!uxa_get_pixel_from_rgba(&pixel,
-					     color->red,
-					     color->green,
-					     color->blue,
-					     color->alpha,
-					     dst->format))
-			goto err_region;
-
-		if (!uxa_screen->info->prepare_solid(dst_pixmap, GXcopy, FB_ALLONES, pixel))
-			goto err_region;
-
-		while (num_boxes--) {
-			uxa_screen->info->solid(dst_pixmap,
-						boxes->x1, boxes->y1,
-						boxes->x2, boxes->y2);
-			boxes++;
-		}
-
-		uxa_screen->info->done_solid(dst_pixmap);
-	} else {
-		int error;
-
-		src = CreateSolidPicture(0, color, &error);
-		if (!src)
-			goto err_region;
-
-		if (!uxa_screen->info->check_composite(op, src, NULL, dst,
-						       extents->x2 - extents->x1,
-						       extents->y2 - extents->y1)) {
-			if (op == PictOpSrc || op == PictOpClear) {
-				FreePicture(src, 0);
-				goto try_solid;
-			}
-
-			goto err_src;
-		}
-
-		if (!uxa_screen->info->check_composite_texture ||
-		    !uxa_screen->info->check_composite_texture(screen, src)) {
-			PicturePtr solid;
-			int src_off_x, src_off_y;
-
-			solid = uxa_acquire_solid(screen, src->pSourcePict);
-			if (!solid)
-				goto err_src;
-			FreePicture(src, 0);
-
-			src = solid;
-			src_pixmap = uxa_get_offscreen_pixmap(src->pDrawable,
-							      &src_off_x, &src_off_y);
-			if (!src_pixmap)
-				goto err_src;
-		}
-
-		if (!uxa_screen->info->prepare_composite(op, src, NULL, dst, src_pixmap, NULL, dst_pixmap))
-			goto err_src;
-
-		while (num_boxes--) {
-			uxa_screen->info->composite(dst_pixmap,
-						    0, 0, 0, 0,
-						    boxes->x1,
-						    boxes->y1,
-						    boxes->x2 - boxes->x1,
-						    boxes->y2 - boxes->y1);
-			boxes++;
-		}
-
-		uxa_screen->info->done_composite(dst_pixmap);
-		FreePicture(src, 0);
-	}
-
-	/* XXX xserver-1.8: CompositeRects is not tracked by Damage, so we must
-	 * manually append the damaged regions ourselves.
-	 */
-	pixman_region_translate(&region, -dst_x, -dst_y);
-	DamageRegionAppend(dst->pDrawable, &region);
-
-	pixman_region_fini(&region);
-	return;
-
-err_src:
-	FreePicture(src, 0);
-err_region:
-	pixman_region_fini(&region);
-fallback:
-	uxa_screen->SavedCompositeRects(op, dst, color, num_rects, rects);
-}
-
 static int
 uxa_try_driver_composite(CARD8 op,
 			 PicturePtr pSrc,
diff --git a/uxa/uxa.c b/uxa/uxa.c
index eb2ae03..b4a1da6 100644
--- a/uxa/uxa.c
+++ b/uxa/uxa.c
@@ -407,7 +407,6 @@ static Bool uxa_close_screen(int i, ScreenPtr pScreen)
 #ifdef RENDER
 	if (ps) {
 		ps->Composite = uxa_screen->SavedComposite;
-		ps->CompositeRects = uxa_screen->SavedCompositeRects;
 		ps->Glyphs = uxa_screen->SavedGlyphs;
 		ps->Trapezoids = uxa_screen->SavedTrapezoids;
 		ps->AddTraps = uxa_screen->SavedAddTraps;
@@ -536,9 +535,6 @@ Bool uxa_driver_init(ScreenPtr screen, uxa_driver_t * uxa_driver)
 			uxa_screen->SavedComposite = ps->Composite;
 			ps->Composite = uxa_composite;
 
-			uxa_screen->SavedCompositeRects = ps->CompositeRects;
-			ps->CompositeRects = uxa_solid_rects;
-
 			uxa_screen->SavedGlyphs = ps->Glyphs;
 			ps->Glyphs = uxa_glyphs;
 
commit 4460c6d0d30cf20b76854d47fd9e3aecf1839f15
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 23 11:05:55 2012 +0000

    configure: Stop the debug build erroring out if it cannot find valgrind
    
    Another case where I passed an empty string believing that would be
    sufficient to replace the error path...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 7ca3075..b5919bf 100644
--- a/configure.ac
+++ b/configure.ac
@@ -254,8 +254,10 @@ if test "x$DEBUG" = xno; then
 fi
 if test "x$DEBUG" != xno; then
 	AC_DEFINE(HAS_EXTRA_DEBUG,1,[Enable additional debugging])
-	PKG_CHECK_MODULES(VALGRIND, [valgrind],
-			  AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warings]),)
+	PKG_CHECK_MODULES(VALGRIND, [valgrind], have_valgrind=yes, have_valgrind=no)
+	if test x$have_valgrind = xyes; then
+		AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
+	fi
 fi
 if test "x$DEBUG" = xfull; then
 	AC_DEFINE(HAS_DEBUG_FULL,1,[Enable all debugging])
commit 168ecd96f23756e185af627f814f81b8a4c0c529
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 22 23:47:56 2012 +0000

    sna: Make the fallback debugging messages more consistent
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 424c8ad..bf76948 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -147,22 +147,22 @@ static void __sna_fallback_flush(DrawablePtr d)
 static int sna_font_key;
 
 static const uint8_t copy_ROP[] = {
-	ROP_0,                  /* GXclear */
-	ROP_DSa,                /* GXand */
-	ROP_SDna,               /* GXandReverse */
-	ROP_S,                  /* GXcopy */
-	ROP_DSna,               /* GXandInverted */
-	ROP_D,                  /* GXnoop */
-	ROP_DSx,                /* GXxor */
-	ROP_DSo,                /* GXor */
-	ROP_DSon,               /* GXnor */
-	ROP_DSxn,               /* GXequiv */
-	ROP_Dn,                 /* GXinvert */
-	ROP_SDno,               /* GXorReverse */
-	ROP_Sn,                 /* GXcopyInverted */
-	ROP_DSno,               /* GXorInverted */
-	ROP_DSan,               /* GXnand */
-	ROP_1                   /* GXset */
+	ROP_0,		/* GXclear */
+	ROP_DSa,	/* GXand */
+	ROP_SDna,	/* GXandReverse */
+	ROP_S,		/* GXcopy */
+	ROP_DSna,	/* GXandInverted */
+	ROP_D,		/* GXnoop */
+	ROP_DSx,	/* GXxor */
+	ROP_DSo,	/* GXor */
+	ROP_DSon,	/* GXnor */
+	ROP_DSxn,	/* GXequiv */
+	ROP_Dn,		/* GXinvert */
+	ROP_SDno,	/* GXorReverse */
+	ROP_Sn,		/* GXcopyInverted */
+	ROP_DSno,	/* GXorInverted */
+	ROP_DSan,	/* GXnand */
+	ROP_1		/* GXset */
 };
 static const uint8_t fill_ROP[] = {
 	ROP_0,
@@ -2850,8 +2850,11 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
 		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
-		if (bo == NULL)
+		if (bo == NULL) {
+			DBG(("%s: fallback -- unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
+		}
 	}
 
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
@@ -2969,8 +2972,11 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
 		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
-		if (bo == NULL)
+		if (bo == NULL) {
+			DBG(("%s: fallback -- unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
+		}
 	}
 
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
@@ -3857,7 +3863,7 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	    !PM_IS_SOLID(dst, gc->planemask)) {
 		RegionRec region, *ret;
 
-		DBG(("%s: -- fallback, wedged=%d, solid=%d [%x]\n",
+		DBG(("%s: fallback -- wedged=%d, solid=%d [%x]\n",
 		     __FUNCTION__, sna->kgem.wedged,
 		     PM_IS_SOLID(dst, gc->planemask),
 		     (unsigned)gc->planemask));
@@ -5338,8 +5344,11 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		if (arg.bo->tiling == I915_TILING_Y) {
 			assert(arg.bo == sna_pixmap_get_bo(pixmap));
 			arg.bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
-			if (arg.bo == NULL)
+			if (arg.bo == NULL) {
+				DBG(("%s: fallback -- unable to change tiling\n",
+				     __FUNCTION__));
 				goto fallback;
+			}
 		}
 		RegionUninit(&region);
 		return miDoCopy(src, dst, gc,
@@ -9784,10 +9793,13 @@ sna_poly_fill_rect_stippled_blt(DrawablePtr drawable,
 
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
 		/* This is cheating, but only the gpu_bo can be tiled */
-		assert(bo == sna_pixmap(pixmap)->gpu_bo);
+		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
-		if (bo == NULL)
+		if (bo == NULL) {
+			DBG(("%s: fallback -- unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
+		}
 	}
 
 	if (!sna_drawable_move_to_cpu(&stipple->drawable, MOVE_READ))
@@ -10217,7 +10229,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	     __FUNCTION__, _x, _y, _n, fg, bg, rop));
 
 	if (wedged(sna)) {
-		DBG(("%s -- fallback, wedged\n", __FUNCTION__));
+		DBG(("%s: fallback -- wedged\n", __FUNCTION__));
 		return false;
 	}
 
@@ -10230,7 +10242,8 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
 		if (bo == NULL) {
-			DBG(("%s -- fallback, dst uses Y-tiling\n", __FUNCTION__));
+			DBG(("%s: fallback -- unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
 		}
 	}
@@ -10897,7 +10910,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
 		if (bo == NULL) {
-			DBG(("%s -- fallback, unable to change tiling\n",
+			DBG(("%s: fallback -- unable to change tiling\n",
 			     __FUNCTION__));
 			return false;
 		}
@@ -11251,8 +11264,11 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
 		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
-		if (bo == NULL)
+		if (bo == NULL) {
+			DBG(("%s: fallback -- unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
+		}
 	}
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
commit 8693005b5427f256bcd7c2e7cfc48d56f5b8c5ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 22 23:43:04 2012 +0000

    sna: Update the target bo after changing from Y tiling
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f6bbeab..424c8ad 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10894,8 +10894,11 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 
 	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
-		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_X)) {
-			DBG(("%s -- fallback, dst uses Y-tiling\n", __FUNCTION__));
+		assert(bo == sna_pixmap_get_bo(pixmap));
+		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
+		if (bo == NULL) {
+			DBG(("%s -- fallback, unable to change tiling\n",
+			     __FUNCTION__));
 			return false;
 		}
 	}
commit 83f98d6e5c303e52c1e0fb95b6237ebf62a8edfe
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 22 21:57:14 2012 +0000

    sna: Correctly test for clear glyphs when searching for ones to skip
    
    With xterm, it is quite common for it to redraw itself by using lots of
    spaces and so it is efficient for us if we elide those clear glyphs and
    only draw the backing boxes. However, we were only checking the first 8
    pixels in each line because of a missing pointer increment.
    
    Fixes absent '=' characters when using a compositor and ImageText.
    
    Reported-by: Jiri Slaby <jirislaby at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47735
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b8f5059..f6bbeab 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10964,10 +10964,15 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			x1 = x + c->metrics.leftSideBearing;
 			y1 = y - c->metrics.ascent;
 
-			if (x1 >= extents->x2 || y1 >= extents->y2)
-				goto skip;
-			if (x1 + w <= extents->x1 || y1 + h <= extents->y1)
+			if (x1 >= extents->x2 || y1 >= extents->y2 ||
+			    x1 + w <= extents->x1 || y1 + h <= extents->y1) {
+				DBG(("%s: glyph is clipped (%d, %d)x(%d,%d) against extents (%d, %d), (%d, %d)\n",
+				     __FUNCTION__,
+				     x1, y1, w, h,
+				     extents->x1, extents->y1,
+				     extents->x2, extents->y2));
 				goto skip;
+			}
 
 			if (!transparent) {
 				int clear = 1, j = h;
@@ -10976,12 +10981,15 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				do {
 					i = w8;
 					do {
-						clear = *g == 0;
+						clear = *g++ == 0;
 					} while (clear && --i);
 					g += stride - w8;
 				} while (clear && --j);
-				if (clear)
+				if (clear) {
+					DBG(("%s: skipping clear glyph for ImageGlyph\n",
+					     __FUNCTION__));
 					goto skip;
+				}
 			}
 
 			if (!kgem_check_batch(&sna->kgem, 3+len)) {
commit 86121a3af9a9fc9a2c76d7ac9f3ec17105d20d80
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 22 11:21:43 2012 +0000

    sna: Adjust the damage region for the composite offset
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47597
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index e5031c0..d281776 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -487,9 +487,19 @@ sna_composite(CARD8 op,
 	     get_drawable_dx(dst->pDrawable),
 	     get_drawable_dy(dst->pDrawable)));
 
-	if (op <= PictOpSrc)
+	if (op <= PictOpSrc) {
+		int16_t x, y;
+
+		get_drawable_deltas(dst->pDrawable, pixmap, &x, &y);
+		if (x|y)
+			pixman_region_translate(&region, x, y);
+
 		sna_damage_subtract(&priv->cpu_damage, &region);
 
+		if (x|y)
+			pixman_region_translate(&region, -x, -y);
+	}
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite(sna,
 				   op, src, mask, dst,
commit a6b48dd7f1eeb1a8e3841b8f3326c60b300ee9e9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 22 09:22:52 2012 +0000

    sna: Force fallbacks if the destination is unattached
    
    Since the removal of the ability to create a backing pixmap after the
    creation of its parent, it no longer becomes practical to attempt
    rendering with the GPU to unattached pixmaps. So having made the
    decision never to render to that pixmap, perform the test explicitly
    along the render paths.
    
    This fixes a segmentation fault introduced in 8a303f195 (sna: Remove
    existing damage before overwriting with a composite op) which assumed
    the existence of a backing pixmap along a render path.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47700
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index b098fcc..e5031c0 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -413,7 +413,9 @@ sna_composite(CARD8 op,
 	      INT16 dst_x,  INT16 dst_y,
 	      CARD16 width, CARD16 height)
 {
-	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
 	struct sna_composite_op tmp;
 	unsigned flags;
 	RegionRec region;
@@ -462,8 +464,14 @@ sna_composite(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(dst->pDrawable) &&
-	    !picture_is_gpu(src) && !picture_is_gpu(mask)) {
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback as destination is unattached\n",
+		     __FUNCTION__));
+		goto fallback;
+	}
+
+	if (too_small(priv) && !picture_is_gpu(src) && !picture_is_gpu(mask)) {
 		DBG(("%s: fallback due to too small\n", __FUNCTION__));
 		goto fallback;
 	}
@@ -479,10 +487,8 @@ sna_composite(CARD8 op,
 	     get_drawable_dx(dst->pDrawable),
 	     get_drawable_dy(dst->pDrawable)));
 
-	if (op <= PictOpSrc) {
-		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
+	if (op <= PictOpSrc)
 		sna_damage_subtract(&priv->cpu_damage, &region);
-	}
 
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite(sna,
@@ -752,17 +758,17 @@ sna_composite_rectangles(CARD8		 op,
 
 	boxes = pixman_region_rectangles(&region, &num_boxes);
 
-	if (too_small(dst->pDrawable)) {
-		DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
-		goto fallback;
-	}
-
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
 		DBG(("%s: fallback, not attached\n", __FUNCTION__));
 		goto fallback;
 	}
 
+	if (too_small(priv)) {
+		DBG(("%s: fallback, dst is too small\n", __FUNCTION__));
+		goto fallback;
+	}
+
 	/* If we going to be overwriting any CPU damage with a subsequent
 	 * operation, then we may as well delete it without moving it
 	 * first to the GPU.
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 1c536c8..235528c 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1217,7 +1217,9 @@ sna_glyphs(CARD8 op,
 	   INT16 src_x, INT16 src_y,
 	   int nlist, GlyphListPtr list, GlyphPtr *glyphs)
 {
-	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
 	PictFormatPtr _mask;
 
 	DBG(("%s(op=%d, nlist=%d, src=(%d, %d))\n",
@@ -1234,14 +1236,20 @@ sna_glyphs(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(dst->pDrawable) && !picture_is_gpu(src)) {
-		DBG(("%s: fallback -- too small (%dx%d)\n",
-		     __FUNCTION__, dst->pDrawable->width, dst->pDrawable->height));
+	if (dst->alphaMap) {
+		DBG(("%s: fallback -- dst alpha map\n", __FUNCTION__));
 		goto fallback;
 	}
 
-	if (dst->alphaMap) {
-		DBG(("%s: fallback -- dst alpha map\n", __FUNCTION__));
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback -- destination unattached\n", __FUNCTION__));
+		goto fallback;
+	}
+
+	if (too_small(priv) && !picture_is_gpu(src)) {
+		DBG(("%s: fallback -- too small (%dx%d)\n",
+		     __FUNCTION__, dst->pDrawable->width, dst->pDrawable->height));
 		goto fallback;
 	}
 
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 88d0130..956e2aa 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -101,13 +101,10 @@ is_dirty(DrawablePtr drawable)
 	return priv == NULL || kgem_bo_is_dirty(priv->gpu_bo);
 }
 
-static inline Bool
-too_small(DrawablePtr drawable)
+static inline bool
+too_small(struct sna_pixmap *priv)
 {
-	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-
-	if (priv == NULL)
-		return true;
+	assert(priv);
 
 	if (priv->gpu_damage)
 		return false;
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 7048cae..9ab5ae2 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4435,7 +4435,9 @@ sna_composite_trapezoids(CARD8 op,
 			 INT16 xSrc, INT16 ySrc,
 			 int ntrap, xTrapezoid *traps)
 {
-	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
+	PixmapPtr pixmap = get_drawable_pixmap(dst->pDrawable);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
 	bool rectilinear, pixel_aligned;
 	unsigned flags;
 	int n;
@@ -4461,7 +4463,13 @@ sna_composite_trapezoids(CARD8 op,
 		goto fallback;
 	}
 
-	if (too_small(dst->pDrawable) && !picture_is_gpu(src)) {
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback -- dst is unattached\n", __FUNCTION__));
+		goto fallback;
+	}
+
+	if (too_small(priv) && !picture_is_gpu(src)) {
 		DBG(("%s: fallback -- dst is too small, %dx%d\n",
 		     __FUNCTION__,
 		     dst->pDrawable->width,
commit 52f39ae1697bef86471b7c5eef8553661f255b67
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 21 13:31:03 2012 +0000

    sna: Assert that the bo created is large enough
    
    Double check that the maximum access size computed from the bo
    parameters is within the allocated size for the bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c022900..26abdd0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2509,6 +2509,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 			bo->refcnt = 1;
 			return bo;
 		}
@@ -2561,6 +2562,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
 				assert(bo->reusable);
 				assert(bo->domain != DOMAIN_GPU && !kgem_busy(kgem, bo->handle));
+				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 				bo->refcnt = 1;
 				return bo;
 			}
@@ -2615,6 +2617,7 @@ search_again:
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 			bo->refcnt = 1;
 			return bo;
 		}
@@ -2636,6 +2639,7 @@ search_again:
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+			assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 			bo->refcnt = 1;
 			return bo;
 		}
@@ -2669,6 +2673,7 @@ search_again:
 					bo->delta = 0;
 					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+					assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 					bo->refcnt = 1;
 					return bo;
 				}
@@ -2715,6 +2720,7 @@ search_again:
 				bo->delta = 0;
 				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+				assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 				bo->refcnt = 1;
 				return bo;
 			}
@@ -2773,6 +2779,7 @@ search_inactive:
 		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
 		assert((flags & CREATE_INACTIVE) == 0 ||
 		       !kgem_busy(kgem, bo->handle));
+		assert(bo->pitch*kgem_aligned_height(kgem, height, bo->tiling) <= kgem_bo_size(bo));
 		bo->refcnt = 1;
 		return bo;
 	}
commit 2f23c660f58e0db2107255f32a3503dae62fe304
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 21 13:30:45 2012 +0000

    sna: Assert that the tiled blt is correctly clipped
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 33e08be..b8f5059 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -8697,6 +8697,11 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			if (tile_y < 0)
 				tile_y += tile_height;
 
+			assert(r.x + dx >= 0);
+			assert(r.y + dy >= 0);
+			assert(r.x + dx + r.width  <= pixmap->drawable.width);
+			assert(r.y + dy + r.height <= pixmap->drawable.height);
+
 			r.y += dy;
 			do {
 				int16_t width = r.width;
commit e0cd13e2af3db237c171f8ca1c70ddc67b1fbce4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 21 09:06:10 2012 +0000

    sna: Fallback to inplace upload if forced to tile the indirect replacement
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47629
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 62a8962..4f5a634 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -1072,6 +1072,7 @@ indirect_replace(struct sna *sna,
 {
 	struct kgem *kgem = &sna->kgem;
 	struct kgem_bo *src_bo;
+	BoxRec box;
 	void *ptr;
 	bool ret;
 
@@ -1083,103 +1084,37 @@ indirect_replace(struct sna *sna,
 	if ((int)pixmap->devKind * pixmap->drawable.height >> 12 > kgem->half_cpu_cache_pages)
 		return false;
 
-	if (kgem->ring == KGEM_RENDER || !kgem_bo_can_blt(kgem, bo)) {
-		BoxRec box;
-
-		assert(!must_tile(sna,
-				  pixmap->drawable.width,
-				  pixmap->drawable.height));
-
-		src_bo = kgem_create_buffer_2d(kgem,
-					       pixmap->drawable.width,
-					       pixmap->drawable.height,
-					       pixmap->drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE_INPLACE,
-					       &ptr);
-		if (!src_bo)
-			return false;
-
-		memcpy_blt(src, ptr, pixmap->drawable.bitsPerPixel,
-			   stride, src_bo->pitch,
-			   0, 0,
-			   0, 0,
-			   pixmap->drawable.width,
-			   pixmap->drawable.height);
-
-		box.x1 = box.y1 = 0;
-		box.x2 = pixmap->drawable.width;
-		box.y2 = pixmap->drawable.height;
-
-		ret = sna->render.copy_boxes(sna, GXcopy,
-					     pixmap, src_bo, 0, 0,
-					     pixmap, bo, 0, 0,
-					     &box, 1);
-	} else {
-		uint32_t cmd, br13, *b;
-		int pitch;
-
-		pitch = pixmap->drawable.width * pixmap->drawable.bitsPerPixel;
-		pitch = ALIGN(pitch, 32) >> 3;
+	if (!kgem_bo_can_blt(kgem, bo) &&
+	    must_tile(sna, pixmap->drawable.width, pixmap->drawable.height))
+		return false;
 
-		src_bo = kgem_create_buffer(kgem,
-					    pitch * pixmap->drawable.height,
-					    KGEM_BUFFER_WRITE_INPLACE,
-					    &ptr);
-		if (!src_bo)
-			return false;
+	src_bo = kgem_create_buffer_2d(kgem,
+				       pixmap->drawable.width,
+				       pixmap->drawable.height,
+				       pixmap->drawable.bitsPerPixel,
+				       KGEM_BUFFER_WRITE_INPLACE,
+				       &ptr);
+	if (!src_bo)
+		return false;
 
-		memcpy_blt(src, ptr, pixmap->drawable.bitsPerPixel,
-			   stride, pitch,
-			   0, 0,
-			   0, 0,
-			   pixmap->drawable.width,
-			   pixmap->drawable.height);
-
-		cmd = XY_SRC_COPY_BLT_CMD;
-		br13 = bo->pitch;
-		if (kgem->gen >= 40 && bo->tiling) {
-			cmd |= BLT_DST_TILED;
-			br13 >>= 2;
-		}
-		br13 |= 0xcc << 16;
-		switch (pixmap->drawable.bitsPerPixel) {
-		default:
-		case 32: cmd |= BLT_WRITE_ALPHA | BLT_WRITE_RGB;
-			 br13 |= 1 << 25; /* RGB8888 */
-		case 16: br13 |= 1 << 24; /* RGB565 */
-		case 8: break;
-		}
+	memcpy_blt(src, ptr, pixmap->drawable.bitsPerPixel,
+		   stride, src_bo->pitch,
+		   0, 0,
+		   0, 0,
+		   pixmap->drawable.width,
+		   pixmap->drawable.height);
 
-		kgem_set_mode(kgem, KGEM_BLT);
-		if (kgem->nexec + 2 > KGEM_EXEC_SIZE(kgem) ||
-		    kgem->nreloc + 2 > KGEM_RELOC_SIZE(kgem) ||
-		    !kgem_check_batch(kgem, 8) ||
-		    !kgem_check_bo_fenced(kgem, bo, NULL)) {
-			_kgem_submit(kgem);
-			_kgem_set_mode(kgem, KGEM_BLT);
-		}
+	box.x1 = box.y1 = 0;
+	box.x2 = pixmap->drawable.width;
+	box.y2 = pixmap->drawable.height;
 
-		b = kgem->batch + kgem->nbatch;
-		b[0] = cmd;
-		b[1] = br13;
-		b[2] = 0;
-		b[3] = pixmap->drawable.height << 16 | pixmap->drawable.width;
-		b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, bo,
-				      I915_GEM_DOMAIN_RENDER << 16 |
-				      I915_GEM_DOMAIN_RENDER |
-				      KGEM_RELOC_FENCED,
-				      0);
-		b[5] = 0;
-		b[6] = pitch;
-		b[7] = kgem_add_reloc(kgem, kgem->nbatch + 7, src_bo,
-				      I915_GEM_DOMAIN_RENDER << 16 |
-				      KGEM_RELOC_FENCED,
-				      0);
-		kgem->nbatch += 8;
-		ret = true;
-	}
+	ret = sna->render.copy_boxes(sna, GXcopy,
+				     pixmap, src_bo, 0, 0,
+				     pixmap, bo, 0, 0,
+				     &box, 1);
 
 	kgem_bo_destroy(kgem, src_bo);
+
 	return ret;
 }
 
commit 1db1eced5462723811e535a53cb41b108fc6c5a3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 20 22:57:50 2012 +0000

    sna: Tidy an assertion when handling tiled copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index bb09214..c022900 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1095,6 +1095,8 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		goto destroy;
 
 	if (bo->vmap) {
+		DBG(("%s: handle=%d is vmapped, tracking until free\n",
+		     __FUNCTION__, bo->handle));
 		if (bo->rq == NULL) {
 			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
 				list_add(&bo->request, &kgem->flushing);
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 493e313..dcb4d1d 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -389,8 +389,8 @@ sna_tiling_fill_boxes(struct sna *sna,
 				int16_t dx = this.extents.x1;
 				int16_t dy = this.extents.y1;
 
+				assert(kgem_bo_can_blt(&sna->kgem, bo));
 				assert(bo->pitch <= 8192);
-				assert(bo->tiling != I915_TILING_Y);
 
 				if (!sna->render.copy_boxes(sna, GXcopy,
 							     dst, dst_bo, 0, 0,
commit 38b0cc24b45afd4ed57187fb066896bf1e467f45
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 21 01:01:47 2012 +0000

    sna: Remove the short-circuiting of move-to-cpu for read if no damage
    
    The danger now is that we may have either discarded the shadow pixels or
    have replaced them with a GTT mapping, either way undesirable and so we
    should reconstruct the shadow mapping.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 55c5f5a..949c18f 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -435,7 +435,7 @@ static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned
 {
 	if (flags == MOVE_READ) {
 		struct sna_pixmap *priv = sna_pixmap(pixmap);
-		if (priv == NULL || priv->gpu_damage == NULL)
+		if (priv == NULL)
 			return true;
 	}
 
commit f49a6b1fd7e23252b2a306f8bdb9baed5c1e8d4b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 20 21:36:26 2012 +0000

    sna: Decouple the private data after UnrealizeFont
    
    As the font is kept around and reused after UnrealizeFont, we need to
    nullify the pointer to our private data in order to prevent the later
    use-after-free.
    
    Reported-by: Peter Jordan
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c7f041e..33e08be 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10183,6 +10183,7 @@ sna_unrealize_font(ScreenPtr screen, FontPtr font)
 		for (n = 0; n < 256; n++)
 			free(priv->glyphs16[n]);
 		free(priv);
+		FontSetPrivate(font, sna_font_key, NULL);
 	}
 
 	return TRUE;
commit 1bdd6461de18a6812d5af0283f4222cbb80ca2f8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 20 11:08:44 2012 +0000

    sna/traps: Remove bogus assertion
    
    As we only enter that path for singular unbounded boxes, we are
    guaranteed to fill the entire trapezoid extents and so do not need the
    unbounded fixup the assertion was fretting about.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 4752e48..7048cae 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3099,7 +3099,6 @@ composite_unaligned_boxes(struct sna *sna,
 		if (priv->clear && priv->clear_color == 0)
 			return true;
 	}
-	assert((priv->clear && priv->clear_color == 0) || operator_is_bounded(op));
 
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite_spans(sna, op, src, dst,
commit 1c2932e9cb283942567c3dd2695d03b8045da27f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 19 15:51:43 2012 +0000

    uxa: Defer the call to EnterVT till after outputs are initialised
    
    We need to do this apparently or else we never perform the VT switch.
    However, we can not do it too early, especially not before we have
    finished initialising the outputs.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47395
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index e4dd26b..4265de8 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1011,13 +1011,6 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 			   "Hardware cursor initialization failed\n");
 	}
 
-	/* Must force it before EnterVT, so we are in control of VT and
-	 * later memory should be bound when allocating, e.g rotate_mem */
-	scrn->vtSema = TRUE;
-
-	if (!I830EnterVT(scrnIndex, 0))
-		return FALSE;
-
 	intel->BlockHandler = screen->BlockHandler;
 	screen->BlockHandler = I830BlockHandler;
 
@@ -1090,7 +1083,11 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	I830UeventInit(scrn);
 #endif
 
-	return TRUE;
+	/* Must force it before EnterVT, so we are in control of VT and
+	 * later memory should be bound when allocating, e.g rotate_mem */
+	scrn->vtSema = TRUE;
+
+	return I830EnterVT(scrnIndex, 0);
 }
 
 static void i830AdjustFrame(int scrnIndex, int x, int y, int flags)
commit 972e1108a5606399a679f97af0815ec9730ab5be
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 19 14:38:28 2012 +0000

    sna: Declare videoRam correctly on gen2 devices
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 7cf3ef1..7b3cfce 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -820,20 +820,25 @@ sna_register_all_privates(void)
 	return TRUE;
 }
 
+static size_t
+agp_aperture_size(struct pci_device *dev, int gen)
+{
+	return dev->regions[gen < 30 ? 0 : 2].size;
+}
+
 static Bool
 sna_screen_init(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 {
 	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
 	struct sna *sna = to_sna(scrn);
 	VisualPtr visual;
-	struct pci_device *const device = sna->PciInfo;
 
 	DBG(("%s\n", __FUNCTION__));
 
 	if (!sna_register_all_privates())
 		return FALSE;
 
-	scrn->videoRam = device->regions[2].size / 1024;
+	scrn->videoRam = agp_aperture_size(sna->PciInfo, sna->kgem.gen) / 1024;
 
 	miClearVisualTypes();
 	if (!miSetVisualTypes(scrn->depth,
commit c0bab1df509d1f7fe36173715846c13b931e9be1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 19 08:25:22 2012 +0000

    sna/dri: Mark the drawable as damaged for the off-screen immediate exchange
    
    In some cases off-screen is still visible, for example under a rotation.
    As such xrandr -o left; glxgears -fullscreen was broken.
    
    Reported-by: Phillip Haddad <phillip.haddad at gmail.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index e2eed1d..95ec07e 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1294,12 +1294,24 @@ sna_dri_schedule_flip(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 	/* Drawable not displayed... just complete the swap */
 	pipe = sna_dri_get_pipe(draw);
 	if (pipe == -1) {
+		RegionRec region;
+
 		DBG(("%s: off-screen, immediate update\n", __FUNCTION__));
 
 		sna_dri_exchange_attachment(front, back);
 		get_private(back)->pixmap = get_private(front)->pixmap;
 		get_private(front)->pixmap = NULL;
 		set_bo(get_private(back)->pixmap, get_private(back)->bo);
+
+		/* XXX can we query whether we need to process damage? */
+		region.extents.x1 = draw->x;
+		region.extents.y1 = draw->y;
+		region.extents.x2 = draw->x + draw->width;
+		region.extents.y2 = draw->y + draw->height;
+		region.data = NULL;
+		DamageRegionAppend(draw, &region);
+		DamageRegionProcessPending(draw);
+
 		DRI2SwapComplete(client, draw, 0, 0, 0,
 				 DRI2_EXCHANGE_COMPLETE, func, data);
 		return TRUE;
commit 17c19ea8e21e1b20eee446045573dfd94ce6f537
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 13:23:26 2012 +0000

    sna/traps: Remove separate edge->vertical flag
    
    Mark vertical edges with dy==0 to reduce structure size and reduce
    memory load during edge walking.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f2caf9a..4752e48 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -190,7 +190,6 @@ struct edge {
 	struct edge *next, *prev;
 
 	int dir;
-	int vertical;
 
 	grid_scaled_y_t height_left;
 
@@ -713,13 +712,12 @@ polygon_add_edge(struct polygon *polygon,
 	e->height_left = ybot - ytop;
 
 	if (dx == 0) {
-		e->vertical = true;
 		e->x.quo = x1;
 		e->x.rem = 0;
+		e->dy = 0;
 		e->dxdy.quo = 0;
 		e->dxdy.rem = 0;
 	} else {
-		e->vertical = false;
 		e->dxdy = floored_divrem(dx, dy);
 		if (ytop == y1) {
 			e->x.quo = x1;
@@ -776,13 +774,12 @@ polygon_add_line(struct polygon *polygon,
 	e->height_left = bot - top;
 
 	if (dx == 0) {
-		e->vertical = true;
 		e->x.quo = p1->x;
 		e->x.rem = -dy;
 		e->dxdy.quo = 0;
 		e->dxdy.rem = 0;
+		e->dy = 0;
 	} else {
-		e->vertical = false;
 		e->dxdy = floored_divrem(dx, dy);
 		if (top == p1->y) {
 			e->x.quo = p1->x;
@@ -819,16 +816,16 @@ polygon_add_line(struct polygon *polygon,
 static void
 active_list_reset(struct active_list *active)
 {
-	active->head.vertical = 1;
 	active->head.height_left = INT_MAX;
 	active->head.x.quo = INT_MIN;
+	active->head.dy = 0;
 	active->head.prev = NULL;
 	active->head.next = &active->tail;
 	active->tail.prev = &active->head;
 	active->tail.next = NULL;
 	active->tail.x.quo = INT_MAX;
 	active->tail.height_left = INT_MAX;
-	active->tail.vertical = 1;
+	active->tail.dy = 0;
 	active->min_height = INT_MAX;
 	active->is_vertical = 1;
 }
@@ -934,7 +931,7 @@ can_full_step(struct active_list *active)
 		for (e = active->head.next; &active->tail != e; e = e->next) {
 			if (e->height_left < min_height)
 				min_height = e->height_left;
-			is_vertical &= e->vertical;
+			is_vertical &= e->dy == 0;
 		}
 
 		active->is_vertical = is_vertical;
@@ -971,7 +968,7 @@ fill_buckets(struct active_list *active,
 		*b = edge;
 		if (edge->height_left < min_height)
 			min_height = edge->height_left;
-		is_vertical &= edge->vertical;
+		is_vertical &= edge->dy == 0;
 		edge = next;
 	}
 
@@ -1002,7 +999,7 @@ nonzero_subrow(struct active_list *active, struct cell_list *coverages)
 			xstart = edge->x.quo;
 
 		if (--edge->height_left) {
-			if (!edge->vertical) {
+			if (edge->dy) {
 				edge->x.quo += edge->dxdy.quo;
 				edge->x.rem += edge->dxdy.rem;
 				if (edge->x.rem >= 0) {
@@ -1595,7 +1592,7 @@ inplace_subrow(struct active_list *active, int8_t *row,
 		}
 
 		if (--edge->height_left) {
-			if (!edge->vertical) {
+			if (edge->dy) {
 				edge->x.quo += edge->dxdy.quo;
 				edge->x.rem += edge->dxdy.rem;
 				if (edge->x.rem >= 0) {
@@ -1805,7 +1802,6 @@ struct mono_edge {
 
 	int32_t height_left;
 	int32_t dir;
-	int32_t vertical;
 
 	int32_t dy;
 	struct quorem x;
@@ -1925,14 +1921,12 @@ mono_add_line(struct mono *mono,
 	dy = p2->y - p1->y;
 
 	if (dx == 0) {
-		e->vertical = TRUE;
 		e->x.quo = p1->x;
 		e->x.rem = 0;
 		e->dxdy.quo = 0;
 		e->dxdy.rem = 0;
 		e->dy = 0;
 	} else {
-		e->vertical = FALSE;
 		e->dxdy = floored_muldivrem (dx, pixman_fixed_1, dy);
 		e->dy = dy;
 
@@ -2079,7 +2073,7 @@ mono_merge_edges(struct mono *c, struct mono_edge *edges)
 	DBG_MONO_EDGES(edges);
 
 	for (e = edges; c->is_vertical && e; e = e->next)
-		c->is_vertical = e->vertical;
+		c->is_vertical = e->dy == 0;
 
 	c->head.next = mono_merge_unsorted_edges(c->head.next, edges);
 }
@@ -2137,11 +2131,13 @@ mono_row(struct mono *c, int16_t y, int16_t h)
 		int16_t xend = I(edge->x.quo);
 
 		if (--edge->height_left) {
-			edge->x.quo += edge->dxdy.quo;
-			edge->x.rem += edge->dxdy.rem;
-			if (edge->x.rem >= 0) {
-				++edge->x.quo;
-				edge->x.rem -= edge->dy;
+			if (edge->dy) {
+				edge->x.quo += edge->dxdy.quo;
+				edge->x.rem += edge->dxdy.rem;
+				if (edge->x.rem >= 0) {
+					++edge->x.quo;
+					edge->x.rem -= edge->dy;
+				}
 			}
 
 			if (edge->x.quo < prev_x) {
@@ -2185,7 +2181,7 @@ mono_init(struct mono *c, int num_edges)
 	if (!mono_polygon_init(&c->polygon, &c->clip.extents, num_edges))
 		return false;
 
-	c->head.vertical = 1;
+	c->head.dy = 0;
 	c->head.height_left = INT_MAX;
 	c->head.x.quo = INT16_MIN << 16;
 	c->head.prev = NULL;
@@ -2194,7 +2190,7 @@ mono_init(struct mono *c, int num_edges)
 	c->tail.next = NULL;
 	c->tail.x.quo = INT16_MAX << 16;
 	c->tail.height_left = INT_MAX;
-	c->tail.vertical = 1;
+	c->tail.dy = 0;
 
 	c->is_vertical = 1;
 
commit 82023397cd05c05cfcb8939829a95e287d438875
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 11:10:14 2012 +0000

    sna/gen3: Improve clear-to-solid reduction
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 8c88722..b236761 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2258,6 +2258,27 @@ gen3_init_radial(struct sna *sna,
 }
 
 static Bool
+sna_picture_is_clear(PicturePtr picture,
+		     int x, int y, int w, int h,
+		     uint32_t *color)
+{
+	struct sna_pixmap *priv;
+
+	if (!picture->pDrawable)
+		return FALSE;
+
+	priv = sna_pixmap(get_drawable_pixmap(picture->pDrawable));
+	if (priv == NULL || !priv->clear)
+		return FALSE;
+
+	if (!source_is_covered(picture, x, y, w, h))
+		return FALSE;
+
+	*color = priv->clear_color;
+	return TRUE;
+}
+
+static Bool
 gen3_composite_picture(struct sna *sna,
 		       PicturePtr picture,
 		       struct sna_composite_op *op,
@@ -2310,6 +2331,9 @@ gen3_composite_picture(struct sna *sna,
 	if (sna_picture_is_solid(picture, &color))
 		return gen3_init_solid(channel, color);
 
+	if (sna_picture_is_clear(picture, x, y, w, h, &color))
+		return gen3_init_solid(channel, color);
+
 	if (!gen3_check_repeat(picture))
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
commit 7ce5f4a11389b2fe9f27a6f09c4848ac71424d5d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 10:55:17 2012 +0000

    sna/gen3: Do not force tiling for large pixmaps
    
    As the extraction routine is now smarter and can construct
    subsurfaces without copying we do not need to force tiling.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8a2222c..bb09214 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2300,17 +2300,11 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 		return tiling < 0 ? tiling : I915_TILING_NONE;
 
 	if (kgem->gen < 40) {
-		if (tiling) {
-			if (width * bpp > 8192 * 8) {
-				DBG(("%s: pitch too large for tliing [%d]\n",
-				     __FUNCTION__, width*bpp/8));
-				tiling = I915_TILING_NONE;
-				goto done;
-			} else if ((width|height) > 2048) {
-				DBG(("%s: large buffer (%dx%d), forcing TILING_X\n",
-				     __FUNCTION__, width, height));
-				tiling = -I915_TILING_X;
-			}
+		if (tiling && width * bpp > 8192 * 8) {
+			DBG(("%s: pitch too large for tliing [%d]\n",
+			     __FUNCTION__, width*bpp/8));
+			tiling = I915_TILING_NONE;
+			goto done;
 		}
 	} else {
 		if (width*bpp > (MAXSHORT-512) * 8) {
commit 342dda3fe361c8be2f3af5af1516cdc6a5fdcaa9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 10:55:06 2012 +0000

    sna/gen3: Prevent copy-fallback if we cannot blit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 67c8956..8c88722 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3987,6 +3987,10 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 	    src_bo->pitch > MAX_3D_PITCH ||
 	    too_large(src->drawable.width, src->drawable.height)) {
 fallback_blt:
+		if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
+		    !kgem_bo_can_blt(&sna->kgem, dst_bo))
+			return FALSE;
+
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
 						   dst, dst_bo, dst_dx, dst_dy,
commit 28c089781fdf74a9bac2e138e65383748558fcc1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 10:55:27 2012 +0000

    sna: Fixup the cpu shadow mappings before uploading the box
    
    On the off-chance we arrive here with a pointer to the GTT mapping.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 8be3e72..542cdb9 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -442,6 +442,7 @@ static struct kgem_bo *upload(struct sna *sna,
 			      PixmapPtr pixmap,
 			      BoxPtr box)
 {
+	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
 
 	DBG(("%s: box=(%d, %d), (%d, %d), pixmap=%dx%d\n",
@@ -451,6 +452,19 @@ static struct kgem_bo *upload(struct sna *sna,
 	assert(box->x2 <= pixmap->drawable.width);
 	assert(box->y2 <= pixmap->drawable.height);
 
+	priv = sna_pixmap(pixmap);
+	if (priv) {
+		/* As we know this box is on the CPU just fixup the shadow */
+		if (priv->mapped) {
+			pixmap->devPrivate.ptr = NULL;
+			priv->mapped = false;
+		}
+		if (pixmap->devPrivate.ptr == NULL) {
+			pixmap->devPrivate.ptr = priv->ptr;
+			pixmap->devKind = priv->stride;
+		}
+	}
+
 	bo = kgem_upload_source_image(&sna->kgem,
 				      pixmap->devPrivate.ptr, box,
 				      pixmap->devKind,
@@ -463,13 +477,11 @@ static struct kgem_bo *upload(struct sna *sna,
 		channel->scale[0] = 1.f/channel->width;
 		channel->scale[1] = 1.f/channel->height;
 
-		if (pixmap->usage_hint == 0 &&
+		if (priv &&
+		    pixmap->usage_hint == 0 &&
 		    channel->width  == pixmap->drawable.width &&
-		    channel->height == pixmap->drawable.height) {
-			struct sna_pixmap *priv = sna_pixmap(pixmap);
-			if (priv)
-				kgem_proxy_bo_attach(bo, &priv->gpu_bo);
-		}
+		    channel->height == pixmap->drawable.height)
+			kgem_proxy_bo_attach(bo, &priv->gpu_bo);
 	}
 
 	return bo;
commit 79258fe560d08dd3dd22b4782360e1597e54948c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 18 09:45:27 2012 +0000

    sna/traps: Apply some more operator and unbounded reductions
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 5594023..f2caf9a 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2698,6 +2698,8 @@ composite_unaligned_box(struct sna *sna,
 			float opacity,
 			pixman_region16_t *clip)
 {
+	assert(opacity != 0.);
+
 	if (clip) {
 		pixman_region16_t region;
 
@@ -3017,6 +3019,7 @@ composite_unaligned_boxes(struct sna *sna,
 {
 	BoxRec extents;
 	struct sna_composite_spans_op tmp;
+	struct sna_pixmap *priv;
 	pixman_region16_t clip, *c;
 	int dst_x, dst_y;
 	int dx, dy, n;
@@ -3030,7 +3033,8 @@ composite_unaligned_boxes(struct sna *sna,
 	if (ntrap > 1 && maskFormat)
 		return false;
 
-	if (!sna->render.composite_spans)
+	priv = sna_pixmap(get_drawable_pixmap(dst->pDrawable));
+	if (priv == NULL || !sna->render.composite_spans)
 		return composite_unaligned_boxes_fallback(op, src, dst, src_x, src_y, ntrap, traps);
 
 	dst_x = extents.x1 = pixman_fixed_to_int(traps[0].left.p1.x);
@@ -3090,6 +3094,17 @@ composite_unaligned_boxes(struct sna *sna,
 	     src_x + extents.x1 - dst_x - dx,
 	     src_y + extents.y1 - dst_y - dy));
 
+	switch (op) {
+	case PictOpAdd:
+		if (priv->clear && priv->clear_color == 0)
+			op = PictOpSrc;
+		break;
+	case PictOpIn:
+		if (priv->clear && priv->clear_color == 0)
+			return true;
+	}
+	assert((priv->clear && priv->clear_color == 0) || operator_is_bounded(op));
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite_spans(sna, op, src, dst,
 					 src_x + extents.x1 - dst_x - dx,
@@ -3321,6 +3336,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	BoxRec extents;
 	pixman_region16_t clip;
 	int16_t dst_x, dst_y;
+	bool was_clear;
 	int dx, dy, n;
 
 	if (NO_SCAN_CONVERTER)
@@ -3388,6 +3404,19 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     src_x + extents.x1 - dst_x - dx,
 	     src_y + extents.y1 - dst_y - dy));
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+	switch (op) {
+	case PictOpAdd:
+	case PictOpOver:
+		if (was_clear)
+			op = PictOpSrc;
+		break;
+	case PictOpIn:
+		if (was_clear)
+			return true;
+		break;
+	}
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite_spans(sna, op, src, dst,
 					 src_x + extents.x1 - dst_x - dx,
@@ -3422,7 +3451,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	tor_render(sna, &tor, &tmp, &clip,
 		   choose_span(&tmp, dst, maskFormat, op, &clip),
-		   maskFormat && !operator_is_bounded(op));
+		   !was_clear && maskFormat && !operator_is_bounded(op));
 
 skip:
 	tor_fini(&tor);
@@ -5150,6 +5179,7 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	pixman_region16_t clip;
 	int16_t dst_x, dst_y;
 	int dx, dy, n;
+	bool was_clear;
 
 	if (NO_SCAN_CONVERTER)
 		return false;
@@ -5216,6 +5246,8 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     src_x + extents.x1 - dst_x - dx,
 	     src_y + extents.y1 - dst_y - dy));
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite_spans(sna, op, src, dst,
 					 src_x + extents.x1 - dst_x - dx,
@@ -5248,7 +5280,7 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	tor_render(sna, &tor, &tmp, &clip,
 		   choose_span(&tmp, dst, maskFormat, op, &clip),
-		   maskFormat && !operator_is_bounded(op));
+		   !was_clear && maskFormat && !operator_is_bounded(op));
 
 skip:
 	tor_fini(&tor);
@@ -5508,6 +5540,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	int16_t dst_x, dst_y;
 	int dx, dy;
 	int cw, ccw, n;
+	bool was_clear;
 
 	if (NO_SCAN_CONVERTER)
 		return false;
@@ -5569,6 +5602,8 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     src_x + extents.x1 - dst_x - dx,
 	     src_y + extents.y1 - dst_y - dy));
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite_spans(sna, op, src, dst,
 					 src_x + extents.x1 - dst_x - dx,
@@ -5611,7 +5646,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 	tor_render(sna, &tor, &tmp, &clip,
 		   choose_span(&tmp, dst, maskFormat, op, &clip),
-		   maskFormat && !operator_is_bounded(op));
+		   !was_clear && maskFormat && !operator_is_bounded(op));
 
 skip:
 	tor_fini(&tor);
commit fe8866d6112c3e187d6682e9e4610325668427a0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 17 21:49:56 2012 +0000

    sna/gen[345]: Convert CPU mappings to GTT for vertices on submit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 0991a98..67c8956 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1680,14 +1680,21 @@ static void gen3_vertex_close(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (IS_CPU_MAP(bo->map) ||
-		    sna->render.vertex_size - sna->render.vertex_used < 64) {
-			DBG(("%s: discarding vbo (was CPU mapped)\n",
-			     __FUNCTION__));
+		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding full vbo\n", __FUNCTION__));
 			sna->render.vbo = NULL;
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 			free_bo = bo;
+		} else if (IS_CPU_MAP(bo->map)) {
+			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
+			sna->render.vertices = kgem_bo_map__gtt(&sna->kgem, bo);
+			if (sna->render.vertices == NULL) {
+				sna->render.vbo = NULL;
+				sna->render.vertices = sna->render.vertex_data;
+				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+				free_bo = bo;
+			}
 		}
 	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
@@ -1950,6 +1957,15 @@ gen3_render_reset(struct sna *sna)
 	state->last_floats_per_vertex = 0;
 	state->last_vertex_offset = 0;
 	state->vertex_offset = 0;
+
+	if (sna->render.vbo &&
+	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
+		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	}
 }
 
 static void
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index a69852e..def5d19 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -419,9 +419,14 @@ static int gen4_vertex_finish(struct sna *sna)
 
 static void gen4_vertex_close(struct sna *sna)
 {
-	struct kgem_bo *bo;
+	struct kgem_bo *bo, *free_bo = NULL;
 	unsigned int i, delta = 0;
 
+	assert(sna->render_state.gen4.vertex_offset == 0);
+
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -429,10 +434,26 @@ static void gen4_vertex_close(struct sna *sna)
 		return;
 	}
 
-	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
-
 	bo = sna->render.vbo;
-	if (bo == NULL) {
+	if (bo) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding full vbo\n", __FUNCTION__));
+			sna->render.vbo = NULL;
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			free_bo = bo;
+		} else if (IS_CPU_MAP(bo->map)) {
+			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
+			sna->render.vertices =
+				kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
+			if (sna->render.vertices == NULL) {
+				sna->render.vbo = NULL;
+				sna->render.vertices = sna->render.vertex_data;
+				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+				free_bo = bo;
+			}
+		}
+	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -449,10 +470,11 @@ static void gen4_vertex_close(struct sna *sna)
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
 				kgem_bo_destroy(&sna->kgem, bo);
-				goto reset;
+				bo = NULL;
 			}
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
+			free_bo = bo;
 		}
 	}
 
@@ -471,17 +493,13 @@ static void gen4_vertex_close(struct sna *sna)
 		}
 	}
 
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-	sna->render.vertex_used = 0;
-	sna->render.vertex_index = 0;
-	sna->render_state.gen4.vb_id = 0;
+	if (sna->render.vbo == NULL) {
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 
-	sna->render.vbo = NULL;
-	sna->render.vertices = sna->render.vertex_data;
-	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	if (free_bo)
+		kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 
@@ -3207,6 +3225,15 @@ static void gen4_render_reset(struct sna *sna)
 	sna->render_state.gen4.drawrect_offset = -1;
 	sna->render_state.gen4.drawrect_limit = -1;
 	sna->render_state.gen4.surface_table = -1;
+
+	if (sna->render.vbo &&
+	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
+		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	}
 }
 
 static void gen4_render_fini(struct sna *sna)
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 01604ef..565d22a 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -428,14 +428,22 @@ static void gen5_vertex_close(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (IS_CPU_MAP(bo->map) ||
-		    sna->render.vertex_size - sna->render.vertex_used < 64) {
-			DBG(("%s: discarding vbo (was CPU mapped)\n",
-			     __FUNCTION__));
+		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding full vbo\n", __FUNCTION__));
 			sna->render.vbo = NULL;
 			sna->render.vertices = sna->render.vertex_data;
 			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
 			free_bo = bo;
+		} else if (IS_CPU_MAP(bo->map)) {
+			DBG(("%s: converting CPU map to GTT\n", __FUNCTION__));
+			sna->render.vertices =
+				kgem_bo_map__gtt(&sna->kgem, sna->render.vbo);
+			if (sna->render.vertices == NULL) {
+				sna->render.vbo = NULL;
+				sna->render.vertices = sna->render.vertex_data;
+				sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+				free_bo = bo;
+			}
 		}
 	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
@@ -3655,6 +3663,15 @@ static void gen5_render_reset(struct sna *sna)
 	sna->render_state.gen5.drawrect_offset = -1;
 	sna->render_state.gen5.drawrect_limit = -1;
 	sna->render_state.gen5.surface_table = -1;
+
+	if (sna->render.vbo &&
+	    !kgem_bo_is_mappable(&sna->kgem, sna->render.vbo)) {
+		DBG(("%s: discarding unmappable vbo\n", __FUNCTION__));
+		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	}
 }
 
 static void gen5_render_fini(struct sna *sna)
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index daca7af..8a2222c 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3190,6 +3190,43 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	return ptr;
 }
 
+void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo)
+{
+	void *ptr;
+
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+
+	assert(!bo->purged);
+	assert(bo->exec == NULL);
+	assert(list_is_empty(&bo->list));
+
+	if (IS_CPU_MAP(bo->map))
+		kgem_bo_release_map(kgem, bo);
+
+	ptr = bo->map;
+	if (ptr == NULL) {
+		assert(bytes(bo) <= kgem->aperture_mappable / 4);
+
+		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
+
+		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
+			       PROT_READ | PROT_WRITE);
+		if (ptr == NULL)
+			return NULL;
+
+		/* Cache this mapping to avoid the overhead of an
+		 * excruciatingly slow GTT pagefault. This is more an
+		 * issue with compositing managers which need to frequently
+		 * flush CPU damage to their GPU bo.
+		 */
+		bo->map = ptr;
+		DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
+	}
+
+	return ptr;
+}
+
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (bo->map)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 98534d9..27e0e04 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -364,6 +364,7 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
 			uint32_t delta);
 
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
+void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
@@ -425,9 +426,6 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 	if (bo->domain == DOMAIN_GTT)
 		return true;
 
-	if (IS_GTT_MAP(bo->map))
-		return true;
-
 	if (kgem->gen < 40 && bo->tiling &&
 	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
 		return false;
commit 97cd0c7da51024400e8900e46f51620a5f7ad402
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 17 23:57:46 2012 +0000

    sna/traps: Upon reducing an ADD to a SRC, we need to apply the pending clear
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47444
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 3a93450..5594023 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4077,6 +4077,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	struct sna_pixmap *priv;
 	RegionRec region;
 	uint32_t color;
+	bool unbounded;
 	int16_t dst_x, dst_y;
 	int dx, dy;
 	int n;
@@ -4125,19 +4126,27 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 	}
 
+	unbounded = false;
 	switch (op) {
 	case PictOpAdd:
-		if (priv->clear && priv->clear_color == 0)
+		if (priv->clear && priv->clear_color == 0) {
+			unbounded = true;
 			op = PictOpSrc;
+		}
 		if ((color >> 24) == 0)
 			return true;
 		break;
 	case PictOpIn:
 		if (priv->clear && priv->clear_color == 0)
 			return true;
+		if (priv->clear && priv->clear_color == 0xff)
+			op = PictOpSrc;
 		if ((color >> 24) == 0)
 			return true;
+		unbounded = true;
+		break;
 	case PictOpSrc:
+		unbounded = !(priv->clear && priv->clear_color == 0);
 		break;
 	default:
 		DBG(("%s: fallback -- can not perform op [%d] in place\n",
@@ -4237,7 +4246,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	inplace.opacity = color >> 24;
 
 	tor_render(NULL, &tor, (void*)&inplace,
-		   dst->pCompositeClip, span, op == PictOpIn);
+		   dst->pCompositeClip, span, unbounded);
 
 	tor_fini(&tor);
 
commit e31d9dacafe060dc86de801114b475fdd0142eb6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 17 09:21:00 2012 +0000

    sna/traps: Align indices for unrolled memset in row_inplace()
    
    The compiler presumes that the uint64_t write is naturally aligned and
    so may emit code that crashes with an unaligned move. To work around
    this, make sure the write is so aligned.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=47418
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 31a661e..3a93450 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1499,6 +1499,21 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 				else
 					memset(row+lix, 0xff, rix);
 #else
+				if (lix & 1 && rix) {
+					row[lix] = 0xff;
+					lix++;
+					rix--;
+				}
+				if (lix & 2 && rix >= 2) {
+					*(uint16_t *)(row+lix) = 0xffff;
+					lix += 2;
+					rix -= 2;
+				}
+				if (lix & 4 && rix >= 4) {
+					*(uint32_t *)(row+lix) = 0xffffffff;
+					lix += 4;
+					rix -= 4;
+				}
 				while (rix >= 8) {
 					*(uint64_t *)(row+lix) = 0xffffffffffffffff;
 					lix += 8;
commit 2b4e11923d9f683f43acf8053bcec1701df25c1f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 17 00:05:47 2012 +0000

    sna/traps: Tune inplace_end_subrows()
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 52be00b..31a661e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1617,51 +1617,62 @@ inplace_end_subrows(struct active_list *active, uint8_t *row,
 {
 	int cover = 0;
 
-	while (width > 4) {
+	while (width >= 4) {
 		uint32_t dw;
 		int v;
 
 		dw = *(uint32_t *)buf;
 		buf += 4;
 
-		if (dw == 0){
+		if (dw == 0) {
 			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
 			v -= v >> 8;
 			v |= v << 8;
 			dw = v | v << 16;
-		} else if (dw) {
+		} else {
 			cover += (int8_t)(dw & 0xff);
-			assert(cover >= 0);
-			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
-			v -= v >> 8;
-			dw >>= 8;
-			dw |= v << 24;
+			if (cover) {
+				assert(cover > 0);
+				v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+				v -= v >> 8;
+				dw >>= 8;
+				dw |= v << 24;
+			} else
+				dw >>= 8;
 
 			cover += (int8_t)(dw & 0xff);
-			assert(cover >= 0);
-			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
-			v -= v >> 8;
-			dw >>= 8;
-			dw |= v << 24;
+			if (cover) {
+				assert(cover > 0);
+				v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+				v -= v >> 8;
+				dw >>= 8;
+				dw |= v << 24;
+			} else
+				dw >>= 8;
 
 			cover += (int8_t)(dw & 0xff);
-			assert(cover >= 0);
-			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
-			v -= v >> 8;
-			dw >>= 8;
-			dw |= v << 24;
+			if (cover) {
+				assert(cover > 0);
+				v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+				v -= v >> 8;
+				dw >>= 8;
+				dw |= v << 24;
+			} else
+				dw >>= 8;
 
 			cover += (int8_t)(dw & 0xff);
-			assert(cover >= 0);
-			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
-			v -= v >> 8;
-			dw >>= 8;
-			dw |= v << 24;
+			if (cover) {
+				assert(cover > 0);
+				v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+				v -= v >> 8;
+				dw >>= 8;
+				dw |= v << 24;
+			} else
+				dw >>= 8;
 		}
 
 		*(uint32_t *)row = dw;
 		row += 4;
-
 		width -= 4;
 	}
 
commit d887f209f1210a294afc2c3883a97f7ac9f42bad
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 17:19:11 2012 +0000

    sna/dri: Mesa expects the 8-bit stencil buffer to have 2-bytes per pixel
    
    The separate stencil buffer is full of lies, why worry about one more?
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 283b8db..e2eed1d 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -255,12 +255,11 @@ sna_dri_create_buffer(DrawablePtr drawable,
 		 * W fencing.
 		 */
 		bpp = format ? format : drawable->bitsPerPixel;
+		bpp *= 2;
 		bo = kgem_create_2d(&sna->kgem,
 				    ALIGN(drawable->width, 64),
 				    ALIGN((drawable->height + 1) / 2, 64),
-				    2*bpp,
-				    I915_TILING_NONE,
-				    CREATE_EXACT);
+				    bpp, I915_TILING_NONE, CREATE_EXACT);
 		break;
 
 	case DRI2BufferDepth:
commit 63c0d10faee3c7cca050505c2e81c416119e57e9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 14:51:53 2012 +0000

    sna/dri: Improve handling of non-front attachments for CopyRegion
    
    Confusion reigns between using the backing pixmap for the drawable for
    the front buffer, and a fake pixmap for the auxiliary buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index ccaf40f..283b8db 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -394,9 +394,9 @@ static void set_bo(PixmapPtr pixmap, struct kgem_bo *bo)
 }
 
 static void
-sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
-	     struct kgem_bo *dst_bo,struct kgem_bo *src_bo,
-	     bool sync)
+sna_dri_copy_to_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
+		      struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
+		      bool sync)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(draw);
 	pixman_region16_t clip;
@@ -477,18 +477,8 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 		boxes = &box;
 		n = 1;
 	}
-	/* It's important that this copy gets submitted before the
-	 * direct rendering client submits rendering for the next
-	 * frame, but we don't actually need to submit right now.  The
-	 * client will wait for the DRI2CopyRegion reply or the swap
-	 * buffer event before rendering, and we'll hit the flush
-	 * callback chain before those messages are sent.  We submit
-	 * our batch buffers from the flush callback chain so we know
-	 * that will happen before the client tries to render
-	 * again.
-	 */
 	sna->render.copy_boxes(sna, GXcopy,
-			       pixmap, src_bo, -draw->x, -draw->y,
+			       (PixmapPtr)draw, src_bo, -draw->x, -draw->y,
 			       pixmap, dst_bo, dx, dy,
 			       boxes, n);
 
@@ -505,6 +495,137 @@ sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
 }
 
 static void
+sna_dri_copy_from_front(struct sna *sna, DrawablePtr draw, RegionPtr region,
+			struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
+			bool sync)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(draw);
+	pixman_region16_t clip;
+	BoxRec box, *boxes;
+	int16_t dx, dy;
+	int n;
+
+	box.x1 = draw->x;
+	box.y1 = draw->y;
+	box.x2 = draw->x + draw->width;
+	box.y2 = draw->y + draw->height;
+
+	if (region) {
+		pixman_region_translate(region, draw->x, draw->y);
+		pixman_region_init_rects(&clip, &box, 1);
+		pixman_region_intersect(&clip, &clip, region);
+		region = &clip;
+
+		if (!pixman_region_not_empty(region)) {
+			DBG(("%s: all clipped\n", __FUNCTION__));
+			return;
+		}
+	}
+
+	dx = dy = 0;
+	if (draw->type != DRAWABLE_PIXMAP) {
+		WindowPtr win = (WindowPtr)draw;
+
+		DBG(("%s: draw=(%d, %d), delta=(%d, %d), clip.extents=(%d, %d), (%d, %d)\n",
+		     __FUNCTION__, draw->x, draw->y,
+		     get_drawable_dx(draw), get_drawable_dy(draw),
+		     win->clipList.extents.x1, win->clipList.extents.y1,
+		     win->clipList.extents.x2, win->clipList.extents.y2));
+
+		if (region == NULL) {
+			pixman_region_init_rects(&clip, &box, 1);
+			region = &clip;
+		}
+
+		pixman_region_intersect(region, &win->clipList, region);
+		if (!pixman_region_not_empty(region)) {
+			DBG(("%s: all clipped\n", __FUNCTION__));
+			return;
+		}
+
+		get_drawable_deltas(draw, pixmap, &dx, &dy);
+	}
+
+	if (sna->kgem.gen >= 60)
+		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+
+	if (region) {
+		boxes = REGION_RECTS(region);
+		n = REGION_NUM_RECTS(region);
+		assert(n);
+	} else {
+		pixman_region_init_rects(&clip, &box, 1);
+		region = &clip;
+		boxes = &box;
+		n = 1;
+	}
+	sna->render.copy_boxes(sna, GXcopy,
+			       pixmap, src_bo, dx, dy,
+			       (PixmapPtr)draw, dst_bo, -draw->x, -draw->y,
+			       boxes, n);
+
+	if (region == &clip)
+		pixman_region_fini(&clip);
+}
+
+static void
+sna_dri_copy(struct sna *sna, DrawablePtr draw, RegionPtr region,
+	     struct kgem_bo *dst_bo, struct kgem_bo *src_bo,
+	     bool sync)
+{
+	pixman_region16_t clip;
+	BoxRec box, *boxes;
+	int n;
+
+	box.x1 = 0;
+	box.y1 = 0;
+	box.x2 = draw->width;
+	box.y2 = draw->height;
+
+	if (region) {
+		pixman_region_init_rects(&clip, &box, 1);
+		pixman_region_intersect(&clip, &clip, region);
+		region = &clip;
+
+		if (!pixman_region_not_empty(region)) {
+			DBG(("%s: all clipped\n", __FUNCTION__));
+			return;
+		}
+
+		boxes = REGION_RECTS(region);
+		n = REGION_NUM_RECTS(region);
+		assert(n);
+	} else {
+		boxes = &box;
+		n = 1;
+	}
+
+	if (sna->kgem.gen >= 60) {
+		/* Sandybridge introduced a separate ring which it uses to
+		 * perform blits. Switching rendering between rings incurs
+		 * a stall as we wait upon the old ring to finish and
+		 * flush its render cache before we can proceed on with
+		 * the operation on the new ring.
+		 *
+		 * As this buffer, we presume, has just been written to by
+		 * the DRI client using the RENDER ring, we want to perform
+		 * our operation on the same ring, and ideally on the same
+		 * ring as we will flip from (which should be the RENDER ring
+		 * as well).
+		 */
+		kgem_set_mode(&sna->kgem, KGEM_RENDER);
+	}
+
+	sna->render.copy_boxes(sna, GXcopy,
+			       (PixmapPtr)draw, src_bo, 0, 0,
+			       (PixmapPtr)draw, dst_bo, 0, 0,
+			       boxes, n);
+
+	if (region == &clip)
+		pixman_region_fini(&clip);
+}
+
+static void
 sna_dri_copy_region(DrawablePtr draw,
 		    RegionPtr region,
 		    DRI2BufferPtr dst_buffer,
@@ -513,15 +634,20 @@ sna_dri_copy_region(DrawablePtr draw,
 	PixmapPtr pixmap = get_drawable_pixmap(draw);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct kgem_bo *src, *dst;
+	void (*copy)(struct sna *, DrawablePtr, RegionPtr,
+		     struct kgem_bo *, struct kgem_bo *, bool) = sna_dri_copy;
 
-	if (dst_buffer->attachment == DRI2BufferFrontLeft)
+	if (dst_buffer->attachment == DRI2BufferFrontLeft) {
 		dst = sna_pixmap_set_dri(sna, pixmap);
-	else
+		copy = sna_dri_copy_to_front;
+	} else
 		dst = get_private(dst_buffer)->bo;
 
-	if (src_buffer->attachment == DRI2BufferFrontLeft)
+	if (src_buffer->attachment == DRI2BufferFrontLeft) {
 		src = sna_pixmap_set_dri(sna, pixmap);
-	else
+		assert(copy == sna_dri_copy);
+		copy = sna_dri_copy_from_front;
+	} else
 		src = get_private(src_buffer)->bo;
 
 	assert(dst != NULL);
@@ -534,13 +660,13 @@ sna_dri_copy_region(DrawablePtr draw,
 	DBG(("%s: src -- attachment=%d, name=%d, handle=%d\n",
 	     __FUNCTION__,
 	     src_buffer->attachment, src_buffer->name, src->handle));
-	DBG(("%s: clip (%d, %d), (%d, %d) x %d\n",
+	DBG(("%s: region (%d, %d), (%d, %d) x %d\n",
 	     __FUNCTION__,
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2,
 	     REGION_NUM_RECTS(region)));
 
-	sna_dri_copy(sna, draw, region, dst, src, false);
+	copy(sna, draw, region, dst, src, false);
 }
 
 #if DRI2INFOREC_VERSION >= 4
@@ -916,12 +1042,12 @@ static void sna_dri_vblank_handle(int fd,
 			info->old_front.bo = NULL;
 			return;
 		}
-		/* else fall through to exchange/blit */
+		/* else fall through to blit */
 	case DRI2_SWAP:
-		sna_dri_copy(sna, draw, NULL,
-			     get_private(info->front)->bo,
-			     get_private(info->back)->bo,
-			     true);
+		sna_dri_copy_to_front(sna, draw, NULL,
+				      get_private(info->front)->bo,
+				      get_private(info->back)->bo,
+				      true);
 	case DRI2_SWAP_THROTTLE:
 		DBG(("%s: %d complete, frame=%d tv=%d.%06d\n",
 		     __FUNCTION__, info->type, frame, tv_sec, tv_usec));
@@ -1451,10 +1577,10 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 			 DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
 		 }
 
-		 sna_dri_copy(sna, draw, NULL,
-			      get_private(front)->bo,
-			      get_private(back)->bo,
-			      true);
+		 sna_dri_copy_to_front(sna, draw, NULL,
+				       get_private(front)->bo,
+				       get_private(back)->bo,
+				       true);
 		 return TRUE;
 	}
 
@@ -1540,10 +1666,10 @@ sna_dri_schedule_swap(ClientPtr client, DrawablePtr draw, DRI2BufferPtr front,
 
 blit_fallback:
 	DBG(("%s -- blit\n", __FUNCTION__));
-	sna_dri_copy(sna, draw, NULL,
-		     get_private(front)->bo,
-		     get_private(back)->bo,
-		     true);
+	sna_dri_copy_to_front(sna, draw, NULL,
+			      get_private(front)->bo,
+			      get_private(back)->bo,
+			      false);
 	if (info)
 		sna_dri_frame_event_info_free(info);
 	DRI2SwapComplete(client, draw, 0, 0, 0, DRI2_BLIT_COMPLETE, func, data);
@@ -1584,10 +1710,10 @@ sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 blit:
 		DBG(("%s: unable to flip, so blit\n", __FUNCTION__));
 
-		sna_dri_copy(sna, draw, NULL,
-			     get_private(front)->bo,
-			     get_private(back)->bo,
-			     false);
+		sna_dri_copy_to_front(sna, draw, NULL,
+				      get_private(front)->bo,
+				      get_private(back)->bo,
+				      false);
 		DRI2SwapComplete(client, draw, 0, 0, 0,
 				 DRI2_BLIT_COMPLETE, func, data);
 		return FALSE;
commit 324a1dffdc7dd896224fab265bd6a9cf99d7587f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 13:07:00 2012 +0000

    sna/gen3: Micro-optimise solid span emission
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index c567d6b..0991a98 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1803,7 +1803,7 @@ inline static int gen3_get_rectangles(struct sna *sna,
 
 start:
 	rem = vertex_space(sna);
-	if (op->floats_per_rect > rem) {
+	if (unlikely(op->floats_per_rect > rem)) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen3_get_rectangles__flush(sna, op);
@@ -3194,6 +3194,33 @@ gen3_emit_composite_spans_primitive(struct sna *sna,
 }
 
 fastcall static void
+gen3_render_composite_spans_constant_box(struct sna *sna,
+					 const struct sna_composite_spans_op *op,
+					 const BoxRec *box, float opacity)
+{
+	float *v;
+	DBG(("%s: src=+(%d, %d), opacity=%f, dst=+(%d, %d), box=(%d, %d) x (%d, %d)\n",
+	     __FUNCTION__,
+	     op->base.src.offset[0], op->base.src.offset[1],
+	     opacity,
+	     op->base.dst.x, op->base.dst.y,
+	     box->x1, box->y1,
+	     box->x2 - box->x1,
+	     box->y2 - box->y1));
+
+	gen3_get_rectangles(sna, &op->base, 1);
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 9;
+
+	v[0] = box->x2;
+	v[6] = v[3] = box->x1;
+	v[4] = v[1] = box->y2;
+	v[7] = box->y1;
+	v[8] = v[5] = v[2] = opacity;
+}
+
+fastcall static void
 gen3_render_composite_spans_box(struct sna *sna,
 				const struct sna_composite_spans_op *op,
 				const BoxRec *box, float opacity)
@@ -3328,6 +3355,9 @@ gen3_render_composite_spans(struct sna *sna,
 		tmp->base.mask.u.gen3.type = SHADER_OPACITY;
 
 	no_offset = tmp->base.dst.x == 0 && tmp->base.dst.y == 0;
+	tmp->box   = gen3_render_composite_spans_box;
+	tmp->boxes = gen3_render_composite_spans_boxes;
+	tmp->done  = gen3_render_composite_spans_done;
 	tmp->prim_emit = gen3_emit_composite_spans_primitive;
 	switch (tmp->base.src.u.gen3.type) {
 	case SHADER_NONE:
@@ -3338,7 +3368,11 @@ gen3_render_composite_spans(struct sna *sna,
 	case SHADER_BLACK:
 	case SHADER_WHITE:
 	case SHADER_CONSTANT:
-		tmp->prim_emit = no_offset ? gen3_emit_composite_spans_primitive_constant_no_offset : gen3_emit_composite_spans_primitive_constant;
+		if (no_offset) {
+			tmp->box = gen3_render_composite_spans_constant_box;
+			tmp->prim_emit = gen3_emit_composite_spans_primitive_constant_no_offset;
+		} else
+			tmp->prim_emit = gen3_emit_composite_spans_primitive_constant;
 		break;
 	case SHADER_LINEAR:
 	case SHADER_RADIAL:
@@ -3364,10 +3398,6 @@ gen3_render_composite_spans(struct sna *sna,
 		tmp->base.mask.u.gen3.type == SHADER_OPACITY;
 	tmp->base.floats_per_rect = 3 * tmp->base.floats_per_vertex;
 
-	tmp->box   = gen3_render_composite_spans_box;
-	tmp->boxes = gen3_render_composite_spans_boxes;
-	tmp->done  = gen3_render_composite_spans_done;
-
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
 			   NULL)) {
commit 47c47b85f6b7ef5cbc2192da9ef0cee91f6744ca
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 12:59:13 2012 +0000

    sna/traps: Micro-optimise span emission
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 4dab0f9..52be00b 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1141,6 +1141,18 @@ tor_blt_span(struct sna *sna,
 }
 
 static void
+tor_blt_span__no_damage(struct sna *sna,
+			struct sna_composite_spans_op *op,
+			pixman_region16_t *clip,
+			const BoxRec *box,
+			int coverage)
+{
+	__DBG(("%s: %d -> %d @ %d\n", __FUNCTION__, box->x1, box->x2, coverage));
+
+	op->box(sna, op, box, AREA_TO_ALPHA(coverage));
+}
+
+static void
 tor_blt_span_clipped(struct sna *sna,
 		     struct sna_composite_spans_op *op,
 		     pixman_region16_t *clip,
@@ -3101,7 +3113,8 @@ project_trapezoid_onto_grid(const xTrapezoid *in,
 }
 
 static span_func_t
-choose_span(PicturePtr dst,
+choose_span(struct sna_composite_spans_op *tmp,
+	    PicturePtr dst,
 	    PictFormatPtr maskFormat,
 	    uint8_t op,
 	    RegionPtr clip)
@@ -3120,9 +3133,12 @@ choose_span(PicturePtr dst,
 				span = tor_blt_span_mono_clipped;
 		}
 	} else {
-		span = tor_blt_span;
 		if (REGION_NUM_RECTS(clip) > 1)
 			span = tor_blt_span_clipped;
+		else if (tmp->base.damage == NULL)
+			span = tor_blt_span__no_damage;
+		else
+			span = tor_blt_span;
 	}
 
 	return span;
@@ -3379,7 +3395,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	tor_render(sna, &tor, &tmp, &clip,
-		   choose_span(dst, maskFormat, op, &clip),
+		   choose_span(&tmp, dst, maskFormat, op, &clip),
 		   maskFormat && !operator_is_bounded(op));
 
 skip:
@@ -4669,7 +4685,7 @@ trap_span_converter(PicturePtr dst,
 	}
 
 	tor_render(sna, &tor, &tmp, clip,
-		   choose_span(dst, NULL, PictOpAdd, clip), false);
+		   choose_span(&tmp, dst, NULL, PictOpAdd, clip), false);
 
 skip:
 	tor_fini(&tor);
@@ -5196,7 +5212,7 @@ triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	tor_render(sna, &tor, &tmp, &clip,
-		   choose_span(dst, maskFormat, op, &clip),
+		   choose_span(&tmp, dst, maskFormat, op, &clip),
 		   maskFormat && !operator_is_bounded(op));
 
 skip:
@@ -5559,7 +5575,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	assert(tor.polygon->num_edges <= 2*count);
 
 	tor_render(sna, &tor, &tmp, &clip,
-		   choose_span(dst, maskFormat, op, &clip),
+		   choose_span(&tmp, dst, maskFormat, op, &clip),
 		   maskFormat && !operator_is_bounded(op));
 
 skip:
commit 3a26437f195bf3e5d5913f5552a0b838cafb9b2c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 12:37:25 2012 +0000

    sna/traps: Tune nonzero_row
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c1d42a7..4dab0f9 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -591,20 +591,31 @@ cell_list_add_subspan(struct cell_list *cells,
 		cell->uncovered_area += 2*(fx1-fx2);
 }
 
-static void
-cell_list_render_edge(struct cell_list *cells, struct edge *edge, int sign)
+inline static void
+cell_list_add_span(struct cell_list *cells,
+		   grid_scaled_x_t x1,
+		   grid_scaled_x_t x2)
 {
 	struct cell *cell;
-	grid_scaled_x_t fx;
-	int ix;
+	int ix1, fx1;
+	int ix2, fx2;
+
+	FAST_SAMPLES_X_TO_INT_FRAC(x1, ix1, fx1);
+	FAST_SAMPLES_X_TO_INT_FRAC(x2, ix2, fx2);
+
+	__DBG(("%s: x1=%d (%d+%d), x2=%d (%d+%d)\n", __FUNCTION__,
+	       x1, ix1, fx1, x2, ix2, fx2));
 
-	FAST_SAMPLES_X_TO_INT_FRAC(edge->x.quo, ix, fx);
+	cell = cell_list_find(cells, ix1);
+	if (ix1 != ix2) {
+		cell->uncovered_area += 2*fx1;
+		cell->covered_height += FAST_SAMPLES_Y;
 
-	/* We always know that ix1 is >= the cell list cursor in this
-	 * case due to the no-intersections precondition.  */
-	cell = cell_list_find(cells, ix);
-	cell->covered_height += sign*FAST_SAMPLES_Y;
-	cell->uncovered_area += sign*2*fx*FAST_SAMPLES_Y;
+		cell = cell_list_find(cells, ix2);
+		cell->uncovered_area -= 2*fx2;
+		cell->covered_height -= FAST_SAMPLES_Y;
+	} else
+		cell->uncovered_area += 2*(fx1-fx2);
 }
 
 static void
@@ -1054,9 +1065,7 @@ nonzero_row(struct active_list *active, struct cell_list *coverages)
 			right = right->next;
 		} while (1);
 
-		cell_list_render_edge(coverages, left, +1);
-		cell_list_render_edge(coverages, right, -1);
-
+		cell_list_add_span(coverages, left->x.quo, right->x.quo);
 		left = right->next;
 	}
 }
commit cb04cf9f4395c258987faead80de5c3a2c93082e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 09:28:24 2012 +0000

    sna/traps: Make the inline u8 arithmetic more robust
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index dca9d1f..c1d42a7 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3553,6 +3553,19 @@ struct inplace {
 	uint8_t opacity;
 };
 
+static inline uint8_t
+mul_8_8(uint8_t a, uint8_t b)
+{
+    uint16_t t = a * (uint16_t)b + 0x7f;
+    return ((t >> 8) + t) >> 8;
+}
+
+static uint8_t coverage_opacity(int coverage, uint8_t opacity)
+{
+	coverage = coverage * 256 / FAST_SAMPLES_XY;
+	return mul_8_8(coverage - (coverage >> 8), opacity);
+}
+
 static void
 tor_blt_src(struct sna *sna,
 	    struct sna_composite_spans_op *op,
@@ -3564,7 +3577,7 @@ tor_blt_src(struct sna *sna,
 	uint8_t *ptr = in->ptr;
 	int h, w;
 
-	coverage = coverage * in->opacity / FAST_SAMPLES_XY;
+	coverage = coverage_opacity(coverage, in->opacity);
 
 	ptr += box->y1 * in->stride + box->x1;
 
@@ -3613,19 +3626,22 @@ tor_blt_in(struct sna *sna,
 	uint8_t *ptr = in->ptr;
 	int h, w, i;
 
-	coverage = coverage * in->opacity / FAST_SAMPLES_XY;
 	if (coverage == 0) {
 		tor_blt_src(sna, op, clip, box, 0);
 		return;
 	}
 
+	coverage = coverage_opacity(coverage, in->opacity);
+	if (coverage == 0xff)
+		return;
+
 	ptr += box->y1 * in->stride + box->x1;
 
 	h = box->y2 - box->y1;
 	w = box->x2 - box->x1;
 	do {
 		for (i = 0; i < w; i++)
-			ptr[i] = (ptr[i] * coverage) >> 8;
+			ptr[i] = mul_8_8(ptr[i], coverage);
 		ptr += in->stride;
 	} while (--h);
 }
@@ -3660,10 +3676,15 @@ tor_blt_add(struct sna *sna,
 	uint8_t *ptr = in->ptr;
 	int h, w, v, i;
 
-	coverage = coverage * in->opacity / FAST_SAMPLES_XY;
 	if (coverage == 0)
 		return;
 
+	coverage = coverage_opacity(coverage, in->opacity);
+	if (coverage == 0xff) {
+		tor_blt_src(sna, op, clip, box, 0xff);
+		return;
+	}
+
 	ptr += box->y1 * in->stride + box->x1;
 
 	h = box->y2 - box->y1;
@@ -4057,10 +4078,14 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	case PictOpAdd:
 		if (priv->clear && priv->clear_color == 0)
 			op = PictOpSrc;
+		if ((color >> 24) == 0)
+			return true;
 		break;
 	case PictOpIn:
 		if (priv->clear && priv->clear_color == 0)
 			return true;
+		if ((color >> 24) == 0)
+			return true;
 	case PictOpSrc:
 		break;
 	default:
commit d55bbdf42c75eb0fdb634e963ce81953583964b9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 16 09:18:29 2012 +0000

    sna/traps: Remove the old paths for mono inplace traps
    
    Dead code elimination.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 907ece0..dca9d1f 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3603,28 +3603,6 @@ tor_blt_src_clipped(struct sna *sna,
 }
 
 static void
-tor_blt_src_mono(struct sna *sna,
-		 struct sna_composite_spans_op *op,
-		 pixman_region16_t *clip,
-		 const BoxRec *box,
-		 int coverage)
-{
-	tor_blt_src(sna, op, clip, box,
-		    coverage < FAST_SAMPLES_XY/2 ? 0 : FAST_SAMPLES_XY);
-}
-
-static void
-tor_blt_src_clipped_mono(struct sna *sna,
-			 struct sna_composite_spans_op *op,
-			 pixman_region16_t *clip,
-			 const BoxRec *box,
-			 int coverage)
-{
-	tor_blt_src_clipped(sna, op, clip, box,
-			    coverage < FAST_SAMPLES_XY/2 ? 0 : FAST_SAMPLES_XY);
-}
-
-static void
 tor_blt_in(struct sna *sna,
 	   struct sna_composite_spans_op *op,
 	   pixman_region16_t *clip,
@@ -3672,28 +3650,6 @@ tor_blt_in_clipped(struct sna *sna,
 }
 
 static void
-tor_blt_in_mono(struct sna *sna,
-		struct sna_composite_spans_op *op,
-		pixman_region16_t *clip,
-		const BoxRec *box,
-		int coverage)
-{
-	tor_blt_in(sna, op, clip, box,
-		   coverage < FAST_SAMPLES_XY/2 ? 0 : FAST_SAMPLES_XY);
-}
-
-static void
-tor_blt_in_clipped_mono(struct sna *sna,
-			struct sna_composite_spans_op *op,
-			pixman_region16_t *clip,
-			const BoxRec *box,
-			int coverage)
-{
-	tor_blt_in_clipped(sna, op, clip, box,
-			   coverage < FAST_SAMPLES_XY/2 ? 0 : FAST_SAMPLES_XY);
-}
-
-static void
 tor_blt_add(struct sna *sna,
 	    struct sna_composite_spans_op *op,
 	    pixman_region16_t *clip,
@@ -3745,28 +3701,6 @@ tor_blt_add_clipped(struct sna *sna,
 	pixman_region_fini(&region);
 }
 
-static void
-tor_blt_add_mono(struct sna *sna,
-		 struct sna_composite_spans_op *op,
-		 pixman_region16_t *clip,
-		 const BoxRec *box,
-		 int coverage)
-{
-	if (coverage >= FAST_SAMPLES_XY/2)
-		tor_blt_add(sna, op, clip, box, FAST_SAMPLES_XY);
-}
-
-static void
-tor_blt_add_clipped_mono(struct sna *sna,
-			 struct sna_composite_spans_op *op,
-			 pixman_region16_t *clip,
-			 const BoxRec *box,
-			 int coverage)
-{
-	if (coverage >= FAST_SAMPLES_XY/2)
-		tor_blt_add_clipped(sna, op, clip, box, FAST_SAMPLES_XY);
-}
-
 struct mono_inplace_composite {
 	pixman_image_t *src, *dst;
 	int dx, dy;
@@ -4196,41 +4130,21 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	}
 
 	if (op == PictOpSrc) {
-		if (dst->pCompositeClip->data) {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_src_clipped_mono;
-			else
-				span = tor_blt_src_clipped;
-		} else {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_src_mono;
-			else
-				span = tor_blt_src;
-		}
+		if (dst->pCompositeClip->data)
+			span = tor_blt_src_clipped;
+		else
+			span = tor_blt_src;
 	} else if (op == PictOpIn) {
-		if (dst->pCompositeClip->data) {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_in_clipped_mono;
-			else
-				span = tor_blt_in_clipped;
-		} else {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_in_mono;
-			else
-				span = tor_blt_in;
-		}
+		if (dst->pCompositeClip->data)
+			span = tor_blt_in_clipped;
+		else
+			span = tor_blt_in;
 	} else {
-		if (dst->pCompositeClip->data) {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_add_clipped_mono;
-			else
-				span = tor_blt_add_clipped;
-		} else {
-			if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-				span = tor_blt_add_mono;
-			else
-				span = tor_blt_add;
-		}
+		assert(op == PictOpAdd);
+		if (dst->pCompositeClip->data)
+			span = tor_blt_add_clipped;
+		else
+			span = tor_blt_add;
 	}
 
 	DBG(("%s: move-to-cpu\n", __FUNCTION__));
commit fe4ad66aadcfa0c907ba6460a88c49e65225602c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 21:33:00 2012 +0000

    sna: Correct the order of screen private teardown
    
    So that we do not keep a stale references of the last cached pixmap
    across server regeneration (or shutdown).
    
    Reported-by: Thierry Reding <thierry.reding at avionic-design.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47357
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 22ee1e6..c7f041e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -12082,6 +12082,10 @@ void sna_accel_watch_flush(struct sna *sna, int enable)
 
 void sna_accel_close(struct sna *sna)
 {
+	sna_composite_close(sna);
+	sna_gradients_close(sna);
+	sna_glyphs_close(sna);
+
 	if (sna->freed_pixmap) {
 		assert(sna->freed_pixmap->refcnt == 1);
 		free(sna_pixmap(sna->freed_pixmap));
@@ -12089,10 +12093,6 @@ void sna_accel_close(struct sna *sna)
 		sna->freed_pixmap = NULL;
 	}
 
-	sna_composite_close(sna);
-	sna_gradients_close(sna);
-	sna_glyphs_close(sna);
-
 	DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
 
 	kgem_cleanup_cache(&sna->kgem);
commit 0a8218a535babb5969a58c3a7da0215912f6fef8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 13:20:35 2012 +0000

    sna: Prefer the CPU bo for uploads if last access was not through the shadow
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 03ac400..22ee1e6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1745,7 +1745,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 		if (n) {
 			Bool ok = FALSE;
 
-			if (use_cpu_bo_for_xfer(priv))
+			if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1783,7 +1783,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		Bool ok = FALSE;
-		if (use_cpu_bo_for_xfer(priv))
+		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -1812,7 +1812,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 		box = REGION_RECTS(&i);
 		ok = FALSE;
-		if (use_cpu_bo_for_xfer(priv))
+		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -2283,7 +2283,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
 		ok = FALSE;
-		if (use_cpu_bo_for_xfer(priv))
+		if (pixmap->devPrivate.ptr == NULL || use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
commit 77c9c03b102e6fac2279f0085034557bd68c5112
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 13:18:43 2012 +0000

    sna: Fixup the shadow pixmap pointer for move-to-gpu
    
    If we choose not to use the CPU bo for the upload (because we fear the
    subsequent synchronisation cost), we need to fixup the shadow pointer
    before dereferencing it.
    
    On the move-to-cpu side, the fixup is already performed as we will need
    to access the shadow pixels for the subsequent drawing operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 63afaaa..03ac400 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1751,6 +1751,11 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 							    pixmap, priv->gpu_bo, 0, 0,
 							    box, n);
 			if (!ok) {
+				if (pixmap->devPrivate.ptr == NULL) {
+					assert(priv->stride && priv->ptr);
+					pixmap->devPrivate.ptr = priv->ptr;
+					pixmap->devKind = priv->stride;
+				}
 				if (n == 1 && !priv->pinned &&
 				    box->x1 <= 0 && box->y1 <= 0 &&
 				    box->x2 >= pixmap->drawable.width &&
@@ -1783,13 +1788,19 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, 1);
-		if (!ok)
+		if (!ok) {
+			if (pixmap->devPrivate.ptr == NULL) {
+				assert(priv->stride && priv->ptr);
+				pixmap->devPrivate.ptr = priv->ptr;
+				pixmap->devKind = priv->stride;
+			}
 			ok = sna_write_boxes(sna, pixmap,
 					     priv->gpu_bo, 0, 0,
 					     pixmap->devPrivate.ptr,
 					     pixmap->devKind,
 					     0, 0,
 					     box, 1);
+		}
 		if (!ok)
 			return false;
 
@@ -1806,13 +1817,19 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n);
-		if (!ok)
+		if (!ok) {
+			if (pixmap->devPrivate.ptr == NULL) {
+				assert(priv->stride && priv->ptr);
+				pixmap->devPrivate.ptr = priv->ptr;
+				pixmap->devKind = priv->stride;
+			}
 			ok = sna_write_boxes(sna, pixmap,
 					     priv->gpu_bo, 0, 0,
 					     pixmap->devPrivate.ptr,
 					     pixmap->devKind,
 					     0, 0,
 					     box, n);
+		}
 		if (!ok)
 			return false;
 
@@ -2272,7 +2289,11 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n);
 		if (!ok) {
-			assert(pixmap->devPrivate.ptr != NULL);
+			if (pixmap->devPrivate.ptr == NULL) {
+				assert(priv->stride && priv->ptr);
+				pixmap->devPrivate.ptr = priv->ptr;
+				pixmap->devKind = priv->stride;
+			}
 			if (n == 1 && !priv->pinned &&
 			    (box->x2 - box->x1) >= pixmap->drawable.width &&
 			    (box->y2 - box->y1) >= pixmap->drawable.height) {
commit 0e2fbb6a1c1ad6b0971d07a91a790b0b3236dad3
Merge: 64ccc66... 84d7a82...
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 12:43:47 2012 +0000

    uxa: Merge 'remove complicated second level caching'
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47345
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

commit 84d7a82122530a62ab9a2f4bba3044c23a0db9b0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 10:03:23 2012 +0000

    uxa: Simplify allocation of backing pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 446befd..0b1a369 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -640,6 +640,9 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 
 		dri_bo_unreference(priv->bo);
 		list_del(&priv->batch);
+
+		free(priv);
+		priv = NULL;
 	}
 
 	if (bo != NULL) {
@@ -647,13 +650,11 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 		uint32_t swizzle_mode;
 		int ret;
 
-		if (priv == NULL) {
-			priv = calloc(1, sizeof (struct intel_pixmap));
-			if (priv == NULL)
-				goto BAIL;
+		priv = calloc(1, sizeof (struct intel_pixmap));
+		if (priv == NULL)
+			goto BAIL;
 
-			list_init(&priv->batch);
-		}
+		list_init(&priv->batch);
 
 		dri_bo_reference(bo);
 		priv->bo = bo;
@@ -668,11 +669,6 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 		priv->tiling = tiling;
 		priv->busy = -1;
 		priv->offscreen = 1;
-	} else {
-		if (priv != NULL) {
-			free(priv);
-			priv = NULL;
-		}
 	}
 
   BAIL:
commit 1b1016624ad16cf5063e88a1d422e28865a9e87f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 09:31:24 2012 +0000

    uxa/i915: Remove broken CA pass, fallback to magic 2-pass composite helper
    
    The backend failed to handle all the corner cases, so remove the
    complication.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index ea20322..c6d5ed7 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -694,7 +694,6 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 	if (!intel_get_aperture_space(scrn, bo_table, ARRAY_SIZE(bo_table)))
 		return FALSE;
 
-	intel->needs_render_ca_pass = FALSE;
 	if (mask_picture != NULL && mask_picture->componentAlpha &&
 	    PICT_FORMAT_RGB(mask_picture->format)) {
 		/* Check if it's component alpha that relies on a source alpha
@@ -702,12 +701,8 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 		 * into the single source value that we get to blend with.
 		 */
 		if (i915_blend_op[op].src_alpha &&
-		    (i915_blend_op[op].src_blend != BLENDFACT_ZERO)) {
-			if (op != PictOpOver)
-				return FALSE;
-
-			intel->needs_render_ca_pass = TRUE;
-		}
+		    (i915_blend_op[op].src_blend != BLENDFACT_ZERO))
+			return FALSE;
 	}
 
 	intel->transform[0] = NULL;
@@ -944,18 +939,12 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 					   TEXCOORDFMT_2D : TEXCOORDFMT_4D);
 		}
 
-		if (intel->needs_render_ca_pass) {
-			OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(2) | 0);
-			OUT_BATCH(ss2);
-		} else {
-			OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(2) | I1_LOAD_S(6) | 1);
-			OUT_BATCH(ss2);
-			OUT_BATCH(i915_get_blend_cntl(op, mask_picture, dest_picture->format));
-		}
+		OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(2) | I1_LOAD_S(6) | 1);
+		OUT_BATCH(ss2);
+		OUT_BATCH(i915_get_blend_cntl(op, mask_picture, dest_picture->format));
 	}
 
-	if (! intel->needs_render_ca_pass)
-		i915_composite_emit_shader(intel, op);
+	i915_composite_emit_shader(intel, op);
 }
 
 void
@@ -1000,14 +989,6 @@ i915_composite(PixmapPtr dest, int srcX, int srcY, int maskX, int maskY,
 	}
 
 	if (intel->prim_offset == 0) {
-		if (intel->needs_render_ca_pass) {
-			OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(6) | 0);
-			OUT_BATCH(i915_get_blend_cntl(PictOpOutReverse,
-						      intel->render_mask_picture,
-						      intel->render_dest_picture->format));
-			i915_composite_emit_shader(intel, PictOpOutReverse);
-		}
-
 		intel->prim_offset = intel->batch_used;
 		OUT_BATCH(PRIM3D_RECTLIST | PRIM3D_INDIRECT_SEQUENTIAL);
 		OUT_BATCH(intel->vertex_index);
@@ -1032,16 +1013,6 @@ i915_vertex_flush(intel_screen_private *intel)
 	intel->batch_ptr[intel->prim_offset] |= intel->vertex_count;
 	intel->prim_offset = 0;
 
-	if (intel->needs_render_ca_pass) {
-		OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(6) | 0);
-		OUT_BATCH(i915_get_blend_cntl(PictOpAdd,
-					      intel->render_mask_picture,
-					      intel->render_dest_picture->format));
-		i915_composite_emit_shader(intel, PictOpAdd);
-		OUT_BATCH(PRIM3D_RECTLIST | PRIM3D_INDIRECT_SEQUENTIAL | intel->vertex_count);
-		OUT_BATCH(intel->vertex_index);
-	}
-
 	intel->vertex_index += intel->vertex_count;
 	intel->vertex_count = 0;
 }
diff --git a/src/intel.h b/src/intel.h
index c6fafb2..f806aea 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -272,7 +272,6 @@ typedef struct intel_screen_private {
 	Bool needs_3d_invariant;
 	Bool needs_render_state_emit;
 	Bool needs_render_vertex_emit;
-	Bool needs_render_ca_pass;
 
 	/* i830 render accel state */
 	uint32_t render_dest_format;
commit 895a56a553e1386323a593063e5b7fbb7a410abe
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 09:28:27 2012 +0000

    uxa: Remove unused render_mask_solid members
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index b7793ee..c6fafb2 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -268,9 +268,7 @@ typedef struct intel_screen_private {
 	PixmapPtr render_source, render_mask, render_dest;
 	PicturePtr render_source_picture, render_mask_picture, render_dest_picture;
 	CARD32 render_source_solid;
-	CARD32 render_mask_solid;
 	Bool render_source_is_solid;
-	Bool render_mask_is_solid;
 	Bool needs_3d_invariant;
 	Bool needs_render_state_emit;
 	Bool needs_render_vertex_emit;
commit 9c6f79440e8af9f264e3f6270384064dc1f44721
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 09:27:36 2012 +0000

    uxa: Remove unused tracking of the current render target
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index 9d8b8ac..ea20322 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -899,7 +899,7 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 	 * XXX However for reasons unfathomed, correct rendering in KDE requires
 	 * at least a MI_FLUSH | INHIBIT_RENDER_CACHE_FLUSH here.
 	 */
-	if (1 || dest != intel->render_current_dest) {
+	if (1) {
 		uint32_t tiling_bits;
 
 		if (intel_pixmap_tiled(dest)) {
@@ -927,8 +927,6 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 			  DRAW_XMAX(dest->drawable.width - 1));
 		/* yorig, xorig (relate to color buffer?) */
 		OUT_BATCH(0x00000000);
-
-		intel->render_current_dest = dest;
 	}
 
 	{
@@ -1052,6 +1050,5 @@ void
 i915_batch_commit_notify(intel_screen_private *intel)
 {
 	intel->needs_render_state_emit = TRUE;
-	intel->render_current_dest = NULL;
 	intel->last_floats_per_vertex = 0;
 }
diff --git a/src/intel.h b/src/intel.h
index ef00a01..b7793ee 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -269,7 +269,6 @@ typedef struct intel_screen_private {
 	PicturePtr render_source_picture, render_mask_picture, render_dest_picture;
 	CARD32 render_source_solid;
 	CARD32 render_mask_solid;
-	PixmapPtr render_current_dest;
 	Bool render_source_is_solid;
 	Bool render_mask_is_solid;
 	Bool needs_3d_invariant;
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index c0e1183..446befd 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -628,12 +628,9 @@ dri_bo *intel_get_pixmap_bo(PixmapPtr pixmap)
 
 void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 {
-	ScrnInfoPtr scrn = xf86Screens[pixmap->drawable.pScreen->myNum];
-	intel_screen_private *intel = intel_get_screen_private(scrn);
 	struct intel_pixmap *priv;
 
 	priv = intel_get_pixmap_private(pixmap);
-
 	if (priv == NULL && bo == NULL)
 	    return;
 
@@ -643,9 +640,6 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 
 		dri_bo_unreference(priv->bo);
 		list_del(&priv->batch);
-
-		if (intel->render_current_dest == pixmap)
-		    intel->render_current_dest = NULL;
 	}
 
 	if (bo != NULL) {
@@ -1261,7 +1255,6 @@ Bool intel_uxa_init(ScreenPtr screen)
 	intel->uxa_driver->uxa_major = 1;
 	intel->uxa_driver->uxa_minor = 0;
 
-	intel->render_current_dest = NULL;
 	intel->prim_offset = 0;
 	intel->vertex_count = 0;
 	intel->vertex_offset = 0;
commit 219467ac8bfab98bca82108b22eae8af3fc0bf36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 09:26:10 2012 +0000

    uxa: Simplify flush tracking
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i830_render.c b/src/i830_render.c
index 3f3d2ef..c12e87b 100644
--- a/src/i830_render.c
+++ b/src/i830_render.c
@@ -563,8 +563,7 @@ i830_prepare_composite(int op, PicturePtr source_picture,
 		intel->s8_blendctl = blendctl;
 	}
 
-	if(intel_pixmap_is_dirty(source) ||
-	   (mask && intel_pixmap_is_dirty(mask)))
+	if (intel_pixmap_is_dirty(source) || intel_pixmap_is_dirty(mask))
 		intel_batch_emit_flush(scrn);
 
 	intel->needs_render_state_emit = TRUE;
diff --git a/src/i915_render.c b/src/i915_render.c
index 6210035..9d8b8ac 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -743,11 +743,7 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 
 	intel->i915_render_state.op = op;
 
-	/* BUF_INFO is an implicit flush */
-	if (dest != intel->render_current_dest)
-		intel_batch_do_flush(scrn);
-	else if((source && intel_pixmap_is_dirty(source)) ||
-		(mask && intel_pixmap_is_dirty(mask)))
+	if (intel_pixmap_is_dirty(source) || intel_pixmap_is_dirty(mask))
 		intel_batch_emit_flush(scrn);
 
 	intel->needs_render_state_emit = TRUE;
@@ -906,8 +902,6 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 	if (1 || dest != intel->render_current_dest) {
 		uint32_t tiling_bits;
 
-		intel_batch_do_flush(scrn);
-
 		if (intel_pixmap_tiled(dest)) {
 			tiling_bits = BUF_3D_TILED_SURFACE;
 			if (intel_get_pixmap_private(dest)->tiling
diff --git a/src/i965_render.c b/src/i965_render.c
index 8907139..b981ecc 100644
--- a/src/i965_render.c
+++ b/src/i965_render.c
@@ -2035,8 +2035,7 @@ i965_prepare_composite(int op, PicturePtr source_picture,
 	}
 
 	/* Flush any pending writes prior to relocating the textures. */
-	if (intel_pixmap_is_dirty(source) ||
-	    (mask && intel_pixmap_is_dirty(mask)))
+	if (intel_pixmap_is_dirty(source) || intel_pixmap_is_dirty(mask))
 		intel_batch_emit_flush(scrn);
 
 	composite_op->op = op;
diff --git a/src/intel.h b/src/intel.h
index e1c2bb5..ef00a01 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -82,12 +82,12 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 struct intel_pixmap {
 	dri_bo *bo;
 
-	struct list flush, batch;
+	struct list batch;
 
 	uint16_t stride;
 	uint8_t tiling;
 	int8_t busy :2;
-	int8_t batch_write :1;
+	int8_t dirty :1;
 	int8_t offscreen :1;
 	int8_t pinned :1;
 };
@@ -121,7 +121,7 @@ static inline void intel_set_pixmap_private(PixmapPtr pixmap, struct intel_pixma
 
 static inline Bool intel_pixmap_is_dirty(PixmapPtr pixmap)
 {
-	return !list_is_empty(&intel_get_pixmap_private(pixmap)->flush);
+	return pixmap && intel_get_pixmap_private(pixmap)->dirty;
 }
 
 static inline Bool intel_pixmap_tiled(PixmapPtr pixmap)
@@ -188,7 +188,6 @@ typedef struct intel_screen_private {
 	/** Ending batch_used that was verified by intel_start_batch_atomic() */
 	int batch_atomic_limit;
 	struct list batch_pixmaps;
-	struct list flush_pixmaps;
 	drm_intel_bo *wa_scratch_bo;
 	OsTimerPtr cache_expire;
 
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 8e54d3a..2719c38 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -114,17 +114,15 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 
 	while (!list_is_empty(&intel->batch_pixmaps))
 		list_del(intel->batch_pixmaps.next);
-
-	while (!list_is_empty(&intel->flush_pixmaps))
-		list_del(intel->flush_pixmaps.next);
 }
 
-void intel_batch_do_flush(ScrnInfoPtr scrn)
+static void intel_batch_do_flush(ScrnInfoPtr scrn)
 {
 	intel_screen_private *intel = intel_get_screen_private(scrn);
+	struct intel_pixmap *priv;
 
-	while (!list_is_empty(&intel->flush_pixmaps))
-		list_del(intel->flush_pixmaps.next);
+	list_for_each_entry(priv, &intel->batch_pixmaps, batch)
+		priv->dirty = 0;
 }
 
 static void intel_emit_post_sync_nonzero_flush(ScrnInfoPtr scrn)
@@ -268,13 +266,10 @@ void intel_batch_submit(ScrnInfoPtr scrn)
 					 batch);
 
 		entry->busy = -1;
-		entry->batch_write = 0;
+		entry->dirty = 0;
 		list_del(&entry->batch);
 	}
 
-	while (!list_is_empty(&intel->flush_pixmaps))
-		list_del(intel->flush_pixmaps.next);
-
 	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
 		drm_intel_bo_wait_rendering(intel->batch_bo);
 
diff --git a/src/intel_batchbuffer.h b/src/intel_batchbuffer.h
index f5f118e..b2bb390 100644
--- a/src/intel_batchbuffer.h
+++ b/src/intel_batchbuffer.h
@@ -36,7 +36,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 void intel_batch_init(ScrnInfoPtr scrn);
 void intel_batch_teardown(ScrnInfoPtr scrn);
 void intel_batch_emit_flush(ScrnInfoPtr scrn);
-void intel_batch_do_flush(ScrnInfoPtr scrn);
 void intel_batch_submit(ScrnInfoPtr scrn);
 
 static inline int intel_batch_space(intel_screen_private *intel)
@@ -132,10 +131,8 @@ intel_batch_mark_pixmap_domains(intel_screen_private *intel,
 
 	if (list_is_empty(&priv->batch))
 		list_add(&priv->batch, &intel->batch_pixmaps);
-	if (write_domain && list_is_empty(&priv->flush))
-		list_add(&priv->flush, &intel->flush_pixmaps);
 
-	priv->batch_write |= write_domain != 0;
+	priv->dirty |= write_domain != 0;
 	priv->busy = 1;
 
 	intel->needs_flush |= write_domain != 0;
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 606496b..e4dd26b 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -402,7 +402,6 @@ static int intel_init_bufmgr(intel_screen_private *intel)
 	drm_intel_bufmgr_gem_enable_fenced_relocs(intel->bufmgr);
 
 	list_init(&intel->batch_pixmaps);
-	list_init(&intel->flush_pixmaps);
 
 	if ((INTEL_INFO(intel)->gen == 60)) {
 		intel->wa_scratch_bo =
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 7fb5a96..c0e1183 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -643,7 +643,6 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 
 		dri_bo_unreference(priv->bo);
 		list_del(&priv->batch);
-		list_del(&priv->flush);
 
 		if (intel->render_current_dest == pixmap)
 		    intel->render_current_dest = NULL;
@@ -660,7 +659,6 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 				goto BAIL;
 
 			list_init(&priv->batch);
-			list_init(&priv->flush);
 		}
 
 		dri_bo_reference(bo);
@@ -710,8 +708,7 @@ static Bool intel_uxa_prepare_access(PixmapPtr pixmap, uxa_access_t access)
 
 	/* When falling back to swrast, flush all pending operations */
 	intel_glamor_flush(intel);
-	if (!list_is_empty(&priv->batch) &&
-	    (access == UXA_ACCESS_RW || priv->batch_write))
+	if (access == UXA_ACCESS_RW || priv->dirty)
 		intel_batch_submit(scrn);
 
 	assert(bo->size <= intel->max_gtt_map_size);
@@ -1105,7 +1102,6 @@ intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 		priv->offscreen = 1;
 
 		list_init(&priv->batch);
-		list_init(&priv->flush);
 		intel_set_pixmap_private(pixmap, priv);
 
 		screen->ModifyPixmapHeader(pixmap, w, h, 0, 0, stride, NULL);
commit bd8fafe0c48df7f138459f590a0e9e8d0c3267b7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Mar 10 09:10:16 2012 +0000

    uxa: Kill the complicated in-flight tracking
    
    Reference leak hunting.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index 69f7c72..e1c2bb5 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -82,7 +82,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 struct intel_pixmap {
 	dri_bo *bo;
 
-	struct list flush, batch, in_flight;
+	struct list flush, batch;
 
 	uint16_t stride;
 	uint8_t tiling;
@@ -189,7 +189,6 @@ typedef struct intel_screen_private {
 	int batch_atomic_limit;
 	struct list batch_pixmaps;
 	struct list flush_pixmaps;
-	struct list in_flight;
 	drm_intel_bo *wa_scratch_bo;
 	OsTimerPtr cache_expire;
 
diff --git a/src/intel_batchbuffer.c b/src/intel_batchbuffer.c
index 2b8fbb6..8e54d3a 100644
--- a/src/intel_batchbuffer.c
+++ b/src/intel_batchbuffer.c
@@ -117,18 +117,6 @@ void intel_batch_teardown(ScrnInfoPtr scrn)
 
 	while (!list_is_empty(&intel->flush_pixmaps))
 		list_del(intel->flush_pixmaps.next);
-
-	while (!list_is_empty(&intel->in_flight)) {
-		struct intel_pixmap *entry;
-
-		entry = list_first_entry(&intel->in_flight,
-					 struct intel_pixmap,
-					 in_flight);
-
-		dri_bo_unreference(entry->bo);
-		list_del(&entry->in_flight);
-		free(entry);
-	}
 }
 
 void intel_batch_do_flush(ScrnInfoPtr scrn)
@@ -287,18 +275,6 @@ void intel_batch_submit(ScrnInfoPtr scrn)
 	while (!list_is_empty(&intel->flush_pixmaps))
 		list_del(intel->flush_pixmaps.next);
 
-	while (!list_is_empty(&intel->in_flight)) {
-		struct intel_pixmap *entry;
-
-		entry = list_first_entry(&intel->in_flight,
-					 struct intel_pixmap,
-					 in_flight);
-
-		dri_bo_unreference(entry->bo);
-		list_del(&entry->in_flight);
-		free(entry);
-	}
-
 	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
 		drm_intel_bo_wait_rendering(intel->batch_bo);
 
diff --git a/src/intel_driver.c b/src/intel_driver.c
index e2e43fa..606496b 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -403,7 +403,6 @@ static int intel_init_bufmgr(intel_screen_private *intel)
 
 	list_init(&intel->batch_pixmaps);
 	list_init(&intel->flush_pixmaps);
-	list_init(&intel->in_flight);
 
 	if ((INTEL_INFO(intel)->gen == 60)) {
 		intel->wa_scratch_bo =
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index ed4f375..7fb5a96 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -641,16 +641,9 @@ void intel_set_pixmap_bo(PixmapPtr pixmap, dri_bo * bo)
 		if (priv->bo == bo)
 			return;
 
-		if (list_is_empty(&priv->batch)) {
-			dri_bo_unreference(priv->bo);
-		} else if (!drm_intel_bo_is_reusable(priv->bo)) {
-			dri_bo_unreference(priv->bo);
-			list_del(&priv->batch);
-			list_del(&priv->flush);
-		} else {
-			list_add(&priv->in_flight, &intel->in_flight);
-			priv = NULL;
-		}
+		dri_bo_unreference(priv->bo);
+		list_del(&priv->batch);
+		list_del(&priv->flush);
 
 		if (intel->render_current_dest == pixmap)
 		    intel->render_current_dest = NULL;
@@ -1088,45 +1081,6 @@ intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 		if (size > intel->max_bo_size || stride >= KB(32))
 			goto fallback_pixmap;
 
-		/* Perform a preliminary search for an in-flight bo */
-		if (usage != UXA_CREATE_PIXMAP_FOR_MAP) {
-			int aligned_h;
-
-			if (tiling == I915_TILING_X)
-				aligned_h = ALIGN(h, 8);
-			else if (tiling == I915_TILING_Y)
-				aligned_h = ALIGN(h, 32);
-			else
-				aligned_h = ALIGN(h, 2);
-
-			list_for_each_entry(priv, &intel->in_flight, in_flight) {
-				if (priv->tiling != tiling)
-					continue;
-
-				if (tiling == I915_TILING_NONE) {
-				    if (priv->bo->size < size)
-					    continue;
-
-					priv->stride = stride;
-				} else {
-					if (priv->stride < stride ||
-					    priv->bo->size < priv->stride * aligned_h)
-						continue;
-
-					stride = priv->stride;
-				}
-
-				list_del(&priv->in_flight);
-				intel_set_pixmap_private(pixmap, priv);
-
-				screen->ModifyPixmapHeader(pixmap, w, h, 0, 0, stride, NULL);
-
-				if (!intel_glamor_create_textured_pixmap(pixmap))
-					goto fallback_glamor;
-				return pixmap;
-			}
-		}
-
 		priv = calloc(1, sizeof (struct intel_pixmap));
 		if (priv == NULL)
 			goto fallback_pixmap;
commit 64ccc6698def517fc37e9ba3c41715626df0e3ca
Author: Thierry Reding <thierry.reding at avionic-design.de>
Date:   Thu Mar 15 13:10:20 2012 +0100

    configure: Keep passed-in CFLAGS for DRI tests
    
    When the user passes extra CFLAGS and CPPFLAGS to the configure script,
    they should be kept when performing subsequent checks with additional
    flags. This is required to properly build in cross-compilation setups
    where the user may pass in flags like --sysroot in order to pick up the
    cross-built dependencies.
    
    Signed-off-by: Thierry Reding <thierry.reding at avionic-design.de>

diff --git a/configure.ac b/configure.ac
index 03a35bd..7ca3075 100644
--- a/configure.ac
+++ b/configure.ac
@@ -192,8 +192,8 @@ sdkdir=`$PKG_CONFIG --variable=sdkdir xorg-server`
 if test "x$enable_dri" != "xno"; then
         save_CFLAGS="$CFLAGS"
         save_CPPFLAGS="$CPPFLAGS"
-        CFLAGS="$XORG_CFLAGS $DRI_CFLAGS $DRM_CFLAGS"
-        CPPFLAGS="$XORG_CFLAGS $DRI_CFLAGS $DRM_CFLAGS"
+        CFLAGS="$CFLAGS $XORG_CFLAGS $DRI_CFLAGS $DRM_CFLAGS"
+        CPPFLAGS="$CPPFLAGS $XORG_CFLAGS $DRI_CFLAGS $DRM_CFLAGS"
         AC_CHECK_HEADERS([dri.h sarea.h dristruct.h],, [DRI=no],
                 [/* for dri.h */
                  #include <xf86str.h>
commit c5c61ef378cad3c8259631138ea1e03eb02036af
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 12:38:22 2012 +0000

    sna/traps: dst IN WHITE does not reduce to SRC!
    
    I was getting too carried with my reductions. However, IN over a
    clear surface is a no-op, though unlikely!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e73805d..907ece0 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -4071,7 +4071,6 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	struct sna_pixmap *priv;
 	RegionRec region;
 	uint32_t color;
-	bool unbounded;
 	int16_t dst_x, dst_y;
 	int dx, dy;
 	int n;
@@ -4120,17 +4119,14 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 	}
 
-	unbounded = false;
 	switch (op) {
-	case PictOpIn:
-		unbounded = true;
-		if (priv->clear && priv->clear_color == 0xff)
-			op = PictOpSrc;
-		break;
 	case PictOpAdd:
 		if (priv->clear && priv->clear_color == 0)
 			op = PictOpSrc;
 		break;
+	case PictOpIn:
+		if (priv->clear && priv->clear_color == 0)
+			return true;
 	case PictOpSrc:
 		break;
 	default:
@@ -4251,7 +4247,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	inplace.opacity = color >> 24;
 
 	tor_render(NULL, &tor, (void*)&inplace,
-		   dst->pCompositeClip, span, unbounded);
+		   dst->pCompositeClip, span, op == PictOpIn);
 
 	tor_fini(&tor);
 
commit fba49e1bb8e5b6b0e3ceace2dbddb5796ece954e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 15 10:31:51 2012 +0000

    sna/traps: Fix off-by-one for filling vertical segments in tor_inplace
    
    If the last solid portion was exactly 4-pixels wide, we would miss
    filling in the mask.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 8ecd18a..e73805d 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1437,7 +1437,7 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 			}
 
 			winding += right->dir;
-			if (0 == winding)
+			if (0 == winding && right->x.quo != right->next->x.quo)
 				break;
 
 			right = right->next;
@@ -1445,7 +1445,7 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 
 		if (left->x.quo < 0) {
 			lix = lfx = 0;
-		} else if (left->x.quo > width * FAST_SAMPLES_X) {
+		} else if (left->x.quo >= width * FAST_SAMPLES_X) {
 			lix = width;
 			lfx = 0;
 		} else
@@ -1453,7 +1453,7 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 
 		if (right->x.quo < 0) {
 			rix = rfx = 0;
-		} else if (right->x.quo > width * FAST_SAMPLES_X) {
+		} else if (right->x.quo >= width * FAST_SAMPLES_X) {
 			rix = width;
 			rfx = 0;
 		} else
@@ -1478,12 +1478,14 @@ inplace_row(struct active_list *active, uint8_t *row, int width)
 				else
 					memset(row+lix, 0xff, rix);
 #else
-				while (rix && lix & 3)
-					row[lix++] = 0xff, rix--;
-				while (rix > 4) {
+				while (rix >= 8) {
+					*(uint64_t *)(row+lix) = 0xffffffffffffffff;
+					lix += 8;
+					rix -= 8;
+				}
+				if (rix & 4) {
 					*(uint32_t *)(row+lix) = 0xffffffff;
 					lix += 4;
-					rix -= 4;
 				}
 				if (rix & 2) {
 					*(uint16_t *)(row+lix) = 0xffff;
@@ -1533,16 +1535,16 @@ inplace_subrow(struct active_list *active, int8_t *row,
 							*min = ix;
 
 						row[ix++] += FAST_SAMPLES_X - fx;
-						if (ix < width)
+						if (fx && ix < width)
 							row[ix] += fx;
 					}
 
 					xstart = edge->x.quo;
 					if (xstart < FAST_SAMPLES_X * width) {
 						FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
-						row[ix++] -= FAST_SAMPLES_X - fx;
-						if (ix < width)
-							row[ix] -= fx;
+						row[ix] -= FAST_SAMPLES_X - fx;
+						if (fx && ix + 1< width)
+							row[++ix] -= fx;
 
 						if (ix > *max)
 							*max = ix;
commit 27f8f121b3d7433c6c3470b048428ca088acd2c9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 23:43:20 2012 +0000

    sna/damage: Handle a reduced damage rather than assert
    
    As we may reduce a damage to empty along the migration paths and not
    detect that reduced damage till later, handle those scenarios rather
    asserting.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42426
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index f52ecac..9c646d8 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -1005,12 +1005,16 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 	if (damage == NULL)
 		return NULL;
 
+	if (!RegionNotEmpty(&damage->region)) {
+		__sna_damage_destroy(damage);
+		return NULL;
+	}
+
 	assert(RegionNotEmpty(region));
 
 	if (!sna_damage_maybe_contains_box(damage, &region->extents))
 		return damage;
 
-	assert(RegionNotEmpty(&damage->region));
 
 	if (region_is_singular(region) &&
 	    box_contains(&region->extents, &damage->extents)) {
@@ -1028,15 +1032,12 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 	}
 
 	if (damage->mode != DAMAGE_SUBTRACT) {
-		if (damage->dirty)
+		if (damage->dirty) {
 			__sna_damage_reduce(damage);
-
-		if (pixman_region_equal(region, &damage->region)) {
-			__sna_damage_destroy(damage);
-			return NULL;
+			assert(RegionNotEmpty(&damage->region));
 		}
 
-		if (!pixman_region_not_empty(&damage->region)) {
+		if (pixman_region_equal(region, &damage->region)) {
 			__sna_damage_destroy(damage);
 			return NULL;
 		}
@@ -1091,23 +1092,23 @@ inline static struct sna_damage *__sna_damage_subtract_box(struct sna_damage *da
 	if (damage == NULL)
 		return NULL;
 
+	if (!RegionNotEmpty(&damage->region)) {
+		__sna_damage_destroy(damage);
+		return NULL;
+	}
+
 	if (!sna_damage_maybe_contains_box(damage, box))
 		return damage;
 
-	assert(RegionNotEmpty(&damage->region));
-
 	if (box_contains(box, &damage->extents)) {
 		__sna_damage_destroy(damage);
 		return NULL;
 	}
 
 	if (damage->mode != DAMAGE_SUBTRACT) {
-		if (damage->dirty)
+		if (damage->dirty) {
 			__sna_damage_reduce(damage);
-
-		if (!pixman_region_not_empty(&damage->region)) {
-			__sna_damage_destroy(damage);
-			return NULL;
+			assert(RegionNotEmpty(&damage->region));
 		}
 
 		if (region_is_singular(&damage->region)) {
@@ -1163,6 +1164,11 @@ static struct sna_damage *__sna_damage_subtract_boxes(struct sna_damage *damage,
 	if (damage == NULL)
 		return NULL;
 
+	if (!RegionNotEmpty(&damage->region)) {
+		__sna_damage_destroy(damage);
+		return NULL;
+	}
+
 	assert(n);
 
 	extents = box[0];
@@ -1191,12 +1197,9 @@ static struct sna_damage *__sna_damage_subtract_boxes(struct sna_damage *damage,
 		return __sna_damage_subtract_box(damage, &extents);
 
 	if (damage->mode != DAMAGE_SUBTRACT) {
-		if (damage->dirty)
+		if (damage->dirty) {
 			__sna_damage_reduce(damage);
-
-		if (!pixman_region_not_empty(&damage->region)) {
-			__sna_damage_destroy(damage);
-			return NULL;
+			assert(RegionNotEmpty(&damage->region));
 		}
 
 		damage->mode = DAMAGE_SUBTRACT;
commit abd104600252f0ebd1eb77eb5419693e1c17deda
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 23:08:31 2012 +0000

    sna: Treat unmapped but CPU-mappable bo as available for mapping
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 7e5ffac..98534d9 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -443,7 +443,7 @@ static inline bool kgem_bo_mapped(struct kgem_bo *bo)
 	DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling));
 
 	if (bo->map == NULL)
-		return false;
+		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;
 
 	return IS_CPU_MAP(bo->map) == !bo->tiling;
 }
commit 2c492c0064e23457bffdf4f9ab1a7c9f1be93a87
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 23:05:58 2012 +0000

    sna: Disable tiling for single row pixmaps (unless required for hw limits)
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a1ed6ca..daca7af 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2330,6 +2330,12 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 	if (tiling < 0)
 		return tiling;
 
+	if (tiling && height == 1) {
+		DBG(("%s: disabling tiling [%d] for single row\n",
+		     __FUNCTION__,height));
+		tiling = I915_TILING_NONE;
+		goto done;
+	}
 	if (tiling == I915_TILING_Y && height <= 16) {
 		DBG(("%s: too short [%d] for TILING_Y\n",
 		     __FUNCTION__,height));
commit 6890592cd2b2d6f0d06c530f5e770fdc98577d4f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 21:30:13 2012 +0000

    sna/traps: Explicitly create an unattached pixmap for fallback
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42426
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 6ec4748..55c5f5a 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -419,6 +419,8 @@ static inline Bool pixmap_is_scanout(PixmapPtr pixmap)
 PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 				   int width, int height, int depth,
 				   unsigned flags);
+PixmapPtr sna_pixmap_create_unattached(ScreenPtr screen,
+				       int width, int height, int depth);
 
 struct sna_pixmap *sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags);
 struct sna_pixmap *sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a5da2ca..63afaaa 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -623,6 +623,15 @@ sna_pixmap_create_shm(ScreenPtr screen,
 	return pixmap;
 }
 
+PixmapPtr
+sna_pixmap_create_unattached(ScreenPtr screen,
+			     int width, int height, int depth)
+{
+	return create_pixmap(to_sna_from_screen(screen),
+			     screen, width, height, depth,
+			     CREATE_PIXMAP_USAGE_SCRATCH);
+}
+
 static PixmapPtr
 sna_pixmap_create_scratch(ScreenPtr screen,
 			  int width, int height, int depth,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f0f48e8..8ecd18a 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2464,9 +2464,8 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 				pixman_image_unref(image);
 			}
 		} else {
-			scratch = screen->CreatePixmap(screen,
-						       width, height, depth,
-						       CREATE_PIXMAP_USAGE_SCRATCH);
+			scratch = sna_pixmap_create_unattached(screen,
+							       width, height, depth);
 			if (!scratch)
 				return;
 
commit 23b5640a0f8d8a944275e574f4c72f2380f7ed7a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 20:48:56 2012 +0000

    sna: Avoid using kgem_bo_reference() internally
    
    So that we can keep the assertion to track the refcnt elsewhere.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 761218f..a1ed6ca 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2275,8 +2275,10 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
 
 	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
-	if (bo)
-		return kgem_bo_reference(bo);
+	if (bo) {
+		bo->refcnt = 1;
+		return bo;
+	}
 
 	handle = gem_create(kgem->fd, size);
 	if (handle == 0)
@@ -2505,7 +2507,8 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-			return kgem_bo_reference(bo);
+			bo->refcnt = 1;
+			return bo;
 		}
 
 		goto create;
@@ -2556,7 +2559,8 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
 				assert(bo->reusable);
 				assert(bo->domain != DOMAIN_GPU && !kgem_busy(kgem, bo->handle));
-				return kgem_bo_reference(bo);
+				bo->refcnt = 1;
+				return bo;
 			}
 		} while (!list_is_empty(cache) && kgem_retire(kgem));
 	}
@@ -2609,7 +2613,8 @@ search_again:
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-			return kgem_bo_reference(bo);
+			bo->refcnt = 1;
+			return bo;
 		}
 	} else {
 		list_for_each_entry(bo, cache, list) {
@@ -2629,7 +2634,8 @@ search_again:
 			bo->delta = 0;
 			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-			return kgem_bo_reference(bo);
+			bo->refcnt = 1;
+			return bo;
 		}
 	}
 
@@ -2661,7 +2667,8 @@ search_again:
 					bo->delta = 0;
 					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-					return kgem_bo_reference(bo);
+					bo->refcnt = 1;
+					return bo;
 				}
 			}
 		}
@@ -2706,7 +2713,8 @@ search_again:
 				bo->delta = 0;
 				DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
 				     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
-				return kgem_bo_reference(bo);
+				bo->refcnt = 1;
+				return bo;
 			}
 		}
 	}
@@ -2763,7 +2771,8 @@ search_inactive:
 		assert((flags & CREATE_INACTIVE) == 0 || bo->domain != DOMAIN_GPU);
 		assert((flags & CREATE_INACTIVE) == 0 ||
 		       !kgem_busy(kgem, bo->handle));
-		return kgem_bo_reference(bo);
+		bo->refcnt = 1;
+		return bo;
 	}
 
 	if (flags & CREATE_INACTIVE && !list_is_empty(&kgem->requests)) {
commit 03731b4e37b0cbd3a1cb8c8ec08fddf0f670a918
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 20:46:59 2012 +0000

    sna: Elide no-op image glyphs
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e957b1b..a5da2ca 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10163,7 +10163,8 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	      int _x, int _y, unsigned int _n,
 	      CharInfoPtr *_info,
 	      RegionRec *clip,
-	      uint32_t fg, uint32_t bg)
+	      uint32_t fg, uint32_t bg,
+	      bool transparent)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -10174,7 +10175,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	int16_t dx, dy;
 	uint32_t br00;
 
-	uint8_t rop = bg == -1 ? copy_ROP[gc->alu] : ROP_S;
+	uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
 
 	DBG(("%s (%d, %d) x %d, fg=%08x, bg=%08x alu=%02x\n",
 	     __FUNCTION__, _x, _y, _n, fg, bg, rop));
@@ -10206,7 +10207,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	extents = REGION_RECTS(clip);
 	last_extents = extents + REGION_NUM_RECTS(clip);
 
-	if (bg != -1) /* emulate miImageGlyphBlt */
+	if (!transparent) /* emulate miImageGlyphBlt */
 		sna_blt_fill_boxes(sna, GXcopy,
 				   bo, drawable->bitsPerPixel,
 				   bg, extents, REGION_NUM_RECTS(clip));
@@ -10225,7 +10226,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		b[0] |= BLT_DST_TILED;
 		b[1] >>= 2;
 	}
-	b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+	b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 	b[2] = extents->y1 << 16 | extents->x1;
 	b[3] = extents->y2 << 16 | extents->x2;
 	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10256,6 +10257,9 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			if (c->bits == (void *)1)
 				goto skip;
 
+			if (!transparent && c->bits == (void *)2)
+				goto skip;
+
 			len = (w8 * h + 7) >> 3 << 1;
 			DBG(("%s glyph: (%d, %d) x (%d[%d], %d), len=%d\n" ,__FUNCTION__,
 			     x,y, w, w8, h, len));
@@ -10268,6 +10272,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			if (x1 + w <= extents->x1 || y1 + h <= extents->y1)
 				goto skip;
 
+
 			if (!kgem_check_batch(&sna->kgem, 3+len)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -10279,7 +10284,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 					b[0] |= BLT_DST_TILED;
 					b[1] >>= 2;
 				}
-				b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+				b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 				b[2] = extents->y1 << 16 | extents->x1;
 				b[3] = extents->y2 << 16 | extents->x2;
 				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10299,7 +10304,9 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			b[0] = br00 | (1 + len);
 			b[1] = (uint16_t)y1 << 16 | (uint16_t)x1;
 			b[2] = (uint16_t)(y1+h) << 16 | (uint16_t)(x1+w);
-			 {
+			if (c->bits == (void *)2) {
+				memset(b+3, 0, len*4);
+			} else {
 				uint64_t *src = (uint64_t *)c->bits;
 				uint64_t *dst = (uint64_t *)(b + 3);
 				do  {
@@ -10384,6 +10391,7 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
 	int h = GLYPHHEIGHTPIXELS(in);
 	int stride = GLYPHWIDTHBYTESPADDED(in);
 	uint8_t *dst, *src;
+	int clear = 1;
 
 	out->metrics = in->metrics;
 
@@ -10405,11 +10413,17 @@ static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
 	do {
 		int i = w;
 		do {
+			clear &= *src == 0;
 			*dst++ = byte_reverse(*src++);
 		} while (--i);
 		src += stride;
 	} while (--h);
 
+	if (clear) {
+		free(out->bits);
+		out->bits = (void *)2;
+	}
+
 	return true;
 }
 
@@ -10511,7 +10525,7 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (!gc_is_solid(gc, &fg))
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1)) {
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1, true)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10600,7 +10614,7 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (!gc_is_solid(gc, &fg))
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1)) {
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1, true)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10698,7 +10712,8 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, gc->fgPixel, gc->bgPixel)) {
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region,
+			   gc->fgPixel, gc->bgPixel, false)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10788,7 +10803,8 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, gc->fgPixel, gc->bgPixel)) {
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region,
+			   gc->fgPixel, gc->bgPixel, false)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10830,14 +10846,15 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
 		       RegionPtr clip,
-		       uint32_t fg, uint32_t bg)
+		       uint32_t fg, uint32_t bg,
+		       bool transparent)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	const BoxRec *extents, *last_extents;
 	uint32_t *b;
 	int16_t dx, dy;
-	uint8_t rop = bg == -1 ? copy_ROP[gc->alu] : ROP_S;
+	uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
 
 	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
@@ -10855,7 +10872,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	extents = REGION_RECTS(clip);
 	last_extents = extents + REGION_NUM_RECTS(clip);
 
-	if (bg != -1) /* emulate miImageGlyphBlt */
+	if (!transparent) /* emulate miImageGlyphBlt */
 		sna_blt_fill_boxes(sna, GXcopy,
 				   bo, drawable->bitsPerPixel,
 				   bg, extents, REGION_NUM_RECTS(clip));
@@ -10874,7 +10891,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		b[0] |= BLT_DST_TILED;
 		b[1] >>= 2;
 	}
-	b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+	b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 	b[2] = extents->y1 << 16 | extents->x1;
 	b[3] = extents->y2 << 16 | extents->x2;
 	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10916,6 +10933,21 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			if (x1 + w <= extents->x1 || y1 + h <= extents->y1)
 				goto skip;
 
+			if (!transparent) {
+				int clear = 1, j = h;
+				uint8_t *g = glyph;
+
+				do {
+					i = w8;
+					do {
+						clear = *g == 0;
+					} while (clear && --i);
+					g += stride - w8;
+				} while (clear && --j);
+				if (clear)
+					goto skip;
+			}
+
 			if (!kgem_check_batch(&sna->kgem, 3+len)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -10927,7 +10959,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 					b[0] |= BLT_DST_TILED;
 					b[1] >>= 2;
 				}
-				b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+				b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 				b[2] = extents->y1 << 16 | extents->x1;
 				b[3] = extents->y2 << 16 | extents->x2;
 				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
@@ -11054,7 +11086,7 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
 				   bo, damage, &region,
-				   gc->fgPixel, gc->bgPixel))
+				   gc->fgPixel, gc->bgPixel, false))
 		goto out;
 
 fallback:
@@ -11129,7 +11161,7 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 
 	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
-				   bo, damage, &region, fg, -1))
+				   bo, damage, &region, fg, -1, true))
 		goto out;
 
 fallback:
commit d0e05b4294b2f150a41dd95d52c2e6ee8479283d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 20:19:30 2012 +0000

    sna: Don't mark cached upload buffers for inactivity expiration
    
    As these do not follow the normal rules of damage tracking, we have to
    be careful not to force migration.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42426
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ce13982..e957b1b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2073,7 +2073,8 @@ static inline struct sna_pixmap *
 sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 {
 	assert(priv->gpu_bo);
-	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
+	if (!priv->pinned && priv->gpu_bo->proxy == NULL &&
+	    (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return priv;
commit 96ead26c4dddab637875c74a9afca6fd6662352b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 19:56:42 2012 +0000

    sna: Add a couple of asserts for inactive_partial reference counting
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 72751f3..761218f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1257,6 +1257,7 @@ static void kgem_retire_partials(struct kgem *kgem)
 			kgem_bo_destroy(kgem, cached);
 		}
 
+		assert(bo->base.refcnt > 0);
 		if (bo->base.refcnt != 1)
 			continue;
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 52e5e9c..7e5ffac 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -270,6 +270,7 @@ static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo)
 
 static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
 {
+	assert(bo->refcnt);
 	bo->refcnt++;
 	return bo;
 }
commit 6a4ac18e37521d54dc36b215cbd27c56d20b2922
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 11:51:39 2012 +0000

    sna/traps: Use a more direct fallback path for the CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f05324f..f0f48e8 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2423,44 +2423,63 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
-		scratch = sna_pixmap_create_upload(screen,
-						   width, height, 8,
-						   KGEM_BUFFER_WRITE);
-		if (!scratch)
-			return;
+		if (is_gpu(dst->pDrawable) || picture_is_gpu(src)) {
+			scratch = sna_pixmap_create_upload(screen,
+							   width, height, 8,
+							   KGEM_BUFFER_WRITE);
+			if (!scratch)
+				return;
 
-		if (depth < 8) {
-			image = pixman_image_create_bits(format, width, height,
-							 NULL, 0);
+			if (depth < 8) {
+				image = pixman_image_create_bits(format, width, height,
+								 NULL, 0);
+			} else {
+				memset(scratch->devPrivate.ptr, 0, scratch->devKind*height);
+				image = pixman_image_create_bits(format, width, height,
+								 scratch->devPrivate.ptr,
+								 scratch->devKind);
+			}
+			if (image) {
+				for (; ntrap; ntrap--, traps++)
+					pixman_rasterize_trapezoid(image,
+								   (pixman_trapezoid_t *)traps,
+								   -bounds.x1, -bounds.y1);
+				if (depth < 8) {
+					pixman_image_t *a8;
+
+					a8 = pixman_image_create_bits(PIXMAN_a8, width, height,
+								      scratch->devPrivate.ptr,
+								      scratch->devKind);
+					if (a8) {
+						pixman_image_composite(PIXMAN_OP_SRC,
+								       image, NULL, a8,
+								       0, 0,
+								       0, 0,
+								       0, 0,
+								       width, height);
+						pixman_image_unref (a8);
+					}
+				}
+
+				pixman_image_unref(image);
+			}
 		} else {
+			scratch = screen->CreatePixmap(screen,
+						       width, height, depth,
+						       CREATE_PIXMAP_USAGE_SCRATCH);
+			if (!scratch)
+				return;
+
 			memset(scratch->devPrivate.ptr, 0, scratch->devKind*height);
 			image = pixman_image_create_bits(format, width, height,
 							 scratch->devPrivate.ptr,
 							 scratch->devKind);
-		}
-		if (image) {
-			for (; ntrap; ntrap--, traps++)
-				pixman_rasterize_trapezoid(image,
-							   (pixman_trapezoid_t *)traps,
-							   -bounds.x1, -bounds.y1);
-			if (depth < 8) {
-				pixman_image_t *a8;
-
-				a8 = pixman_image_create_bits(PIXMAN_a8, width, height,
-							      scratch->devPrivate.ptr,
-							      scratch->devKind);
-				if (a8) {
-					pixman_image_composite(PIXMAN_OP_SRC,
-							       image, NULL, a8,
-							       0, 0,
-							       0, 0,
-							       0, 0,
-							       width, height);
-					pixman_image_unref (a8);
-				}
+			if (image) {
+				for (; ntrap; ntrap--, traps++)
+					pixman_rasterize_trapezoid(image,
+								   (pixman_trapezoid_t *)traps,
+								   -bounds.x1, -bounds.y1);
 			}
-
-			pixman_image_unref(image);
 		}
 
 		mask = CreatePicture(0, &scratch->drawable,
commit 8580677ee6725144acf63f5cc2f8097376352d6e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 11:46:18 2012 +0000

    sna/traps: Rasterise using pixman inplace where appropriate
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 933f580..f05324f 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2302,6 +2302,79 @@ trapezoids_bounds(int n, const xTrapezoid *t, BoxPtr box)
 	box->y2 = pixman_fixed_integer_ceil(y2);
 }
 
+static bool
+is_mono(PicturePtr dst, PictFormatPtr mask)
+{
+	return mask ? mask->depth < 8 : dst->polyEdge==PolyEdgeSharp;
+}
+
+static bool
+trapezoids_inplace_fallback(CARD8 op,
+			    PicturePtr src, PicturePtr dst, PictFormatPtr mask,
+			    int ntrap, xTrapezoid *traps)
+{
+	pixman_image_t *image;
+	BoxRec box;
+	uint32_t color;
+	int dx, dy;
+
+	if (op != PictOpAdd)
+		return false;
+
+	if (is_mono(dst, mask)) {
+		if (dst->format != PICT_a1)
+			return false;
+	} else {
+		if (dst->format != PICT_a8)
+			return false;
+	}
+
+	if (!sna_picture_is_solid(src, &color) || (color >> 24) != 0xff) {
+		DBG(("%s: not an opaque solid source\n", __FUNCTION__));
+		return false;
+	}
+
+	box.x1 = dst->pDrawable->x;
+	box.y1 = dst->pDrawable->y;
+	box.x2 = dst->pDrawable->width;
+	box.y2 = dst->pDrawable->height;
+	if (pixman_region_contains_rectangle(dst->pCompositeClip,
+					     &box) != PIXMAN_REGION_IN) {
+		DBG(("%s: requires clipping, drawable (%d,%d), (%d, %d), clip (%d, %d), (%d, %d)\n", __FUNCTION__,
+		     box.x1, box.y1, box.x2, box.y2,
+		     dst->pCompositeClip->extents.x1,
+		     dst->pCompositeClip->extents.y1,
+		     dst->pCompositeClip->extents.x2,
+		     dst->pCompositeClip->extents.y2));
+		return false;
+	}
+
+	if (is_gpu(dst->pDrawable)) {
+		DBG(("%s: not performing inplace as dst is already on the GPU\n",
+		     __FUNCTION__));
+		return false;
+	}
+
+	DBG(("%s\n", __FUNCTION__));
+
+	image = NULL;
+	if (sna_drawable_move_to_cpu(dst->pDrawable, MOVE_READ | MOVE_WRITE))
+		image = image_from_pict(dst, FALSE, &dx, &dy);
+	if (image) {
+		dx += dst->pDrawable->x;
+		dy += dst->pDrawable->y;
+
+		for (; ntrap; ntrap--, traps++)
+			pixman_rasterize_trapezoid(image,
+						   (pixman_trapezoid_t *)traps,
+						   dx, dy);
+
+		pixman_image_unref(image);
+	}
+
+	return true;
+}
+
 static void
 trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		    PictFormatPtr maskFormat, INT16 xSrc, INT16 ySrc,
@@ -2998,12 +3071,6 @@ project_trapezoid_onto_grid(const xTrapezoid *in,
 	return xTrapezoidValid(out);
 }
 
-static bool
-is_mono(PicturePtr dst, PictFormatPtr mask)
-{
-	return mask ? mask->depth < 8 : dst->polyEdge==PolyEdgeSharp;
-}
-
 static span_func_t
 choose_span(PicturePtr dst,
 	    PictFormatPtr maskFormat,
@@ -4448,6 +4515,9 @@ fallback:
 				    xSrc, ySrc, ntrap, traps))
 		return;
 
+	if (trapezoids_inplace_fallback(op, src, dst, maskFormat, ntrap, traps))
+		return;
+
 	DBG(("%s: fallback mask=%08x, ntrap=%d\n", __FUNCTION__,
 	     maskFormat ? (unsigned)maskFormat->format : 0, ntrap));
 	trapezoids_fallback(op, src, dst, maskFormat,
commit 8a303f195b53a78dcbb2b6c84e7e691bedc86d25
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 11:26:03 2012 +0000

    sna: Remove existing damage before overwriting with a composite op
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index a610e7c..b098fcc 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -479,6 +479,11 @@ sna_composite(CARD8 op,
 	     get_drawable_dx(dst->pDrawable),
 	     get_drawable_dy(dst->pDrawable)));
 
+	if (op <= PictOpSrc) {
+		struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
+		sna_damage_subtract(&priv->cpu_damage, &region);
+	}
+
 	memset(&tmp, 0, sizeof(tmp));
 	if (!sna->render.composite(sna,
 				   op, src, mask, dst,
commit 63eeb98463a77adadc26d9de1e640b75eefebf1a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 14 10:30:47 2012 +0000

    sna/gen3: Look harder to see if we can indeed use the BLT for composite
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index d3ed2ef..c567d6b 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2744,15 +2744,13 @@ gen3_render_composite(struct sna *sna,
 					    width,  height,
 					    tmp);
 
-	memset(&tmp->u.gen3, 0, sizeof(tmp->u.gen3));
-
 	if (!gen3_composite_set_target(sna, tmp, dst)) {
 		DBG(("%s: unable to set render target\n",
 		     __FUNCTION__));
 		return FALSE;
 	}
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT  &&
+	if (mask == NULL && sna->kgem.mode != KGEM_RENDER &&
 	    sna_blt_composite(sna, op,
 			      src, dst,
 			      src_x, src_y,
commit d23ee0380b61e0dfd3ed56b8b4a15fd0b7956491
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 22:00:25 2012 +0000

    sna: Reuse the cached upload as a source GPU bo
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42426
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b0b55a4..ce13982 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2236,6 +2236,11 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	if (priv->cpu_damage == NULL)
 		goto done;
 
+	if (priv->gpu_bo->proxy) {
+		assert((flags & MOVE_WRITE) ==0);
+		goto done;
+	}
+
 	if (priv->mapped) {
 		assert(priv->stride);
 		pixmap->devPrivate.ptr = priv->ptr;
commit 0acec1685328b1dd9dffa804a27a122fc6e4225a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 21:29:57 2012 +0000

    sna: Defer the release of the upload buffer cache till retirement
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cee4513..72751f3 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1238,7 +1238,26 @@ static void kgem_retire_partials(struct kgem *kgem)
 		assert(next->base.list.prev == &bo->base.list);
 		assert(bo->base.io);
 
-		if (bo->base.refcnt != 1 || bo->base.rq)
+		if (bo->base.rq)
+			continue;
+
+		DBG(("%s: releasing upload cache for handle=%d? %d\n",
+		     __FUNCTION__, bo->base.handle, !list_is_empty(&bo->base.vma)));
+		while (!list_is_empty(&bo->base.vma)) {
+			struct kgem_bo *cached;
+
+			cached = list_first_entry(&bo->base.vma, struct kgem_bo, vma);
+			assert(cached->proxy == &bo->base);
+			list_del(&cached->vma);
+
+			assert(*(struct kgem_bo **)cached->map == cached);
+			*(struct kgem_bo **)cached->map = NULL;
+			cached->map = NULL;
+
+			kgem_bo_destroy(kgem, cached);
+		}
+
+		if (bo->base.refcnt != 1)
 			continue;
 
 		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
@@ -1256,6 +1275,8 @@ static void kgem_retire_partials(struct kgem *kgem)
 		bo->base.needs_flush = false;
 		bo->used = 0;
 
+		DBG(("%s: transferring partial handle=%d to inactive\n",
+		     __FUNCTION__, bo->base.handle));
 		list_move_tail(&bo->base.list, &kgem->inactive_partials);
 		bubble_sort_partial(&kgem->inactive_partials, bo);
 	}
@@ -1381,9 +1402,7 @@ static void kgem_commit(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
 	struct kgem_bo *bo, *next;
-	struct list release;
 
-	list_init(&release);
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
 		assert(next->request.prev == &bo->request);
 
@@ -1392,7 +1411,7 @@ static void kgem_commit(struct kgem *kgem)
 		     bo->dirty, bo->needs_flush, (unsigned)bo->exec->offset));
 
 		assert(!bo->purged);
-		assert(bo->proxy || bo->rq == rq);
+		assert(bo->rq == rq || (bo->proxy->rq == rq));
 
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
@@ -1411,20 +1430,8 @@ static void kgem_commit(struct kgem *kgem)
 			list_del(&bo->request);
 			bo->rq = NULL;
 			bo->exec = &_kgem_dummy_exec;
-			if (bo->map)
-				list_add_tail(&bo->request, &release);
 		}
 	}
-	while (!list_is_empty(&release)) {
-		bo = list_first_entry(&release, struct kgem_bo, request);
-		DBG(("%s: releasing upload cache, handle=%d\n",
-		     __FUNCTION__, bo->handle));
-		list_del(&bo->request);
-		assert(*(struct kgem_bo **)bo->map == bo);
-		*(struct kgem_bo **)bo->map = NULL;
-		bo->map = NULL;
-		kgem_bo_destroy(kgem, bo);
-	}
 
 	if (rq == &_kgem_static_request) {
 		struct drm_i915_gem_set_domain set_domain;
@@ -2869,11 +2876,12 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	     __FUNCTION__, bo->handle, bo->proxy != NULL));
 
 	if (bo->proxy) {
+		_list_del(&bo->vma);
+		_list_del(&bo->request);
 		if (bo->io && (bo->exec == NULL || bo->proxy->rq == NULL))
 			_kgem_bo_delete_partial(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
-		_list_del(&bo->request);
 		free(bo);
 		return;
 	}
@@ -3468,6 +3476,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 
+		if (bo->used && bo->base.rq == NULL && bo->base.refcnt == 1) {
+			bo->used = 0;
+			bubble_sort_partial(&kgem->active_partials, bo);
+		}
+
 		if (bo->used + size <= bytes(&bo->base)) {
 			DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
 			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
@@ -3895,6 +3908,8 @@ void kgem_proxy_bo_attach(struct kgem_bo *bo,
 {
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	assert(bo->map == NULL);
+	assert(bo->proxy);
+	list_add(&bo->vma, &bo->proxy->vma);
 	bo->map = ptr;
 	*ptr = kgem_bo_reference(bo);
 }
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c9016a1..b0b55a4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11822,6 +11822,7 @@ static void sna_accel_inactive(struct sna *sna)
 					inactive);
 		assert((priv->create & KGEM_CAN_CREATE_LARGE) == 0);
 		assert(priv->gpu_bo);
+		assert(!priv->gpu_bo->proxy);
 
 		/* XXX Rather than discarding the GPU buffer here, we
 		 * could mark it purgeable and allow the shrinker to
commit 06b28d541bdf2607edc2eb476919b28e747885d8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 20:01:53 2012 +0000

    sna: Destroy the cached upload buffer before copying into it
    
    As we discard the buffer after the next batch, we will lose the contents
    of the pixmap. Instead discard the cache, and treat it as a normal
    shadow again.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42426
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d5420a3..c9016a1 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3402,7 +3402,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		dst_priv->clear = false;
 	}
 
-	assert(dst_priv->gpu_bo == NULL || dst_priv->gpu_bo->proxy == NULL);
+	if (dst_priv->gpu_bo && dst_priv->gpu_bo->proxy) {
+		kgem_bo_destroy(&sna->kgem, dst_priv->gpu_bo);
+		dst_priv->gpu_bo = NULL;
+	}
 
 	/* Try to maintain the data on the GPU */
 	if (dst_priv->gpu_bo == NULL &&
commit 015db20287ec90f81ddb7eb2e7ce23b90aacfb2b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 16:29:41 2012 +0000

    sna: Refactor source upload-to-gpu
    
    This was originally split between into functions for flexibility that
    nothing used, so merge the duplicated code into a common function.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index b5d314b..8be3e72 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -354,122 +354,88 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 	return priv->cpu_bo;
 }
 
-static Bool
+static struct kgem_bo *
 move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 {
 	struct sna_pixmap *priv;
 	int count, w, h;
+	bool migrate = false;
 
 	if (DBG_FORCE_UPLOAD > 0)
-		return FALSE;
+		return NULL;
+
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: not migrating unattached pixmap\n",
+		     __FUNCTION__));
+		return NULL;
+	}
+
+	if (priv->gpu_bo) {
+		if (priv->cpu_damage &&
+		    sna_damage_contains_box(priv->cpu_damage,
+					    box) != PIXMAN_REGION_OUT) {
+			if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ))
+				return NULL;
+		}
+
+		return priv->gpu_bo;
+	}
 
 	if (pixmap->usage_hint) {
 		DBG(("%s: not migrating pixmap due to usage_hint=%d\n",
 		     __FUNCTION__, pixmap->usage_hint));
-		return FALSE;
+		return NULL;
 	}
 
 	if (DBG_FORCE_UPLOAD < 0)
-		return TRUE;
+		migrate = true;
 
 	w = box->x2 - box->x1;
 	h = box->y2 - box->y1;
 	if (w == pixmap->drawable.width && h == pixmap->drawable.height) {
-		bool upload;
-
-		priv = sna_pixmap(pixmap);
-		if (!priv) {
-			DBG(("%s: not migrating unattached pixmap\n",
-			     __FUNCTION__));
-			return false;
-		}
-
-		upload = true;
+		migrate = true;
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
 				       I915_TILING_X,
 				       pixmap->drawable.width,
 				       pixmap->drawable.height,
 				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-			upload = priv->source_count++ > SOURCE_BIAS;
+			migrate = priv->source_count++ > SOURCE_BIAS;
 
 		DBG(("%s: migrating whole pixmap (%dx%d) for source (%d,%d),(%d,%d), count %d? %d\n",
 		     __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height,
 		     box->x1, box->y1, box->x2, box->y2, priv->source_count,
-		     upload));
-		return upload;
+		     migrate));
+	} else {
+		/* ignore tiny fractions */
+		if (64*w*h > pixmap->drawable.width * pixmap->drawable.height) {
+			count = priv->source_count++;
+			if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
+			    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
+					       I915_TILING_X,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height,
+					       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
+				count -= SOURCE_BIAS;
+
+			DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
+			     __FUNCTION__,
+			     box->x1, box->y1, box->x2, box->y2,
+			     count, w*h,
+			     pixmap->drawable.width * pixmap->drawable.height,
+			     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
+
+			migrate =  count*w*h > pixmap->drawable.width * pixmap->drawable.height;
+		}
 	}
 
-	/* ignore tiny fractions */
-	if (64*w*h < pixmap->drawable.width * pixmap->drawable.height)
-		return FALSE;
-
-	priv = sna_pixmap(pixmap);
-	if (!priv)
-		return FALSE;
-
-	count = priv->source_count++;
-	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
-	    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
-			       I915_TILING_X,
-			       pixmap->drawable.width,
-			       pixmap->drawable.height,
-			       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
-		count -= SOURCE_BIAS;
-
-	DBG(("%s: migrate box (%d, %d), (%d, %d)? source count=%d, fraction=%d/%d [%d]\n",
-	     __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2,
-	     count, w*h,
-	     pixmap->drawable.width * pixmap->drawable.height,
-	     pixmap->drawable.width * pixmap->drawable.height / (w*h)));
-
-	return count*w*h > pixmap->drawable.width * pixmap->drawable.height;
-}
-
-static Bool
-_texture_is_cpu(PixmapPtr pixmap, const BoxRec *box)
-{
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
-
-	if (priv == NULL)
-		return TRUE;
-
-	if (priv->gpu_bo == NULL)
-		return TRUE;
-
-	if (!priv->cpu_damage)
-		return FALSE;
-
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return TRUE;
-
-	if (sna_damage_contains_box__no_reduce(priv->cpu_damage, box))
-		return TRUE;
-
-	if (sna_damage_contains_box(priv->gpu_damage, box) != PIXMAN_REGION_OUT)
-		return FALSE;
-
-	return sna_damage_contains_box(priv->cpu_damage, box) != PIXMAN_REGION_OUT;
-}
+	if (migrate && !sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+		return NULL;
 
-#if DEBUG_RENDER
-static Bool
-texture_is_cpu(PixmapPtr pixmap, const BoxRec *box)
-{
-	Bool ret = _texture_is_cpu(pixmap, box);
-	ErrorF("%s(pixmap=%p, box=((%d, %d), (%d, %d)) = %d\n",
-	       __FUNCTION__, pixmap, box->x1, box->y1, box->x2, box->y2, ret);
-	return ret;
-}
-#else
-static Bool
-texture_is_cpu(PixmapPtr pixmap, const BoxRec *box)
-{
-	return _texture_is_cpu(pixmap, box);
+	return priv->gpu_bo;
 }
-#endif
 
 static struct kgem_bo *upload(struct sna *sna,
 			      struct sna_composite_channel *channel,
@@ -600,16 +566,13 @@ sna_render_pixmap_bo(struct sna *sna,
 	if (bo) {
 		bo = kgem_bo_reference(bo);
 	} else {
-		if (!texture_is_cpu(pixmap, &box) || move_to_gpu(pixmap, &box)) {
-			priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
-			if (priv)
-				bo = kgem_bo_reference(priv->gpu_bo);
-		}
+		bo = move_to_gpu(pixmap, &box);
 		if (bo == NULL) {
 			DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
 			     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 			bo = upload(sna, channel, pixmap, &box);
-		}
+		} else
+			bo = kgem_bo_reference(bo);
 	}
 
 	channel->bo = bo;
@@ -1146,18 +1109,8 @@ sna_render_picture_extract(struct sna *sna,
 		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
 			return 0;
 	} else {
-		bool upload = true;
-		if (!texture_is_cpu(pixmap, &box) ||
-		    move_to_gpu(pixmap, &box)) {
-			struct sna_pixmap *priv;
-
-			priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
-			if (priv) {
-				src_bo = priv->gpu_bo;
-				upload = false;
-			}
-		}
-		if (upload) {
+		src_bo = move_to_gpu(pixmap, &box);
+		if (src_bo == NULL) {
 			bo = kgem_upload_source_image(&sna->kgem,
 						      pixmap->devPrivate.ptr,
 						      &box,
commit 6dfe28f978b59ab2fc44cdd8ddf3743d6507588b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 15:47:27 2012 +0000

    sna/gen6: Remove the double application of the render offset
    
    Cut'n'paste error from an older generation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 8a8cdd8..fde0776 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1466,26 +1466,17 @@ gen6_emit_composite_primitive(struct sna *sna,
 			      const struct sna_composite_rectangles *r)
 {
 	gen6_emit_composite_vertex(sna, op,
-				   r->src.x + r->width,
-				   r->src.y + r->height,
-				   r->mask.x + r->width,
-				   r->mask.y + r->height,
-				   op->dst.x + r->dst.x + r->width,
-				   op->dst.y + r->dst.y + r->height);
+				   r->src.x + r->width,  r->src.y + r->height,
+				   r->mask.x + r->width, r->mask.y + r->height,
+				   r->dst.x + r->width, r->dst.y + r->height);
 	gen6_emit_composite_vertex(sna, op,
-				   r->src.x,
-				   r->src.y + r->height,
-				   r->mask.x,
-				   r->mask.y + r->height,
-				   op->dst.x + r->dst.x,
-				   op->dst.y + r->dst.y + r->height);
+				   r->src.x,  r->src.y + r->height,
+				   r->mask.x, r->mask.y + r->height,
+				   r->dst.x,  r->dst.y + r->height);
 	gen6_emit_composite_vertex(sna, op,
-				   r->src.x,
-				   r->src.y,
-				   r->mask.x,
-				   r->mask.y,
-				   op->dst.x + r->dst.x,
-				   op->dst.y + r->dst.y);
+				   r->src.x,  r->src.y,
+				   r->mask.x, r->mask.y,
+				   r->dst.x,  r->dst.y);
 }
 
 static void gen6_emit_vertex_buffer(struct sna *sna,
commit c751242e245eb168b92616bdcea3421be4e420bc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 13 12:04:04 2012 +0000

    sna: Only use the cpu bo for xfer between CPU and GPU if either is busy
    
    The synchronisation costs overwhelm any benefit from offloading the
    copy, unless we are currently streaming the updates anyway.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index dc330ca..d5420a3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -857,6 +857,14 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
 }
 
+static bool use_cpu_bo_for_xfer(struct sna_pixmap *priv)
+{
+	if (priv->cpu_bo == NULL)
+		return FALSE;
+
+	return kgem_bo_is_busy(priv->gpu_bo) || kgem_bo_is_busy(priv->cpu_bo);
+}
+
 bool
 _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 {
@@ -1028,16 +1036,12 @@ skip_inplace_map:
 
 		n = sna_damage_get_boxes(priv->gpu_damage, &box);
 		if (n) {
-			struct kgem_bo *dst_bo;
 			Bool ok = FALSE;
 
-			dst_bo = NULL;
-			if (sna->kgem.gen >= 30)
-				dst_bo = priv->cpu_bo;
-			if (dst_bo)
+			if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
-							    pixmap, dst_bo, 0, 0,
+							    pixmap, priv->cpu_bo, 0, 0,
 							    box, n);
 			if (!ok)
 				sna_read_boxes(sna,
@@ -1421,7 +1425,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = FALSE;
-			if (priv->cpu_bo && sna->kgem.gen >= 30)
+			if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -1496,7 +1500,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (n) {
 					Bool ok = FALSE;
 
-					if (priv->cpu_bo && sna->kgem.gen >= 30)
+					if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
@@ -1516,10 +1520,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 								      &r->extents)) {
 				BoxPtr box = REGION_RECTS(r);
 				int n = REGION_NUM_RECTS(r);
-				Bool ok;
+				Bool ok = FALSE;
 
-				ok = FALSE;
-				if (priv->cpu_bo && sna->kgem.gen >= 30)
+				if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
 					ok = sna->render.copy_boxes(sna, GXcopy,
 								    pixmap, priv->gpu_bo, 0, 0,
 								    pixmap, priv->cpu_bo, 0, 0,
@@ -1539,10 +1542,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 				if (sna_damage_intersect(priv->gpu_damage, r, &need)) {
 					BoxPtr box = REGION_RECTS(&need);
 					int n = REGION_NUM_RECTS(&need);
-					Bool ok;
+					Bool ok = FALSE;
 
-					ok = FALSE;
-					if (priv->cpu_bo && sna->kgem.gen >= 30)
+					if (sna->kgem.gen >= 30 && use_cpu_bo_for_xfer(priv))
 						ok = sna->render.copy_boxes(sna, GXcopy,
 									    pixmap, priv->gpu_bo, 0, 0,
 									    pixmap, priv->cpu_bo, 0, 0,
@@ -1732,10 +1734,9 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 		n = sna_damage_get_boxes(priv->cpu_damage, &box);
 		if (n) {
-			Bool ok;
+			Bool ok = FALSE;
 
-			ok = FALSE;
-			if (priv->cpu_bo)
+			if (use_cpu_bo_for_xfer(priv))
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->cpu_bo, 0, 0,
 							    pixmap, priv->gpu_bo, 0, 0,
@@ -1768,7 +1769,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
 		   sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
 		Bool ok = FALSE;
-		if (priv->cpu_bo)
+		if (use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -1791,7 +1792,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 		box = REGION_RECTS(&i);
 		ok = FALSE;
-		if (priv->cpu_bo)
+		if (use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
@@ -2250,7 +2251,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
 		ok = FALSE;
-		if (priv->cpu_bo)
+		if (use_cpu_bo_for_xfer(priv))
 			ok = sna->render.copy_boxes(sna, GXcopy,
 						    pixmap, priv->cpu_bo, 0, 0,
 						    pixmap, priv->gpu_bo, 0, 0,
commit c9668a772f15ab36d643a9b0616b309856146c89
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 23:28:51 2012 +0000

    sna: Reduce OVER with a clear pixmap to a BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 441b24e..6ec4748 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -460,6 +460,13 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 	return sna_pixmap_move_to_gpu(get_drawable_pixmap(drawable), flags) != NULL;
 }
 
+static inline bool
+sna_drawable_is_clear(DrawablePtr d)
+{
+	struct sna_pixmap *priv = sna_pixmap(get_drawable_pixmap(d));
+	return priv && priv->clear && priv->clear_color == 0;
+}
+
 static inline struct kgem_bo *sna_pixmap_get_bo(PixmapPtr pixmap)
 {
 	return sna_pixmap(pixmap)->gpu_bo;
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index eb8dbf8..e7a6182 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1556,6 +1556,7 @@ sna_blt_composite(struct sna *sna,
 	struct sna_pixmap *priv;
 	int16_t tx, ty;
 	uint32_t alpha_fixup;
+	bool was_clear;
 	Bool ret;
 
 #if DEBUG_NO_BLT || NO_BLT_COMPOSITE
@@ -1576,6 +1577,7 @@ sna_blt_composite(struct sna *sna,
 		return FALSE;
 	}
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
 	tmp->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	priv = sna_pixmap_move_to_gpu(tmp->dst.pixmap, MOVE_WRITE | MOVE_READ);
 	if (priv == NULL) {
@@ -1606,16 +1608,22 @@ sna_blt_composite(struct sna *sna,
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
-	if (op == PictOpClear)
+	if (op == PictOpClear) {
+clear:
+		if (was_clear)
+			return TRUE;
 		return prepare_blt_clear(sna, tmp);
+	}
 
 	if (is_solid(src)) {
 		if (op == PictOpOver && is_opaque_solid(src))
 			op = PictOpSrc;
 		if (op == PictOpAdd && is_white(src))
 			op = PictOpSrc;
+		if (was_clear && (op == PictOpAdd || op == PictOpOver))
+			op = PictOpSrc;
 		if (op == PictOpOutReverse && is_opaque_solid(src))
-			return prepare_blt_clear(sna, tmp);
+			goto clear;
 
 		if (op != PictOpSrc) {
 			DBG(("%s: unsuported op [%d] for blitting\n",
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index d2f16f2..933f580 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3033,13 +3033,6 @@ choose_span(PicturePtr dst,
 }
 
 static bool
-sna_drawable_is_clear(DrawablePtr d)
-{
-	struct sna_pixmap *priv = sna_pixmap(get_drawable_pixmap(d));
-	return priv && priv->clear && priv->clear_color == 0;
-}
-
-static bool
 mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 			       INT16 src_x, INT16 src_y,
 			       int ntrap, xTrapezoid *traps)
commit 2ea58256f5ce6e1c0f48d366ff7a1fbf300600de
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 23:05:17 2012 +0000

    sna: Reuse the same upload buffer for the duration of the batch
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index db579d0..cee4513 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1381,7 +1381,9 @@ static void kgem_commit(struct kgem *kgem)
 {
 	struct kgem_request *rq = kgem->next_request;
 	struct kgem_bo *bo, *next;
+	struct list release;
 
+	list_init(&release);
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
 		assert(next->request.prev == &bo->request);
 
@@ -1408,8 +1410,21 @@ static void kgem_commit(struct kgem *kgem)
 			/* proxies are not used for domain tracking */
 			list_del(&bo->request);
 			bo->rq = NULL;
+			bo->exec = &_kgem_dummy_exec;
+			if (bo->map)
+				list_add_tail(&bo->request, &release);
 		}
 	}
+	while (!list_is_empty(&release)) {
+		bo = list_first_entry(&release, struct kgem_bo, request);
+		DBG(("%s: releasing upload cache, handle=%d\n",
+		     __FUNCTION__, bo->handle));
+		list_del(&bo->request);
+		assert(*(struct kgem_bo **)bo->map == bo);
+		*(struct kgem_bo **)bo->map = NULL;
+		bo->map = NULL;
+		kgem_bo_destroy(kgem, bo);
+	}
 
 	if (rq == &_kgem_static_request) {
 		struct drm_i915_gem_set_domain set_domain;
@@ -2842,7 +2857,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
 	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
 
-	if (bo->delta + bo->size.bytes == io->used) {
+	if (ALIGN(bo->delta + bo->size.bytes, 64) == io->used) {
 		io->used = bo->delta;
 		bubble_sort_partial(&kgem->active_partials, io);
 	}
@@ -2850,9 +2865,11 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 
 void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 {
+	DBG(("%s: handle=%d, proxy? %d\n",
+	     __FUNCTION__, bo->handle, bo->proxy != NULL));
+
 	if (bo->proxy) {
-		assert(bo->map == NULL);
-		if (bo->io && bo->exec == NULL)
+		if (bo->io && (bo->exec == NULL || bo->proxy->rq == NULL))
 			_kgem_bo_delete_partial(kgem, bo);
 		kgem_bo_unref(kgem, bo->proxy);
 		kgem_bo_binding_free(kgem, bo);
@@ -3873,6 +3890,15 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 	return bo;
 }
 
+void kgem_proxy_bo_attach(struct kgem_bo *bo,
+			  struct kgem_bo **ptr)
+{
+	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
+	assert(bo->map == NULL);
+	bo->map = ptr;
+	*ptr = kgem_bo_reference(bo);
+}
+
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
 	struct kgem_partial_bo *bo;
@@ -3880,7 +3906,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 	int domain;
 
 	assert(_bo->io);
-	assert(_bo->exec == NULL);
+	assert(_bo->exec == &_kgem_dummy_exec);
+	assert(_bo->rq == NULL);
 	if (_bo->proxy)
 		_bo = _bo->proxy;
 	assert(_bo->exec == NULL);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index dff8bb2..52e5e9c 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -200,6 +200,7 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 					 const void *data,
 					 BoxPtr box,
 					 int stride, int bpp);
+void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1b0ac3c..dc330ca 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1064,8 +1064,10 @@ skip_inplace_map:
 	}
 
 done:
-	if (flags & MOVE_WRITE)
+	if (flags & MOVE_WRITE) {
 		priv->source_count = SOURCE_BIAS;
+		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+	}
 
 	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
@@ -1260,6 +1262,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 		if (priv->stride && priv->gpu_bo &&
 		    region_inplace(sna, pixmap, region, priv)) {
+			assert(priv->gpu_bo->proxy == NULL);
 			if (sync_will_stall(priv->gpu_bo) &&
 			    priv->gpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
@@ -1371,6 +1374,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	if (priv->gpu_bo == NULL)
 		goto done;
 
+	assert(priv->gpu_bo->proxy == NULL);
 	if (priv->clear) {
 		int n = REGION_NUM_RECTS(region);
 		BoxPtr box = REGION_RECTS(region);
@@ -1581,8 +1585,10 @@ done:
 		RegionTranslate(region, -dx, -dy);
 
 out:
-	if (flags & MOVE_WRITE)
+	if (flags & MOVE_WRITE) {
 		priv->source_count = SOURCE_BIAS;
+		assert(priv->gpu_bo == NULL || priv->gpu_bo->proxy == NULL);
+	}
 	if (priv->cpu_bo) {
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
@@ -1942,6 +1948,7 @@ done:
 
 	DBG(("%s: using GPU bo with damage? %d\n",
 	     __FUNCTION__, *damage != NULL));
+	assert(priv->gpu_bo->proxy == NULL);
 	return priv->gpu_bo;
 
 use_gpu_bo:
@@ -1951,6 +1958,7 @@ use_gpu_bo:
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
 	DBG(("%s: using whole GPU bo\n", __FUNCTION__));
+	assert(priv->gpu_bo->proxy == NULL);
 	return priv->gpu_bo;
 
 use_cpu_bo:
@@ -2083,6 +2091,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		DBG(("%s: GPU all-damaged\n", __FUNCTION__));
+		assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
 		return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
 	}
 
@@ -2285,6 +2294,7 @@ done:
 		}
 	}
 active:
+	assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
 	return sna_pixmap_mark_active(sna, priv);
 }
 
@@ -2532,6 +2542,8 @@ static bool upload_inplace(struct sna *sna,
 		return false;
 
 	if (priv->gpu_bo) {
+		assert(priv->gpu_bo->proxy == NULL);
+
 		if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))
 			return true;
 
@@ -3389,6 +3401,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		dst_priv->clear = false;
 	}
 
+	assert(dst_priv->gpu_bo == NULL || dst_priv->gpu_bo->proxy == NULL);
+
 	/* Try to maintain the data on the GPU */
 	if (dst_priv->gpu_bo == NULL &&
 	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 09a33c1..b5d314b 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -496,6 +496,14 @@ static struct kgem_bo *upload(struct sna *sna,
 		channel->offset[1] -= box->y1;
 		channel->scale[0] = 1.f/channel->width;
 		channel->scale[1] = 1.f/channel->height;
+
+		if (pixmap->usage_hint == 0 &&
+		    channel->width  == pixmap->drawable.width &&
+		    channel->height == pixmap->drawable.height) {
+			struct sna_pixmap *priv = sna_pixmap(pixmap);
+			if (priv)
+				kgem_proxy_bo_attach(bo, &priv->gpu_bo);
+		}
 	}
 
 	return bo;
@@ -526,7 +534,8 @@ sna_render_pixmap_bo(struct sna *sna,
 	priv = sna_pixmap(pixmap);
 	if (priv) {
 		if (priv->gpu_bo &&
-		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage)) {
+		    (DAMAGE_IS_ALL(priv->gpu_damage) || !priv->cpu_damage ||
+		     priv->gpu_bo->proxy)) {
 			channel->bo = kgem_bo_reference(priv->gpu_bo);
 			return 1;
 		}
@@ -1148,12 +1157,20 @@ sna_render_picture_extract(struct sna *sna,
 				upload = false;
 			}
 		}
-		if (upload)
+		if (upload) {
 			bo = kgem_upload_source_image(&sna->kgem,
 						      pixmap->devPrivate.ptr,
 						      &box,
 						      pixmap->devKind,
 						      pixmap->drawable.bitsPerPixel);
+			if (pixmap->usage_hint == 0 &&
+			    box.x2 - box.x1 == pixmap->drawable.width &&
+			    box.y2 - box.y1 == pixmap->drawable.height) {
+				struct sna_pixmap *priv = sna_pixmap(pixmap);
+				if (priv)
+					kgem_proxy_bo_attach(bo, &priv->gpu_bo);
+			}
+		}
 	}
 	if (src_bo) {
 		bo = kgem_create_2d(&sna->kgem, w, h,
commit 8b9abe2be1f54bd8e8593ed155cc4725ac97627a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 21:09:19 2012 +0000

    sna: Prefer to render very thin trapezoids inplace
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index e6015af..8d3d9e4 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -204,6 +204,7 @@ struct sna_render {
 				unsigned flags,
 				struct sna_composite_spans_op *tmp);
 #define COMPOSITE_SPANS_RECTILINEAR 0x1
+#define COMPOSITE_SPANS_INPLACE_HINT 0x2
 
 	Bool (*video)(struct sna *sna,
 		      struct sna_video *video,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index a523fed..88d0130 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -85,7 +85,13 @@ static inline Bool
 is_cpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
-	return !priv || priv->cpu_damage != NULL;
+	if (priv == NULL || priv->clear || DAMAGE_IS_ALL(priv->cpu_damage))
+		return true;
+
+	if (priv->gpu_damage || (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))
+		return false;
+
+	return true;
 }
 
 static inline Bool
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 23a65ef..d2f16f2 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3180,7 +3180,8 @@ mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 
 static bool
 trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
-			 PictFormatPtr maskFormat, INT16 src_x, INT16 src_y,
+			 PictFormatPtr maskFormat, unsigned int flags,
+			 INT16 src_x, INT16 src_y,
 			 int ntrap, xTrapezoid *traps)
 {
 	struct sna *sna;
@@ -3263,8 +3264,7 @@ trapezoid_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 					 extents.x1,  extents.y1,
 					 extents.x2 - extents.x1,
 					 extents.y2 - extents.y1,
-					 0,
-					 &tmp)) {
+					 flags, &tmp)) {
 		DBG(("%s: fallback -- composite spans render op not supported\n",
 		     __FUNCTION__));
 		return false;
@@ -3776,6 +3776,40 @@ mono_inplace_composite_boxes(struct sna *sna,
 }
 
 static bool
+trapezoid_spans_maybe_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
+			      PictFormatPtr maskFormat)
+{
+	if (NO_SCAN_CONVERTER)
+		return false;
+
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat))
+		return false;
+	if (dst->alphaMap)
+		return false;
+
+	if (is_mono(dst, maskFormat))
+		goto out;
+
+	if (!sna_picture_is_solid(src, NULL))
+		return false;
+
+	if (dst->format != PICT_a8)
+		return false;
+
+	switch (op) {
+	case PictOpIn:
+	case PictOpAdd:
+	case PictOpSrc:
+		break;
+	default:
+		return false;
+	}
+
+out:
+	return is_cpu(dst->pDrawable) ? true : dst->pDrawable->width <= TOR_INPLACE_SIZE;
+}
+
+static bool
 trapezoid_span_mono_inplace(CARD8 op,
 			    PicturePtr src,
 			    PicturePtr dst,
@@ -4304,6 +4338,7 @@ sna_composite_trapezoids(CARD8 op,
 {
 	struct sna *sna = to_sna_from_drawable(dst->pDrawable);
 	bool rectilinear, pixel_aligned;
+	unsigned flags;
 	int n;
 
 	DBG(("%s(op=%d, src=(%d, %d), mask=%08x, ntrap=%d)\n", __FUNCTION__,
@@ -4370,8 +4405,9 @@ sna_composite_trapezoids(CARD8 op,
 		}
 	}
 
-	DBG(("%s: rectlinear? %d, pixel-aligned? %d\n",
+	DBG(("%s: rectilinear? %d, pixel-aligned? %d\n",
 	     __FUNCTION__, rectilinear, pixel_aligned));
+	flags = 0;
 	if (rectilinear) {
 		if (pixel_aligned) {
 			if (composite_aligned_boxes(sna, op, src, dst,
@@ -4386,9 +4422,17 @@ sna_composite_trapezoids(CARD8 op,
 						      ntrap, traps))
 				return;
 		}
+		flags |= COMPOSITE_SPANS_RECTILINEAR;
+	}
+	if (trapezoid_spans_maybe_inplace(op, src, dst, maskFormat)) {
+		flags |= COMPOSITE_SPANS_INPLACE_HINT;
+		if (trapezoid_span_inplace(op, src, dst, maskFormat,
+					   xSrc, ySrc, ntrap, traps,
+					   false))
+			return;
 	}
 
-	if (trapezoid_span_converter(op, src, dst, maskFormat,
+	if (trapezoid_span_converter(op, src, dst, maskFormat, flags,
 				     xSrc, ySrc, ntrap, traps))
 		return;
 
commit 1e2d6ee31a21267ba27e4bebb883aaab08a12f30
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 20:34:42 2012 +0000

    sna: Always reset the source counter after rendering to with the CPU
    
    The goal is to avoid moving to the GPU too early for a frequently
    modified CPU buffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2e3df8a..1b0ac3c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1061,11 +1061,12 @@ skip_inplace_map:
 
 		if (priv->flush)
 			list_move(&priv->list, &sna->dirty_pixmaps);
-
-		priv->source_count = SOURCE_BIAS;
 	}
 
 done:
+	if (flags & MOVE_WRITE)
+		priv->source_count = SOURCE_BIAS;
+
 	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
@@ -1580,6 +1581,8 @@ done:
 		RegionTranslate(region, -dx, -dy);
 
 out:
+	if (flags & MOVE_WRITE)
+		priv->source_count = SOURCE_BIAS;
 	if (priv->cpu_bo) {
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 11549b4..09a33c1 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -393,10 +393,10 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 				       pixmap->drawable.bitsPerPixel) == I915_TILING_NONE)
 			upload = priv->source_count++ > SOURCE_BIAS;
 
-		DBG(("%s: migrating whole pixmap (%dx%d) for source (%d,%d),(%d,%d)? %d\n",
+		DBG(("%s: migrating whole pixmap (%dx%d) for source (%d,%d),(%d,%d), count %d? %d\n",
 		     __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height,
-		     box->x1, box->y1, box->x2, box->y2,
+		     box->x1, box->y1, box->x2, box->y2, priv->source_count,
 		     upload));
 		return upload;
 	}
commit 7bde1f55a1075eefab0f083833508265fc0e91e7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 20:25:50 2012 +0000

    sna: After move-to-gpu signals yes, force the GPU bo creation
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index d1e3500..11549b4 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -591,22 +591,16 @@ sna_render_pixmap_bo(struct sna *sna,
 	if (bo) {
 		bo = kgem_bo_reference(bo);
 	} else {
-		if (texture_is_cpu(pixmap, &box) && !move_to_gpu(pixmap, &box)) {
+		if (!texture_is_cpu(pixmap, &box) || move_to_gpu(pixmap, &box)) {
+			priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
+			if (priv)
+				bo = kgem_bo_reference(priv->gpu_bo);
+		}
+		if (bo == NULL) {
 			DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
 			     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 			bo = upload(sna, channel, pixmap, &box);
 		}
-
-		if (bo == NULL) {
-			priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
-			if (priv) {
-				bo = kgem_bo_reference(priv->gpu_bo);
-			} else {
-				DBG(("%s: failed to upload pixmap to gpu, uploading CPU box (%d, %d), (%d, %d) instead\n",
-				     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
-				bo = upload(sna, channel, pixmap, &box);
-			}
-		}
 	}
 
 	channel->bo = bo;
@@ -1148,7 +1142,7 @@ sna_render_picture_extract(struct sna *sna,
 		    move_to_gpu(pixmap, &box)) {
 			struct sna_pixmap *priv;
 
-			priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
+			priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
 			if (priv) {
 				src_bo = priv->gpu_bo;
 				upload = false;
commit 2ee7de1f1711d1058e5812c6aa0d94ecf9af7d8d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 20:09:05 2012 +0000

    sna/trapezoids: Reduce mono ADD/OVER against a clear background to a SRC
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f936b4b..23a65ef 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3849,6 +3849,7 @@ trapezoid_span_mono_inplace(CARD8 op,
 
 	if (sna_picture_is_solid(src, &inplace.fill.color) &&
 	    (op == PictOpSrc || op == PictOpClear ||
+	     (was_clear && (op == PictOpOver || op == PictOpAdd)) ||
 	     (op == PictOpOver && inplace.fill.color >> 24 == 0xff))) {
 		PixmapPtr pixmap;
 		int16_t dx, dy;
commit 99c239e380b9f5134afc75bf55bf3f69e5113e38
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 19:49:30 2012 +0000

    sna: Treat backing pixmaps no differently from their forward facing cousins
    
    Another fix for the large buffers overhaul.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3619101..2e3df8a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -395,13 +395,6 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 	if (sna->kgem.gen == 21)
 		return I915_TILING_X;
 
-	if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_BACKING_PIXMAP) {
-		/* Treat this like a window, and require accelerated
-		 * scrolling i.e. overlapped blits.
-		 */
-		return I915_TILING_X;
-	}
-
 	if (sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
@@ -765,6 +758,8 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
 		flags &= ~KGEM_CAN_CREATE_GPU;
+	if (usage == CREATE_PIXMAP_USAGE_BACKING_PIXMAP)
+		usage = 0;
 
 force_create:
 	pad = PixmapBytePad(width, depth);
commit 28792be1f31190171644d911d7b5573186fe2df2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 19:45:35 2012 +0000

    sna/display: Only flush pending output when installing a new scanout
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index 676125d..9401ca4 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -634,8 +634,10 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	     sna_mode->fb_pixmap,
 	     sna->front->drawable.serialNumber));
 
-	if (sna_mode->fb_pixmap != sna->front->drawable.serialNumber)
+	if (sna_mode->fb_pixmap != sna->front->drawable.serialNumber) {
+		kgem_submit(&sna->kgem);
 		sna_mode_remove_fb(sna);
+	}
 
 	if (sna_mode->fb_id == 0) {
 		struct kgem_bo *bo = sna_pixmap_pin(sna->front);
@@ -677,8 +679,6 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 	crtc->y = y;
 	crtc->rotation = rotation;
 
-	kgem_submit(&sna->kgem);
-
 	mode_to_kmode(&sna_crtc->kmode, mode);
 	if (!sna_crtc_apply(crtc)) {
 		crtc->x = saved_x;
commit 278c329b9e2aa9a94bd0d0ee73939814ac514e3d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 11:50:54 2012 +0000

    sna/trapezoids: Further improve the clipping criteria for inplace traps
    
    Not only must we defend against the span starting too far to the right,
    we must also defend against the span terminating too far to the left.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e76f918..f936b4b 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1520,37 +1520,40 @@ inplace_subrow(struct active_list *active, int8_t *row,
 
 		winding += edge->dir;
 		if (0 == winding) {
-			if (edge->x.quo >= FAST_SAMPLES_X * width) {
-				*max = width;
-			} else if (edge->next->x.quo != edge->x.quo) {
-				grid_scaled_x_t fx;
-				int ix;
-
-				xstart = edge->x.quo;
-				FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
-				row[ix++] -= FAST_SAMPLES_X - fx;
-				if (ix < width)
-					row[ix] -= fx;
-
-				if (ix > *max)
-					*max = ix;
-
-				xstart = INT_MIN;
+			if (edge->next->x.quo != edge->x.quo) {
+				if (edge->x.quo <= xstart) {
+					xstart = INT_MIN;
+				} else  {
+					grid_scaled_x_t fx;
+					int ix;
+
+					if (xstart < FAST_SAMPLES_X * width) {
+						FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
+						if (ix < *min)
+							*min = ix;
+
+						row[ix++] += FAST_SAMPLES_X - fx;
+						if (ix < width)
+							row[ix] += fx;
+					}
+
+					xstart = edge->x.quo;
+					if (xstart < FAST_SAMPLES_X * width) {
+						FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
+						row[ix++] -= FAST_SAMPLES_X - fx;
+						if (ix < width)
+							row[ix] -= fx;
+
+						if (ix > *max)
+							*max = ix;
+
+						xstart = INT_MIN;
+					} else
+						*max = width;
+				}
 			}
 		} else if (xstart < 0) {
 			xstart = MAX(edge->x.quo, 0);
-			if (xstart < FAST_SAMPLES_X * width) {
-				grid_scaled_x_t fx;
-				int ix;
-
-				FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
-				if (ix < *min)
-					*min = ix;
-
-				row[ix++] += FAST_SAMPLES_X - fx;
-				if (ix < width)
-					row[ix] += fx;
-			}
 		}
 
 		if (--edge->height_left) {
commit 7ae45584327a10b05f7aee99bcb71e9d990a3e9b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 12 10:49:46 2012 +0000

    sna/trapezoids: Add paranoia to ensure that the span starts within the clip
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47226
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 3e2802e..e76f918 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1538,16 +1538,19 @@ inplace_subrow(struct active_list *active, int8_t *row,
 				xstart = INT_MIN;
 			}
 		} else if (xstart < 0) {
-			grid_scaled_x_t fx;
-			int ix;
-
 			xstart = MAX(edge->x.quo, 0);
-			FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
-			if (ix < *min)
-				*min = ix;
+			if (xstart < FAST_SAMPLES_X * width) {
+				grid_scaled_x_t fx;
+				int ix;
 
-			row[ix++] += FAST_SAMPLES_X - fx;
-			row[ix] += fx;
+				FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
+				if (ix < *min)
+					*min = ix;
+
+				row[ix++] += FAST_SAMPLES_X - fx;
+				if (ix < width)
+					row[ix] += fx;
+			}
 		}
 
 		if (--edge->height_left) {
commit 8136bc5e113ae06c30def3c91b1615e5fab8af44
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 11 19:45:55 2012 +0000

    sna: Make the maximum BLT pitch assertions consistent
    
    The maximum permissible BLT pitch value is 32767, so make the assertions
    match...
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47206
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index d70e30e..eb8dbf8 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -118,6 +118,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 {
 	struct kgem *kgem = &sna->kgem;
 
+	assert(kgem_bo_can_blt (kgem, bo));
 	assert(bo->tiling != I915_TILING_Y);
 	blt->bo[0] = bo;
 
@@ -127,7 +128,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 		blt->cmd |= BLT_DST_TILED;
 		blt->br13 >>= 2;
 	}
-	assert(blt->br13 < MAXSHORT);
+	assert(blt->br13 <= MAXSHORT);
 
 	if (alu == GXclear)
 		pixel = 0;
@@ -258,6 +259,9 @@ static Bool sna_blt_copy_init(struct sna *sna,
 {
 	struct kgem *kgem = &sna->kgem;
 
+	assert(kgem_bo_can_blt (kgem, src));
+	assert(kgem_bo_can_blt (kgem, dst));
+
 	blt->bo[0] = src;
 	blt->bo[1] = dst;
 
@@ -270,14 +274,14 @@ static Bool sna_blt_copy_init(struct sna *sna,
 		blt->cmd |= BLT_SRC_TILED;
 		blt->pitch[0] >>= 2;
 	}
-	assert(blt->pitch[0] < MAXSHORT);
+	assert(blt->pitch[0] <= MAXSHORT);
 
 	blt->pitch[1] = dst->pitch;
 	if (kgem->gen >= 40 && dst->tiling) {
 		blt->cmd |= BLT_DST_TILED;
 		blt->pitch[1] >>= 2;
 	}
-	assert(blt->pitch[1] < MAXSHORT);
+	assert(blt->pitch[1] <= MAXSHORT);
 
 	blt->overwrites = alu == GXcopy || alu == GXclear || alu == GXset;
 	blt->br13 = (copy_ROP[alu] << 16) | blt->pitch[1];
@@ -308,6 +312,9 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 {
 	struct kgem *kgem = &sna->kgem;
 
+	assert(kgem_bo_can_blt (kgem, src));
+	assert(kgem_bo_can_blt (kgem, dst));
+
 	blt->bo[0] = src;
 	blt->bo[1] = dst;
 
@@ -317,14 +324,14 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 		blt->cmd |= BLT_SRC_TILED;
 		blt->pitch[0] >>= 2;
 	}
-	assert(blt->pitch[0] < MAXSHORT);
+	assert(blt->pitch[0] <= MAXSHORT);
 
 	blt->pitch[1] = dst->pitch;
 	if (kgem->gen >= 40 && dst->tiling) {
 		blt->cmd |= BLT_DST_TILED;
 		blt->pitch[1] >>= 2;
 	}
-	assert(blt->pitch[1] < MAXSHORT);
+	assert(blt->pitch[1] <= MAXSHORT);
 
 	blt->overwrites = 1;
 	blt->br13 = (0xfc << 16) | blt->pitch[1];
@@ -1829,6 +1836,8 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	uint32_t br13, cmd, *b;
 	bool overwrites;
 
+	assert(kgem_bo_can_blt (kgem, bo));
+
 	DBG(("%s: box=((%d, %d), (%d, %d))\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
@@ -1841,7 +1850,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 		cmd |= BLT_DST_TILED;
 		br13 >>= 2;
 	}
-	assert(br13 < MAXSHORT);
+	assert(br13 <= MAXSHORT);
 
 	br13 |= fill_ROP[alu] << 16;
 	switch (bpp) {
@@ -1954,7 +1963,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 		cmd |= 1 << 11;
 		br13 >>= 2;
 	}
-	assert(br13 < MAXSHORT);
+	assert(br13 <= MAXSHORT);
 
 	br13 |= 1<<31 | fill_ROP[alu] << 16;
 	switch (bpp) {
@@ -2105,7 +2114,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		cmd |= BLT_DST_TILED;
 		br13 >>= 2;
 	}
-	assert(br13 < MAXSHORT);
+	assert(br13 <= MAXSHORT);
 
 	br13 |= copy_ROP[alu] << 16;
 	switch (bpp) {
commit 989615493608525fc252e4e94ac7259cba0741f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 22:44:16 2012 +0000

    sna: Feed fallback mono trapezoids through the mono rasteriser
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 8420730..d70e30e 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -631,6 +631,12 @@ sna_rgba_for_color(uint32_t color, int depth)
 	return color_convert(color, sna_format_for_depth(depth), PICT_a8r8g8b8);
 }
 
+uint32_t
+sna_rgba_to_color(uint32_t rgba, uint32_t format)
+{
+	return color_convert(rgba, PICT_a8r8g8b8, format);
+}
+
 static uint32_t
 get_pixel(PicturePtr picture)
 {
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 71a6fc5..e6015af 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -482,6 +482,7 @@ sna_render_get_gradient(struct sna *sna,
 			PictGradient *pattern);
 
 uint32_t sna_rgba_for_color(uint32_t color, int depth);
+uint32_t sna_rgba_to_color(uint32_t rgba, uint32_t format);
 Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color);
 
 void no_render_init(struct sna *sna);
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 4493331..3e2802e 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3681,6 +3681,262 @@ tor_blt_add_clipped_mono(struct sna *sna,
 		tor_blt_add_clipped(sna, op, clip, box, FAST_SAMPLES_XY);
 }
 
+struct mono_inplace_composite {
+	pixman_image_t *src, *dst;
+	int dx, dy;
+	int sx, sy;
+	int op;
+};
+struct mono_inplace_fill {
+	uint32_t *data, stride;
+	uint32_t color;
+	int bpp;
+};
+
+fastcall static void
+mono_inplace_fill_box(struct sna *sna,
+		      const struct sna_composite_op *op,
+		      const BoxRec *box)
+{
+	struct mono_inplace_fill *fill = op->priv;
+
+	DBG(("(%s: (%d, %d)x(%d, %d):%08x\n",
+	     __FUNCTION__,
+	     box->x1, box->y1,
+	     box->x2 - box->x1,
+	     box->y2 - box->y1,
+	     fill->color));
+	pixman_fill(fill->data, fill->stride, fill->bpp,
+		    box->x1, box->y1,
+		    box->x2 - box->x1,
+		    box->y2 - box->y1,
+		    fill->color);
+}
+
+static void
+mono_inplace_fill_boxes(struct sna *sna,
+			const struct sna_composite_op *op,
+			const BoxRec *box, int nbox)
+{
+	struct mono_inplace_fill *fill = op->priv;
+
+	do {
+		DBG(("(%s: (%d, %d)x(%d, %d):%08x\n",
+		     __FUNCTION__,
+		     box->x1, box->y1,
+		     box->x2 - box->x1,
+		     box->y2 - box->y1,
+		     fill->color));
+		pixman_fill(fill->data, fill->stride, fill->bpp,
+			    box->x1, box->y1,
+			    box->x2 - box->x1,
+			    box->y2 - box->y1,
+			    fill->color);
+		box++;
+	} while (--nbox);
+}
+
+fastcall static void
+mono_inplace_composite_box(struct sna *sna,
+			   const struct sna_composite_op *op,
+			   const BoxRec *box)
+{
+	struct mono_inplace_composite *c = op->priv;
+
+	pixman_image_composite(c->op, c->src, NULL, c->dst,
+			       box->x1 + c->sx, box->y1 + c->sy,
+			       0, 0,
+			       box->x1 + c->dx, box->y1 + c->dy,
+			       box->x2 - box->x1,
+			       box->y2 - box->y1);
+}
+
+static void
+mono_inplace_composite_boxes(struct sna *sna,
+			     const struct sna_composite_op *op,
+			     const BoxRec *box, int nbox)
+{
+	struct mono_inplace_composite *c = op->priv;
+
+	do {
+		pixman_image_composite(c->op, c->src, NULL, c->dst,
+				       box->x1 + c->sx, box->y1 + c->sy,
+				       0, 0,
+				       box->x1 + c->dx, box->y1 + c->dy,
+				       box->x2 - box->x1,
+				       box->y2 - box->y1);
+		box++;
+	} while (--nbox);
+}
+
+static bool
+trapezoid_span_mono_inplace(CARD8 op,
+			    PicturePtr src,
+			    PicturePtr dst,
+			    INT16 src_x, INT16 src_y,
+			    int ntrap, xTrapezoid *traps)
+{
+	struct mono mono;
+	union {
+		struct mono_inplace_fill fill;
+		struct mono_inplace_composite composite;
+	} inplace;
+	int was_clear;
+	int x, y, n;
+
+	trapezoids_bounds(ntrap, traps, &mono.clip.extents);
+	if (mono.clip.extents.y1 >= mono.clip.extents.y2 ||
+	    mono.clip.extents.x1 >= mono.clip.extents.x2)
+		return true;
+
+	DBG(("%s: extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     mono.clip.extents.x1, mono.clip.extents.y1,
+	     mono.clip.extents.x2, mono.clip.extents.y2));
+
+	if (!sna_compute_composite_region(&mono.clip,
+					  src, NULL, dst,
+					  src_x, src_y,
+					  0, 0,
+					  mono.clip.extents.x1, mono.clip.extents.y1,
+					  mono.clip.extents.x2 - mono.clip.extents.x1,
+					  mono.clip.extents.y2 - mono.clip.extents.y1)) {
+		DBG(("%s: trapezoids do not intersect drawable clips\n",
+		     __FUNCTION__)) ;
+		return true;
+	}
+
+	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     mono.clip.extents.x1, mono.clip.extents.y1,
+	     mono.clip.extents.x2, mono.clip.extents.y2));
+
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &mono.clip,
+					     MOVE_WRITE | MOVE_READ))
+		return true;
+
+	mono.sna = to_sna_from_drawable(dst->pDrawable);
+	if (!mono_init(&mono, 2*ntrap))
+		return false;
+
+	mono.op.damage = NULL;
+
+	x = dst->pDrawable->x;
+	y = dst->pDrawable->y;
+
+	for (n = 0; n < ntrap; n++) {
+		if (!xTrapezoidValid(&traps[n]))
+			continue;
+
+		if (pixman_fixed_to_int(traps[n].top) + y >= mono.clip.extents.y2 ||
+		    pixman_fixed_to_int(traps[n].bottom) + y < mono.clip.extents.y1)
+			continue;
+
+		mono_add_line(&mono, x, y,
+			      traps[n].top, traps[n].bottom,
+			      &traps[n].left.p1, &traps[n].left.p2, 1);
+		mono_add_line(&mono, x, y,
+			      traps[n].top, traps[n].bottom,
+			      &traps[n].right.p1, &traps[n].right.p2, -1);
+	}
+
+	if (sna_picture_is_solid(src, &inplace.fill.color) &&
+	    (op == PictOpSrc || op == PictOpClear ||
+	     (op == PictOpOver && inplace.fill.color >> 24 == 0xff))) {
+		PixmapPtr pixmap;
+		int16_t dx, dy;
+		uint8_t *ptr;
+
+unbounded_pass:
+		pixmap = get_drawable_pixmap(dst->pDrawable);
+		get_drawable_deltas(dst->pDrawable, pixmap, &dx, &dy);
+
+		ptr = pixmap->devPrivate.ptr;
+		ptr += dy * pixmap->devKind + dx * pixmap->drawable.bitsPerPixel / 8;
+		inplace.fill.data = (uint32_t *)ptr;
+		inplace.fill.stride = pixmap->devKind / sizeof(uint32_t);
+		inplace.fill.bpp = pixmap->drawable.bitsPerPixel;
+
+		if (op == PictOpClear)
+			inplace.fill.color = 0;
+		else if (dst->format != PICT_a8r8g8b8)
+			inplace.fill.color = sna_rgba_to_color(inplace.fill.color, dst->format);
+
+		DBG(("%s: fill %x\n", __FUNCTION__, inplace.fill.color));
+
+		mono.op.priv = &inplace.fill;
+		mono.op.box = mono_inplace_fill_box;
+		mono.op.boxes = mono_inplace_fill_boxes;
+
+		op = 0;
+	} else {
+		inplace.composite.dst = image_from_pict(dst, FALSE,
+							&inplace.composite.dx,
+							&inplace.composite.dy);
+		inplace.composite.src = image_from_pict(src, FALSE,
+							&inplace.composite.sx,
+							&inplace.composite.sy);
+		inplace.composite.sx +=
+			src_x - pixman_fixed_to_int(traps[0].left.p1.x),
+		inplace.composite.sy +=
+			src_y - pixman_fixed_to_int(traps[0].left.p1.y),
+		inplace.composite.op = op;
+
+		mono.op.priv = &inplace.composite;
+		mono.op.box = mono_inplace_composite_box;
+		mono.op.boxes = mono_inplace_composite_boxes;
+	}
+	mono_render(&mono);
+	mono_fini(&mono);
+
+	if (op) {
+		free_pixman_pict(src, inplace.composite.src);
+		free_pixman_pict(dst, inplace.composite.dst);
+
+		if (!was_clear && !operator_is_bounded(op)) {
+			xPointFixed p1, p2;
+
+			DBG(("%s: unbounded fixup\n", __FUNCTION__));
+
+			if (!mono_init(&mono, 2+2*ntrap))
+				return false;
+
+			p1.y = mono.clip.extents.y1 * pixman_fixed_1;
+			p2.y = mono.clip.extents.y2 * pixman_fixed_1;
+
+			p1.x = mono.clip.extents.x1 * pixman_fixed_1;
+			p2.x = mono.clip.extents.x1 * pixman_fixed_1;
+			mono_add_line(&mono, 0, 0, p1.y, p2.y, &p1, &p2, -1);
+
+			p1.x = mono.clip.extents.x2 * pixman_fixed_1;
+			p2.x = mono.clip.extents.x2 * pixman_fixed_1;
+			mono_add_line(&mono, 0, 0, p1.y, p2.y, &p1, &p2, 1);
+
+			for (n = 0; n < ntrap; n++) {
+				if (!xTrapezoidValid(&traps[n]))
+					continue;
+
+				if (pixman_fixed_to_int(traps[n].top) + x >= mono.clip.extents.y2 ||
+				    pixman_fixed_to_int(traps[n].bottom) + y < mono.clip.extents.y1)
+					continue;
+
+				mono_add_line(&mono, x, y,
+					      traps[n].top, traps[n].bottom,
+					      &traps[n].left.p1, &traps[n].left.p2, 1);
+				mono_add_line(&mono, x, y,
+					      traps[n].top, traps[n].bottom,
+					      &traps[n].right.p1, &traps[n].right.p2, -1);
+			}
+
+			op = PictOpClear;
+			goto unbounded_pass;
+		}
+	}
+
+	return true;
+}
+
 static bool
 trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		       PictFormatPtr maskFormat, INT16 src_x, INT16 src_y,
@@ -3713,7 +3969,24 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 	}
 
-	if (dst->format != PICT_a8 || !sna_picture_is_solid(src, &color)) {
+	if (!fallback && is_gpu(dst->pDrawable)) {
+		DBG(("%s: fallback -- can not perform operation in place, destination busy\n",
+		     __FUNCTION__));
+
+		return false;
+	}
+
+	if (is_mono(dst, maskFormat))
+		return trapezoid_span_mono_inplace(op, src, dst,
+						   src_x, src_y, ntrap, traps);
+
+	if (!sna_picture_is_solid(src, &color)) {
+		DBG(("%s: fallback -- can not perform operation in place, requires solid source\n",
+		     __FUNCTION__));
+		return false;
+	}
+
+	if (dst->format != PICT_a8) {
 		DBG(("%s: fallback -- can not perform operation in place, format=%x\n",
 		     __FUNCTION__, dst->format));
 		return false;
@@ -3744,8 +4017,6 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		     __FUNCTION__, op));
 		return false;
 	}
-	if (!fallback && is_gpu(dst->pDrawable))
-		return false;
 
 	DBG(("%s: format=%x, op=%d, color=%x\n",
 	     __FUNCTION__, dst->format, op, color));
commit 552e4fbd2c25eb5ab0ae77e11f5f8ba2fdb29daa
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 20:02:44 2012 +0000

    sna/traps: Add a fast path for narrow masks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5773d66..db579d0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3785,6 +3785,12 @@ done:
 	return kgem_create_proxy(&bo->base, offset, size);
 }
 
+bool kgem_buffer_is_inplace(struct kgem_bo *_bo)
+{
+	struct kgem_partial_bo *bo = (struct kgem_partial_bo *)_bo->proxy;
+	return bo->write & KGEM_BUFFER_WRITE_INPLACE;
+}
+
 struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 				      int width, int height, int bpp,
 				      uint32_t flags,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 6c31f33..dff8bb2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -503,6 +503,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 				      int width, int height, int bpp,
 				      uint32_t flags,
 				      void **ret);
+bool kgem_buffer_is_inplace(struct kgem_bo *bo);
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
 
 void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3429438..3619101 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2007,17 +2007,10 @@ sna_pixmap_create_upload(ScreenPtr screen,
 		pixmap = sna->freed_pixmap;
 		sna->freed_pixmap = NULL;
 
-		pixmap->usage_hint = CREATE_PIXMAP_USAGE_SCRATCH;
 		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
 		pixmap->refcnt = 1;
-
-		DBG(("%s: serial=%ld, usage=%d\n",
-		     __FUNCTION__,
-		     pixmap->drawable.serialNumber,
-		     pixmap->usage_hint));
 	} else {
-		pixmap = create_pixmap(sna, screen, 0, 0, depth,
-				       CREATE_PIXMAP_USAGE_SCRATCH);
+		pixmap = create_pixmap(sna, screen, 0, 0, depth, 0);
 		if (!pixmap)
 			return NullPixmap;
 
@@ -2035,8 +2028,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 
 	priv->gpu_bo = kgem_create_buffer_2d(&sna->kgem,
 					     width, height, bpp,
-					     flags,
-					     &ptr);
+					     flags, &ptr);
 	if (!priv->gpu_bo) {
 		free(priv);
 		fbDestroyPixmap(pixmap);
@@ -2058,6 +2050,15 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	pixmap->devKind = priv->gpu_bo->pitch;
 	pixmap->devPrivate.ptr = ptr;
 
+	pixmap->usage_hint = 0;
+	if (!kgem_buffer_is_inplace(priv->gpu_bo))
+		pixmap->usage_hint = 1;
+
+	DBG(("%s: serial=%ld, usage=%d\n",
+	     __FUNCTION__,
+	     pixmap->drawable.serialNumber,
+	     pixmap->usage_hint));
+
 	return pixmap;
 }
 
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 8c6cf34..4493331 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1409,6 +1409,342 @@ tor_render(struct sna *sna,
 	}
 }
 
+static void
+inplace_row(struct active_list *active, uint8_t *row, int width)
+{
+	struct edge *left = active->head.next;
+
+	assert(active->is_vertical);
+
+	while (&active->tail != left) {
+		struct edge *right;
+		int winding = left->dir;
+		grid_scaled_x_t lfx, rfx;
+		int lix, rix;
+
+		left->height_left -= FAST_SAMPLES_Y;
+		if (!left->height_left) {
+			left->prev->next = left->next;
+			left->next->prev = left->prev;
+		}
+
+		right = left->next;
+		do {
+			right->height_left -= FAST_SAMPLES_Y;
+			if (!right->height_left) {
+				right->prev->next = right->next;
+				right->next->prev = right->prev;
+			}
+
+			winding += right->dir;
+			if (0 == winding)
+				break;
+
+			right = right->next;
+		} while (1);
+
+		if (left->x.quo < 0) {
+			lix = lfx = 0;
+		} else if (left->x.quo > width * FAST_SAMPLES_X) {
+			lix = width;
+			lfx = 0;
+		} else
+			FAST_SAMPLES_X_TO_INT_FRAC(left->x.quo, lix, lfx);
+
+		if (right->x.quo < 0) {
+			rix = rfx = 0;
+		} else if (right->x.quo > width * FAST_SAMPLES_X) {
+			rix = width;
+			rfx = 0;
+		} else
+			FAST_SAMPLES_X_TO_INT_FRAC(right->x.quo, rix, rfx);
+		if (lix == rix) {
+			if (rfx != lfx)
+				row[lix] += (rfx-lfx) * 256 / FAST_SAMPLES_X;
+		} else {
+			if (lfx == 0)
+				row[lix] = 0xff;
+			else
+				row[lix] += 256 - lfx * 256 / FAST_SAMPLES_X;
+
+			if (rfx)
+				row[rix] += rfx * 256 / FAST_SAMPLES_X;
+
+			if (rix > ++lix) {
+				rix -= lix;
+#if 0
+				if (rix == 1)
+					row[lix] = 0xff;
+				else
+					memset(row+lix, 0xff, rix);
+#else
+				while (rix && lix & 3)
+					row[lix++] = 0xff, rix--;
+				while (rix > 4) {
+					*(uint32_t *)(row+lix) = 0xffffffff;
+					lix += 4;
+					rix -= 4;
+				}
+				if (rix & 2) {
+					*(uint16_t *)(row+lix) = 0xffff;
+					lix += 2;
+				}
+				if (rix & 1)
+					row[lix] = 0xff;
+#endif
+			}
+		}
+
+		left = right->next;
+	}
+}
+
+static inline uint8_t clip255(int x)
+{
+	if (x > 255)
+		return 255;
+
+	return x;
+}
+
+inline static void
+inplace_subrow(struct active_list *active, int8_t *row,
+	       int width, int *min, int *max)
+{
+	struct edge *edge = active->head.next;
+	grid_scaled_x_t prev_x = INT_MIN;
+	int winding = 0, xstart = INT_MIN;
+
+	while (&active->tail != edge) {
+		struct edge *next = edge->next;
+
+		winding += edge->dir;
+		if (0 == winding) {
+			if (edge->x.quo >= FAST_SAMPLES_X * width) {
+				*max = width;
+			} else if (edge->next->x.quo != edge->x.quo) {
+				grid_scaled_x_t fx;
+				int ix;
+
+				xstart = edge->x.quo;
+				FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
+				row[ix++] -= FAST_SAMPLES_X - fx;
+				if (ix < width)
+					row[ix] -= fx;
+
+				if (ix > *max)
+					*max = ix;
+
+				xstart = INT_MIN;
+			}
+		} else if (xstart < 0) {
+			grid_scaled_x_t fx;
+			int ix;
+
+			xstart = MAX(edge->x.quo, 0);
+			FAST_SAMPLES_X_TO_INT_FRAC(xstart, ix, fx);
+			if (ix < *min)
+				*min = ix;
+
+			row[ix++] += FAST_SAMPLES_X - fx;
+			row[ix] += fx;
+		}
+
+		if (--edge->height_left) {
+			if (!edge->vertical) {
+				edge->x.quo += edge->dxdy.quo;
+				edge->x.rem += edge->dxdy.rem;
+				if (edge->x.rem >= 0) {
+					++edge->x.quo;
+					edge->x.rem -= edge->dy;
+				}
+			}
+
+			if (edge->x.quo < prev_x) {
+				struct edge *pos = edge->prev;
+				pos->next = next;
+				next->prev = pos;
+				do {
+					pos = pos->prev;
+				} while (edge->x.quo < pos->x.quo);
+				pos->next->prev = edge;
+				edge->next = pos->next;
+				edge->prev = pos;
+				pos->next = edge;
+			} else
+				prev_x = edge->x.quo;
+		} else {
+			edge->prev->next = next;
+			next->prev = edge->prev;
+		}
+
+		edge = next;
+	}
+}
+
+inline static void
+inplace_end_subrows(struct active_list *active, uint8_t *row,
+		    int8_t *buf, int width)
+{
+	int cover = 0;
+
+	while (width > 4) {
+		uint32_t dw;
+		int v;
+
+		dw = *(uint32_t *)buf;
+		buf += 4;
+
+		if (dw == 0){
+			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+			v -= v >> 8;
+			v |= v << 8;
+			dw = v | v << 16;
+		} else if (dw) {
+			cover += (int8_t)(dw & 0xff);
+			assert(cover >= 0);
+			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+			v -= v >> 8;
+			dw >>= 8;
+			dw |= v << 24;
+
+			cover += (int8_t)(dw & 0xff);
+			assert(cover >= 0);
+			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+			v -= v >> 8;
+			dw >>= 8;
+			dw |= v << 24;
+
+			cover += (int8_t)(dw & 0xff);
+			assert(cover >= 0);
+			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+			v -= v >> 8;
+			dw >>= 8;
+			dw |= v << 24;
+
+			cover += (int8_t)(dw & 0xff);
+			assert(cover >= 0);
+			v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+			v -= v >> 8;
+			dw >>= 8;
+			dw |= v << 24;
+		}
+
+		*(uint32_t *)row = dw;
+		row += 4;
+
+		width -= 4;
+	}
+
+	while (width--) {
+		int v;
+
+		cover += *buf++;
+		assert(cover >= 0);
+
+		v = cover * 256 / (FAST_SAMPLES_X * FAST_SAMPLES_Y);
+		v -= v >> 8;
+		*row++ = v;
+	}
+}
+
+#define TOR_INPLACE_SIZE 128
+static void
+tor_inplace(struct tor *converter, PixmapPtr scratch, int mono, uint8_t *buf)
+{
+	int i, j, h = converter->ymax;
+	struct polygon *polygon = converter->polygon;
+	struct active_list *active = converter->active;
+	struct edge *buckets[FAST_SAMPLES_Y] = { 0 };
+	uint8_t *row = scratch->devPrivate.ptr;
+	int stride = scratch->devKind;
+	int width = scratch->drawable.width;
+
+	__DBG(("%s: mono=%d, buf=%d\n", __FUNCTION__, mono, buf));
+	assert(!mono);
+
+	/* Render each pixel row. */
+	for (i = 0; i < h; i = j) {
+		int do_full_step = 0;
+		void *ptr = buf ?: row;
+
+		j = i + 1;
+
+		/* Determine if we can ignore this row or use the full pixel
+		 * stepper. */
+		if (!polygon->y_buckets[i]) {
+			if (active->head.next == &active->tail) {
+				active->min_height = INT_MAX;
+				active->is_vertical = 1;
+				for (; j < h && !polygon->y_buckets[j]; j++)
+					;
+				__DBG(("%s: no new edges and no exisiting edges, skipping, %d -> %d\n",
+				       __FUNCTION__, i, j));
+
+				memset(row, 0, stride*(j-i));
+				row += stride*(j-i);
+				continue;
+			}
+
+			do_full_step = can_full_step(active);
+		}
+
+		__DBG(("%s: y=%d [%d], do_full_step=%d, new edges=%d, min_height=%d, vertical=%d\n",
+		       __FUNCTION__,
+		       i, i+ymin, do_full_step,
+		       polygon->y_buckets[i] != NULL,
+		       active->min_height,
+		       active->is_vertical));
+		if (do_full_step) {
+			memset(ptr, 0, width);
+			inplace_row(active, ptr, width);
+			if (row != ptr)
+				memcpy(row, ptr, width);
+
+			if (active->is_vertical) {
+				while (j < h &&
+				       polygon->y_buckets[j] == NULL &&
+				       active->min_height >= 2*FAST_SAMPLES_Y)
+				{
+					active->min_height -= FAST_SAMPLES_Y;
+					row += stride;
+					memcpy(row, ptr, width);
+					j++;
+				}
+				if (j != i + 1)
+					step_edges(active, j - (i + 1));
+
+				__DBG(("%s: vertical edges, full step (%d, %d)\n",
+				       __FUNCTION__,  i, j));
+			}
+		} else {
+			grid_scaled_y_t suby;
+			int min = width, max = 0;
+
+			fill_buckets(active, polygon->y_buckets[i], buckets);
+
+			/* Subsample this row. */
+			memset(ptr, 0, width);
+			for (suby = 0; suby < FAST_SAMPLES_Y; suby++) {
+				if (buckets[suby]) {
+					merge_edges(active, buckets[suby]);
+					buckets[suby] = NULL;
+				}
+
+				inplace_subrow(active, ptr, width, &min, &max);
+			}
+			memset(row, 0, min);
+			if (max > min)
+				inplace_end_subrows(active, row+min, (int8_t*)ptr+min, max-min);
+			if (max < width)
+				memset(row+max, 0, width-max);
+		}
+
+		active->min_height -= FAST_SAMPLES_Y;
+		row += stride;
+	}
+}
+
 struct mono_edge {
 	struct mono_edge *next, *prev;
 
@@ -1936,7 +2272,7 @@ trapezoids_bounds(int n, const xTrapezoid *t, BoxPtr box)
 		if (((x2 - t->right.p1.x) | (x2 - t->right.p2.x)) < 0) {
 			if (pixman_fixed_floor(t->right.p1.x) == pixman_fixed_floor(t->right.p2.x)) {
 				x2 = pixman_fixed_ceil(t->right.p1.x);
-			} else  {
+			} else {
 				if (t->right.p1.y == t->top)
 					fx1 = t->right.p1.x;
 				else
@@ -3007,7 +3343,6 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 			 int ntrap, xTrapezoid *traps)
 {
 	struct tor tor;
-	span_func_t span;
 	ScreenPtr screen = dst->pDrawable->pScreen;
 	PixmapPtr scratch;
 	PicturePtr mask;
@@ -3041,8 +3376,8 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (extents.y1 >= extents.y2 || extents.x1 >= extents.x2)
 		return true;
 
-	DBG(("%s: extents (%d, %d), (%d, %d)\n",
-	     __FUNCTION__, extents.x1, extents.y1, extents.x2, extents.y2));
+	DBG(("%s: ntraps=%d, extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, ntrap, extents.x1, extents.y1, extents.x2, extents.y2));
 
 	if (!sna_compute_composite_extents(&extents,
 					   src, NULL, dst,
@@ -3096,15 +3431,18 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		tor_add_edge(&tor, &t, &t.right, -1);
 	}
 
-	if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-		span = tor_blt_mask_mono;
-	else
-		span = tor_blt_mask;
-
-	tor_render(NULL, &tor,
-		   scratch->devPrivate.ptr,
-		   (void *)(intptr_t)scratch->devKind,
-		   span, true);
+	if (extents.x2 <= TOR_INPLACE_SIZE) {
+		uint8_t buf[TOR_INPLACE_SIZE];
+		tor_inplace(&tor, scratch, is_mono(dst, maskFormat),
+			    scratch->usage_hint ? NULL : buf);
+	} else {
+		tor_render(NULL, &tor,
+			   scratch->devPrivate.ptr,
+			   (void *)(intptr_t)scratch->devKind,
+			   is_mono(dst, maskFormat) ? tor_blt_mask_mono : tor_blt_mask,
+			   true);
+	}
+	tor_fini(&tor);
 
 	mask = CreatePicture(0, &scratch->drawable,
 			     PictureMatchFormat(screen, 8, PICT_a8),
@@ -3119,7 +3457,6 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 				 extents.x2, extents.y2);
 		FreePicture(mask, 0);
 	}
-	tor_fini(&tor);
 
 	return true;
 }
@@ -3535,7 +3872,6 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 			int ntrap, xTrapezoid *traps)
 {
 	struct tor tor;
-	span_func_t span;
 	ScreenPtr screen = dst->pDrawable->pScreen;
 	PixmapPtr scratch;
 	PicturePtr mask;
@@ -3569,8 +3905,8 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (extents.y1 >= extents.y2 || extents.x1 >= extents.x2)
 		return true;
 
-	DBG(("%s: extents (%d, %d), (%d, %d)\n",
-	     __FUNCTION__, extents.x1, extents.y1, extents.x2, extents.y2));
+	DBG(("%s: ntraps=%d, extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, ntrap, extents.x1, extents.y1, extents.x2, extents.y2));
 
 	if (!sna_compute_composite_extents(&extents,
 					   src, NULL, dst,
@@ -3624,15 +3960,16 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		tor_add_edge(&tor, &t, &t.right, -1);
 	}
 
-	if (maskFormat ? maskFormat->depth < 8 : dst->polyEdge == PolyEdgeSharp)
-		span = tor_blt_mask_mono;
-	else
-		span = tor_blt_mask;
-
-	tor_render(NULL, &tor,
-		   scratch->devPrivate.ptr,
-		   (void *)(intptr_t)scratch->devKind,
-		   span, true);
+	if (extents.x2 <= TOR_INPLACE_SIZE) {
+		tor_inplace(&tor, scratch, is_mono(dst, maskFormat), NULL);
+	} else {
+		tor_render(NULL, &tor,
+			   scratch->devPrivate.ptr,
+			   (void *)(intptr_t)scratch->devKind,
+			   is_mono(dst, maskFormat) ? tor_blt_mask_mono : tor_blt_mask,
+			   true);
+	}
+	tor_fini(&tor);
 
 	mask = CreatePicture(0, &scratch->drawable,
 			     PictureMatchFormat(screen, 8, PICT_a8),
@@ -3675,7 +4012,6 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 done:
 		FreePicture(mask, 0);
 	}
-	tor_fini(&tor);
 
 	return true;
 }
commit 494edfaaacaae13adfa5e727c66a83cb2294d330
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 12:19:33 2012 +0000

    sna: Handle partial reads with a pending clear
    
    Skip the filling of the whole pixmap if we have a small read and we
    know the GPU bo is clear. Also choose to operate inplace on the GPU bo
    if we meet the usual criteria.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5aad88b..3429438 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1257,13 +1257,6 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 	}
 
-	if (priv->clear) {
-		DBG(("%s: pending clear, moving whole pixmap\n", __FUNCTION__));
-		if (dx | dy)
-			RegionTranslate(region, -dx, -dy);
-		return _sna_pixmap_move_to_cpu(pixmap, flags | MOVE_READ);
-	}
-
 	if ((flags & MOVE_READ) == 0) {
 		DBG(("%s: no read, checking to see if we can stream the write into the GPU bo\n",
 		     __FUNCTION__));
@@ -1295,6 +1288,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 					sna_damage_add(&priv->gpu_damage,
 						       region);
 
+				priv->clear = false;
 				return true;
 			}
 		}
@@ -1333,6 +1327,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			} else
 				sna_damage_add(&priv->gpu_damage, region);
 
+			priv->clear = false;
 			return true;
 		}
 	}
@@ -1354,12 +1349,20 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			pixmap->devKind = priv->gpu_bo->pitch;
 			if (!DAMAGE_IS_ALL(priv->gpu_damage))
 				sna_damage_add(&priv->gpu_damage, region);
+			priv->clear = false;
 			return true;
 		}
 
 		priv->mapped = false;
 	}
 
+	if (priv->clear && flags & MOVE_WRITE) {
+		DBG(("%s: pending clear, moving whole pixmap for partial write\n", __FUNCTION__));
+		if (dx | dy)
+			RegionTranslate(region, -dx, -dy);
+		return _sna_pixmap_move_to_cpu(pixmap, flags | MOVE_READ);
+	}
+
 	if (priv->mapped) {
 		pixmap->devPrivate.ptr = NULL;
 		priv->mapped = false;
@@ -1372,6 +1375,35 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	if (priv->gpu_bo == NULL)
 		goto done;
 
+	if (priv->clear) {
+		int n = REGION_NUM_RECTS(region);
+		BoxPtr box = REGION_RECTS(region);
+
+		DBG(("%s: pending clear, doing partial fill\n", __FUNCTION__));
+		if (priv->cpu_bo) {
+			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
+			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+		}
+
+		do {
+			pixman_fill(pixmap->devPrivate.ptr,
+				    pixmap->devKind/sizeof(uint32_t),
+				    pixmap->drawable.bitsPerPixel,
+				    box->x1, box->y1,
+				    box->x2 - box->x1,
+				    box->y2 - box->y1,
+				    priv->clear_color);
+			box++;
+		} while (--n);
+
+		if (region->extents.x2 - region->extents.x1 > 1 ||
+		    region->extents.y2 - region->extents.y1 > 1) {
+			sna_damage_subtract(&priv->gpu_damage, region);
+			priv->clear = false;
+		}
+		goto done;
+	}
+
 	if ((flags & MOVE_READ) == 0) {
 		assert(flags & MOVE_WRITE);
 		sna_damage_subtract(&priv->gpu_damage, region);
commit bd62dc73dcdbab34aa5c83382e46c7315d554a1a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 10:37:34 2012 +0000

    sna/traps: Apply somes simple but common operator reduction for clipmasks
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index e28c669..8c6cf34 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3354,8 +3354,10 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	struct inplace inplace;
 	span_func_t span;
 	PixmapPtr pixmap;
+	struct sna_pixmap *priv;
 	RegionRec region;
 	uint32_t color;
+	bool unbounded;
 	int16_t dst_x, dst_y;
 	int dx, dy;
 	int n;
@@ -3380,18 +3382,33 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 	}
 
+	pixmap = get_drawable_pixmap(dst->pDrawable);
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback -- unattached\n", __FUNCTION__));
+		return false;
+	}
+
+	unbounded = false;
 	switch (op) {
 	case PictOpIn:
+		unbounded = true;
+		if (priv->clear && priv->clear_color == 0xff)
+			op = PictOpSrc;
+		break;
 	case PictOpAdd:
+		if (priv->clear && priv->clear_color == 0)
+			op = PictOpSrc;
+		break;
 	case PictOpSrc:
-		if (!fallback && is_gpu(dst->pDrawable))
-			return false;
 		break;
 	default:
 		DBG(("%s: fallback -- can not perform op [%d] in place\n",
 		     __FUNCTION__, op));
 		return false;
 	}
+	if (!fallback && is_gpu(dst->pDrawable))
+		return false;
 
 	DBG(("%s: format=%x, op=%d, color=%x\n",
 	     __FUNCTION__, dst->format, op, color));
@@ -3497,7 +3514,6 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 					     op == PictOpSrc ? MOVE_WRITE : MOVE_WRITE | MOVE_READ))
 		return true;
 
-	pixmap = get_drawable_pixmap(dst->pDrawable);
 	get_drawable_deltas(dst->pDrawable, pixmap, &dst_x, &dst_y);
 
 	inplace.ptr = pixmap->devPrivate.ptr;
@@ -3506,7 +3522,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	inplace.opacity = color >> 24;
 
 	tor_render(NULL, &tor, (void*)&inplace,
-		   dst->pCompositeClip, span, op == PictOpIn);
+		   dst->pCompositeClip, span, unbounded);
 
 	tor_fini(&tor);
 
commit c25a3f7f46010660f441070ab7b9d5d1bc39ed0d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 09:43:46 2012 +0000

    sna/dri: Only deliver a delayed flip if the drawable is still on the root
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 92132d6..ccaf40f 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1053,7 +1053,8 @@ static void sna_dri_flip_event(struct sna *sna,
 				      flip->drawable_id,
 				      serverClient,
 				      M_ANY, DixWriteAccess) == Success) {
-			if (!sna_dri_flip_continue(sna, drawable, flip)) {
+			if (can_flip(sna, drawable, flip->front, flip->back) &&
+			    !sna_dri_flip_continue(sna, drawable, flip)) {
 				DRI2SwapComplete(flip->client, drawable,
 						 0, 0, 0,
 						 DRI2_BLIT_COMPLETE,
commit 93846b468e778440549ef0cae171c7fe9678ed9a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 09:43:24 2012 +0000

    sna/traps: Remove some dead code
    
    This function was never used in this implementation, remove it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 727b4d2..e28c669 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -483,15 +483,6 @@ cell_list_rewind(struct cell_list *cells)
 	cells->cursor = &cells->head;
 }
 
-/* Rewind the cell list if its cursor has been advanced past x. */
-inline static void
-cell_list_maybe_rewind(struct cell_list *cells, int x)
-{
-	struct cell *tail = cells->cursor;
-	if (tail->x > x)
-		cell_list_rewind (cells);
-}
-
 static void
 cell_list_init(struct cell_list *cells)
 {
commit 90c995736555ce14b08b69a42832d9774ba58304
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 9 00:37:32 2012 +0000

    sna: Emit a INFO when compiled with debugging enabled
    
    It is useful to know and to receive confirmation that you have
    successfully compiled and executed the driver with debugging enabled.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index ddacfd1..7cf3ef1 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -1074,6 +1074,10 @@ void sna_init_scrn(ScrnInfoPtr scrn, int entity_num)
 	xf86DrvMsg(scrn->scrnIndex, X_INFO,
 		   "SNA compiled: %s\n", BUILDER_DESCRIPTION);
 #endif
+#if HAVE_EXTRA_DEBUG
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "SNA compiled with debugging enabled\n");
+#endif
 
 	DBG(("%s\n", __FUNCTION__));
 	DBG(("pixman version: %s\n", pixman_version_string()));
commit 2e194f33db0437ea2f25c22efdad9552aefcab2f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 18:07:22 2012 +0000

    sna/traps: Fix the initialisation of the error term for vertical mono edges
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 8e735ef..727b4d2 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -776,7 +776,7 @@ polygon_add_line(struct polygon *polygon,
 	if (dx == 0) {
 		e->vertical = true;
 		e->x.quo = p1->x;
-		e->x.rem = 0;
+		e->x.rem = -dy;
 		e->dxdy.quo = 0;
 		e->dxdy.rem = 0;
 	} else {
commit 635c604b787625f93763001951f8bdf66482c682
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 18:06:51 2012 +0000

    sna/traps: Unroll insertion sort
    
    As the compiler cannot know the loop is bounded by a sentinel, manually
    unroll it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 33ea3bb..8e735ef 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -556,6 +556,14 @@ cell_list_find(struct cell_list *cells, int x)
 			break;
 
 		tail = tail->next;
+		if (tail->next->x > x)
+			break;
+
+		tail = tail->next;
+		if (tail->next->x > x)
+			break;
+
+		tail = tail->next;
 	} while (1);
 
 	if (tail->x != x)
commit a087430ad99c06e79249d2cdd019cb8bf7f955d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 17:13:39 2012 +0000

    sna/gen6: Replace the memset with explicit initialisation
    
    The profiles told me to kill it...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 764b629..8a8cdd8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3705,8 +3705,6 @@ gen6_render_fill_boxes(struct sna *sna,
 	     __FUNCTION__, pixel, n,
 	     box[0].x1, box[0].y1, box[0].x2, box[0].y2));
 
-	memset(&tmp, 0, sizeof(tmp));
-
 	tmp.op = op;
 
 	tmp.dst.pixmap = dst;
@@ -3714,16 +3712,21 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.format = format;
 	tmp.dst.bo = dst_bo;
+	tmp.dst.x = tmp.dst.y = 0;
 
 	tmp.src.bo = sna_render_get_solid(sna, pixel);
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_REPEAT;
 
 	tmp.mask.bo = NULL;
+	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
+	tmp.mask.repeat = SAMPLER_EXTEND_NONE;
 
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
+	tmp.has_component_alpha = FALSE;
+	tmp.need_magic_ca_pass = FALSE;
 
 	tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen6.nr_surfaces = 2;
commit 61226cd41faf320f79ee4bd72dc77163079da853
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 13:41:58 2012 +0000

    sna: Fix handling of large glyphs following large and shared buffer work
    
    Part of the large buffer handling was to move the decision making about
    whether to create GPU bo for a pixmap to creation time. The single
    instance where we change our minds later is involving large glyphs which
    we choose not to cache.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 8340345..441b24e 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -684,7 +684,6 @@ memcpy_xor(const void *src, void *dst, int bpp,
 
 #define SNA_CREATE_FB 0x10
 #define SNA_CREATE_SCRATCH 0x11
-#define SNA_CREATE_GLYPH 0x12
 
 inline static bool is_power_of_two(unsigned x)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 08ee537..5aad88b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -764,7 +764,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	}
 
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
-		goto fallback;
+		flags &= ~KGEM_CAN_CREATE_GPU;
 
 force_create:
 	pad = PixmapBytePad(width, depth);
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 2733a1a..1c536c8 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -315,7 +315,7 @@ glyph_cache(ScreenPtr screen,
 		PixmapPtr pixmap = (PixmapPtr)glyph_picture->pDrawable;
 		assert(glyph_picture->pDrawable->type == DRAWABLE_PIXMAP);
 		if (pixmap->drawable.depth >= 8) {
-			pixmap->usage_hint = SNA_CREATE_GLYPH;
+			pixmap->usage_hint = 0;
 			sna_pixmap_force_to_gpu(pixmap, MOVE_READ);
 		}
 		return FALSE;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 421c7ff..d1e3500 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -378,8 +378,11 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		bool upload;
 
 		priv = sna_pixmap(pixmap);
-		if (!priv)
+		if (!priv) {
+			DBG(("%s: not migrating unattached pixmap\n",
+			     __FUNCTION__));
 			return false;
+		}
 
 		upload = true;
 		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
commit aa7a2bd71275e069a2e4383a26355854b0b8044c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 12:27:23 2012 +0000

    sna: Fix reversed logic for CREATE_NO_RETIRE
    
    If the flag is set, we cannot retire, not the other way around!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cfa46cf..5773d66 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2060,7 +2060,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		DBG(("%s: inactive and cache bucket empty\n",
 		     __FUNCTION__));
 
-		if ((flags & CREATE_NO_RETIRE) == 0) {
+		if (flags & CREATE_NO_RETIRE) {
 			DBG(("%s: can not retire\n", __FUNCTION__));
 			return NULL;
 		}
commit f8d520950edb5cec878999a09d3f7e6f8b15bf5e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 12:13:36 2012 +0000

    sna: Avoid NULL deference in DBG
    
    Only print out the details of the allocated CPU bo, if we actually
    allocate it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a1798a5..08ee537 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -313,10 +313,10 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 						  pixmap->drawable.height,
 						  pixmap->drawable.bitsPerPixel,
 						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
-		DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
-		     priv->cpu_bo->handle));
-
 		if (priv->cpu_bo) {
+			DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
+			     priv->cpu_bo->handle));
+
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
 			priv->stride = priv->cpu_bo->pitch;
 		}
commit 33af42e6284f104ed3c4cdba4bf3b1c29322ce9c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 12:10:24 2012 +0000

    sna: Force the creation of a backing pixmap for scanout
    
    Ordinarily if the GPU is wedged, we just want to create a shadow buffer.
    Except that we must ensure that we do allow a bo to be created for
    attaching to the scanout.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2dff2ed..cfa46cf 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2365,7 +2365,7 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 	uint32_t pitch, size;
 	unsigned flags = 0;
 
-	if (depth < 8 || kgem->wedged)
+	if (depth < 8)
 		return 0;
 
 	if (width > MAXSHORT || height > MAXSHORT)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2769d4b..a1798a5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -730,6 +730,14 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	}
 	assert(width && height);
 
+	if (wedged(sna)) {
+		if (usage == SNA_CREATE_FB) {
+			flags = KGEM_CAN_CREATE_GPU;
+			goto force_create;
+		}
+		goto fallback;
+	}
+
 	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
 	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
@@ -758,6 +766,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
 		goto fallback;
 
+force_create:
 	pad = PixmapBytePad(width, depth);
 	if (pad * height <= 4096) {
 		DBG(("%s: small buffer [%d], attaching to shadow pixmap\n",
commit 6abfa4c5662e5294edc501953dbfa7aabf0b2f57
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 11:59:02 2012 +0000

    sna: Mark the pixmap for writing when creating the screen resources
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 419d1c6..2769d4b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2051,6 +2051,13 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		struct sna *sna = to_sna_from_pixmap(pixmap);
 		unsigned mode;
 
+		DBG(("%s: forcing creation of  gpu bo (%dx%d@%d, flags=%x)\n",
+		     __FUNCTION__,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height,
+		     pixmap->drawable.bitsPerPixel,
+		     priv->create));
+
 		mode = 0;
 		if (priv->cpu_damage && !priv->cpu_bo)
 			mode |= CREATE_INACTIVE;
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index e53b75f..ddacfd1 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -198,7 +198,7 @@ static Bool sna_create_screen_resources(ScreenPtr screen)
 		return FALSE;
 	}
 
-	if (!sna_pixmap_force_to_gpu(sna->front, MOVE_READ)) {
+	if (!sna_pixmap_force_to_gpu(sna->front, MOVE_WRITE)) {
 		xf86DrvMsg(screen->myNum, X_ERROR,
 			   "[intel] Failed to allocate video resources for front buffer %dx%d at depth %d\n",
 			   screen->width,
commit 5c5ebd6a3ecf3bd83747003bc272a736b7c333c0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 11:13:34 2012 +0000

    intel: Fix typo s/asert/assert/
    
    The joy of conditional compiles masked this compilation failure when
    testing.
    
    Reported-by: Reinhard Karcher <reinhard.karcher at gmx.net>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index cbadebf..cfaa1ad 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -207,7 +207,7 @@ list_append(struct list *entry, struct list *head)
 static inline void
 __list_del(struct list *prev, struct list *next)
 {
-	asert(next->prev == prev->next);
+	assert(next->prev == prev->next);
 	next->prev = prev;
 	prev->next = next;
 }
commit f6474883d325cff443da9ceaa99ec734e6cdc1d6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 11:04:05 2012 +0000

    sna/gen2+: Use the reduced operator from CompositeRectangles
    
    Do not attempt to further reduce the operator locally in each backend as
    the reduction is already performed in the upper layer.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42606
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 597d5f3..6907dd6 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2348,30 +2348,24 @@ gen2_render_fill_boxes_try_blt(struct sna *sna,
 			       PixmapPtr dst, struct kgem_bo *dst_bo,
 			       const BoxRec *box, int n)
 {
-	uint8_t alu = GXcopy;
+	uint8_t alu;
 	uint32_t pixel;
 
-	if (!sna_get_pixel_from_rgba(&pixel,
-				     color->red,
-				     color->green,
-				     color->blue,
-				     color->alpha,
-				     format))
+	if (op > PictOpSrc)
 		return FALSE;
 
 	if (op == PictOpClear) {
 		alu = GXclear;
 		pixel = 0;
-		op = PictOpSrc;
-	}
-
-	if (op == PictOpOver) {
-		if ((pixel & 0xff000000) == 0xff000000)
-			op = PictOpSrc;
-	}
-
-	if (op != PictOpSrc)
+	} else if (!sna_get_pixel_from_rgba(&pixel,
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    format))
 		return FALSE;
+	else
+		alu = GXcopy;
 
 	return sna_blt_fill_boxes(sna, alu,
 				  dst_bo, dst->drawable.bitsPerPixel,
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 2a18631..d3ed2ef 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -4153,7 +4153,7 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 			       PixmapPtr dst, struct kgem_bo *dst_bo,
 			       const BoxRec *box, int n)
 {
-	uint8_t alu = GXcopy;
+	uint8_t alu;
 	uint32_t pixel;
 
 	if (dst_bo->tiling == I915_TILING_Y) {
@@ -4162,36 +4162,21 @@ gen3_render_fill_boxes_try_blt(struct sna *sna,
 		return FALSE;
 	}
 
-	if (color->alpha >= 0xff00) {
-		if (op == PictOpOver)
-			op = PictOpSrc;
-		else if (op == PictOpOutReverse)
-			op = PictOpClear;
-		else if (op == PictOpAdd &&
-			 (color->red & color->green & color->blue) >= 0xff00)
-			op = PictOpSrc;
-	}
+	if (op > PictOpSrc)
+		return FALSE;
 
-	pixel = 0;
 	if (op == PictOpClear) {
 		alu = GXclear;
-	} else if (op == PictOpSrc) {
-		if (color->alpha <= 0x00ff)
-			alu = GXclear;
-		else if (!sna_get_pixel_from_rgba(&pixel,
-						  color->red,
-						  color->green,
-						  color->blue,
-						  color->alpha,
-						  format)) {
-			DBG(("%s: unknown format %x\n", __FUNCTION__,
-			     (uint32_t)format));
-			return FALSE;
-		}
-	} else {
-		DBG(("%s: unhandle op %d\n", __FUNCTION__, alu));
+		pixel = 0;
+	} else if (!sna_get_pixel_from_rgba(&pixel,
+					    color->red,
+					    color->green,
+					    color->blue,
+					    color->alpha,
+					    format))
 		return FALSE;
-	}
+	else
+		alu = GXcopy;
 
 	return sna_blt_fill_boxes(sna, alu,
 				  dst_bo, dst->drawable.bitsPerPixel,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 02454b2..a69852e 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2901,29 +2901,24 @@ gen4_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt(sna) ||
-	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    !gen4_check_dst_format(format)) {
+	if (op <= PictOpSrc &&
+	    (prefer_blt(sna) ||
+	     too_large(dst->drawable.width, dst->drawable.height) ||
+	     !gen4_check_dst_format(format))) {
 		uint8_t alu = -1;
 
-		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
+		pixel = 0;
+		if (op == PictOpClear)
 			alu = GXclear;
-
-		if (op == PictOpSrc || (op == PictOpOver && color->alpha >= 0xff00)) {
+		else if (sna_get_pixel_from_rgba(&pixel,
+						 color->red,
+						 color->green,
+						 color->blue,
+						 color->alpha,
+						 format))
 			alu = GXcopy;
-			if (color->alpha <= 0x00ff)
-				alu = GXclear;
-		}
 
-		pixel = 0;
-		if ((alu == GXclear ||
-		     (alu == GXcopy &&
-		      sna_get_pixel_from_rgba(&pixel,
-					      color->red,
-					      color->green,
-					      color->blue,
-					      color->alpha,
-					      format))) &&
+		if (alu != -1 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6763edf..01604ef 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3241,29 +3241,24 @@ gen5_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt_fill(sna) ||
-	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    !gen5_check_dst_format(format)) {
+	if (op <= PictOpSrc &&
+	    (prefer_blt_fill(sna) ||
+	     too_large(dst->drawable.width, dst->drawable.height) ||
+	     !gen5_check_dst_format(format))) {
 		uint8_t alu = -1;
 
-		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
+		pixel = 0;
+		if (op == PictOpClear)
 			alu = GXclear;
-
-		if (op == PictOpSrc || (op == PictOpOver && color->alpha >= 0xff00)) {
+		else if (sna_get_pixel_from_rgba(&pixel,
+						 color->red,
+						 color->green,
+						 color->blue,
+						 color->alpha,
+						 format))
 			alu = GXcopy;
-			if (color->alpha <= 0x00ff)
-				alu = GXclear;
-		}
 
-		pixel = 0;
-		if ((alu == GXclear ||
-		     (alu == GXcopy &&
-		      sna_get_pixel_from_rgba(&pixel,
-					      color->red,
-					      color->green,
-					      color->blue,
-					      color->alpha,
-					      format))) &&
+		if (alu != -1 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 390cb0a..764b629 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3655,29 +3655,24 @@ gen6_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt_fill(sna, dst_bo) ||
-	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    !gen6_check_dst_format(format)) {
+	if (op <= PictOpSrc &&
+	    (prefer_blt_fill(sna, dst_bo) ||
+	     too_large(dst->drawable.width, dst->drawable.height) ||
+	     !gen6_check_dst_format(format))) {
 		uint8_t alu = -1;
 
-		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
+		pixel = 0;
+		if (op == PictOpClear)
 			alu = GXclear;
-
-		if (op == PictOpSrc || (op == PictOpOver && color->alpha >= 0xff00)) {
+		else if (sna_get_pixel_from_rgba(&pixel,
+						 color->red,
+						 color->green,
+						 color->blue,
+						 color->alpha,
+						 format))
 			alu = GXcopy;
-			if (color->alpha <= 0x00ff)
-				alu = GXclear;
-		}
 
-		pixel = 0;
-		if ((alu == GXclear ||
-		     (alu == GXcopy &&
-		      sna_get_pixel_from_rgba(&pixel,
-					      color->red,
-					      color->green,
-					      color->blue,
-					      color->alpha,
-					      format))) &&
+		if (alu != -1 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 2b3f67b..36ea8a1 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3738,29 +3738,24 @@ gen7_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt_fill(sna, dst_bo) ||
-	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    !gen7_check_dst_format(format)) {
+	if (op <= PictOpSrc &&
+	    (prefer_blt_fill(sna, dst_bo) ||
+	     too_large(dst->drawable.width, dst->drawable.height) ||
+	     !gen7_check_dst_format(format))) {
 		uint8_t alu = -1;
 
-		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
+		pixel = 0;
+		if (op == PictOpClear)
 			alu = GXclear;
-
-		if (op == PictOpSrc || (op == PictOpOver && color->alpha >= 0xff00)) {
+		else if (sna_get_pixel_from_rgba(&pixel,
+						 color->red,
+						 color->green,
+						 color->blue,
+						 color->alpha,
+						 format))
 			alu = GXcopy;
-			if (color->alpha <= 0x00ff)
-				alu = GXclear;
-		}
 
-		pixel = 0;
-		if ((alu == GXclear ||
-		     (alu == GXcopy &&
-		      sna_get_pixel_from_rgba(&pixel,
-					      color->red,
-					      color->green,
-					      color->blue,
-					      color->alpha,
-					      format))) &&
+		if (alu != -1 &&
 		    sna_blt_fill_boxes(sna, alu,
 				       dst_bo, dst->drawable.bitsPerPixel,
 				       pixel, box, n))
commit 2ae3cd36ae98fa43716ef482e70364e7563cf1ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 10:51:05 2012 +0000

    sna: Tidy marking pixmap->clear for CompositeRectangles
    
    Reduce the two unsightly checks into one.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 38ff99d..a610e7c 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -781,45 +781,29 @@ sna_composite_rectangles(CARD8		 op,
 	/* Clearing a pixmap after creation is a common operation, so take
 	 * advantage and reduce further damage operations.
 	 */
+	if (region.data == NULL &&
+	    region.extents.x2 - region.extents.x1 == pixmap->drawable.width &&
+	    region.extents.y2 - region.extents.y1 == pixmap->drawable.height) {
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width, pixmap->drawable.height);
+		priv->undamaged = false;
+		if (op <= PictOpSrc) {
+			priv->clear = true;
+			priv->clear_color = 0;
+			if (op == PictOpSrc)
+				sna_get_pixel_from_rgba(&priv->clear_color,
+							color->red,
+							color->green,
+							color->blue,
+							color->alpha,
+							dst->format);
+			DBG(("%s: marking clear [%08x]\n",
+			     __FUNCTION__, priv->clear_color));
+		}
+	}
 	if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
 		assert_pixmap_contains_box(pixmap, RegionExtents(&region));
-
-		if (region.data == NULL &&
-		    region.extents.x2 - region.extents.x1 == pixmap->drawable.width &&
-		    region.extents.y2 - region.extents.y1 == pixmap->drawable.height) {
-			sna_damage_all(&priv->gpu_damage,
-				       pixmap->drawable.width, pixmap->drawable.height);
-			priv->undamaged = false;
-			if (op <= PictOpSrc) {
-				priv->clear = true;
-				priv->clear_color = 0;
-				if (op == PictOpSrc)
-					sna_get_pixel_from_rgba(&priv->clear_color,
-								color->red,
-								color->green,
-								color->blue,
-								color->alpha,
-								dst->format);
-				DBG(("%s: marking clear [%08x]\n",
-				     __FUNCTION__, priv->clear_color));
-			}
-		} else
-			sna_damage_add(&priv->gpu_damage, &region);
-	} else if (op <= PictOpSrc &&
-		   region.data == NULL &&
-		   region.extents.x2 - region.extents.x1 == pixmap->drawable.width &&
-		   region.extents.y2 - region.extents.y1 == pixmap->drawable.height) {
-		priv->clear = true;
-		priv->clear_color = 0;
-		if (op == PictOpSrc)
-			sna_get_pixel_from_rgba(&priv->clear_color,
-						color->red,
-						color->green,
-						color->blue,
-						color->alpha,
-						dst->format);
-		DBG(("%s: marking clear [%08x]\n",
-		     __FUNCTION__, priv->clear_color));
+		sna_damage_add(&priv->gpu_damage, &region);
 	}
 
 	goto done;
commit 8b21659ef2f9cec799b8215a2e5481667bc744bf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 10:50:32 2012 +0000

    sna: Add some assertions around pixmap creation for render operations
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e961c2c..419d1c6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2023,6 +2023,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 static inline struct sna_pixmap *
 sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
 {
+	assert(priv->gpu_bo);
 	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
@@ -2051,7 +2052,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		unsigned mode;
 
 		mode = 0;
-		if (priv->cpu_damage)
+		if (priv->cpu_damage && !priv->cpu_bo)
 			mode |= CREATE_INACTIVE;
 		if (pixmap->usage_hint == SNA_CREATE_FB)
 			mode |= CREATE_EXACT | CREATE_SCANOUT;
@@ -2132,14 +2133,25 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	sna_damage_reduce(&priv->cpu_damage);
 	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
-		if (!wedged(sna) && priv->create & KGEM_CAN_CREATE_GPU)
+		DBG(("%s: creating GPU bo (%dx%d@%d), create=%x\n",
+		     __FUNCTION__,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height,
+		     pixmap->drawable.bitsPerPixel,
+		     priv->create));
+		assert(!priv->mapped);
+		if (!wedged(sna) && priv->create & KGEM_CAN_CREATE_GPU) {
+			assert(pixmap->drawable.width > 0);
+			assert(pixmap->drawable.height > 0);
+			assert(pixmap->drawable.bitsPerPixel >= 8);
 			priv->gpu_bo =
 				kgem_create_2d(&sna->kgem,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
 					       pixmap->drawable.bitsPerPixel,
 					       sna_pixmap_choose_tiling(pixmap),
-					       priv->cpu_damage ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
+					       (priv->cpu_damage && priv->cpu_bo == NULL) ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
+		}
 		if (priv->gpu_bo == NULL) {
 			DBG(("%s: not creating GPU bo\n", __FUNCTION__));
 			assert(list_is_empty(&priv->list));
@@ -2179,6 +2191,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		Bool ok;
 
 		assert(pixmap_contains_damage(pixmap, priv->cpu_damage));
+		DBG(("%s: uploading %d damage boxes\n", __FUNCTION__, n));
 
 		ok = FALSE;
 		if (priv->cpu_bo)
@@ -2225,7 +2238,7 @@ done:
 		}
 	}
 active:
-	return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
+	return sna_pixmap_mark_active(sna, priv);
 }
 
 static bool must_check sna_validate_pixmap(DrawablePtr draw, PixmapPtr pixmap)
commit 7d74300647dd815ae5b4399f84ea339045d12563
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 09:54:16 2012 +0000

    sna: Discard unbound partial buffers
    
    Instead of keeping a virgin partial buffer around on its inactive list,
    just transfer it to the global bo cache (in actuality destroy it since
    it is just a kmalloc with no pages bound).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b036d26..2dff2ed 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1246,13 +1246,12 @@ static void kgem_retire_partials(struct kgem *kgem)
 
 		assert(bo->base.refcnt == 1);
 		assert(bo->base.exec == NULL);
-		if (!bo->mmapped) {
+		if (!bo->mmapped || bo->base.presumed_offset == 0) {
 			list_del(&bo->base.list);
 			kgem_bo_unref(kgem, &bo->base);
 			continue;
 		}
 
-		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
 		bo->used = 0;
commit 9eb8ba4df2a99342dbb3569c043d84aa82506aba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 09:42:58 2012 +0000

    sna: Preserve the offset alignment when trimming unused rows from partials
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8e3de97..b036d26 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2062,7 +2062,7 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		     __FUNCTION__));
 
 		if ((flags & CREATE_NO_RETIRE) == 0) {
-			DBG(("%s: can not retire\n"));
+			DBG(("%s: can not retire\n", __FUNCTION__));
 			return NULL;
 		}
 
@@ -3340,7 +3340,7 @@ void kgem_sync(struct kgem *kgem)
 	list_for_each_entry(bo, &kgem->sync_list, list)
 		kgem_bo_sync__cpu(kgem, bo);
 
-	assert (kgem->sync == NULL);
+	assert(kgem->sync == NULL);
 }
 
 void kgem_clear_dirty(struct kgem *kgem)
@@ -3812,15 +3812,23 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 
 	if (height & 1) {
 		struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
+		int min;
+
+		assert(io->used);
 
 		/* Having padded this surface to ensure that accesses to
 		 * the last pair of rows is valid, remove the padding so
 		 * that it can be allocated to other pixmaps.
 		 */
-		if (io->used)
-			io->used -= stride;
+		min = bo->delta + height * stride;
+		min = ALIGN(min, 64);
+		if (io->used != min) {
+			DBG(("%s: trimming partial buffer from %d to %d\n",
+			     __FUNCTION__, io->used, min));
+			io->used = min;
+			bubble_sort_partial(&kgem->active_partials, io);
+		}
 		bo->size.bytes -= stride;
-		bubble_sort_partial(&kgem->active_partials, io);
 	}
 
 	bo->pitch = stride;
commit 8e6166a0e87a00248d0d4925953c923850f5b840
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 02:09:25 2012 +0000

    sna: Be careful not to reduce operators for superluminal colors
    
    wine-1.4 is such an example of a crazy application.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42606
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 225008f..38ff99d 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -644,7 +644,7 @@ sna_composite_rectangles(CARD8		 op,
 		return;
 	}
 
-	if (color->alpha <= 0x00ff) {
+	if ((color->red|color->green|color->blue|color->alpha) <= 0x00ff) {
 		switch (op) {
 		case PictOpOver:
 		case PictOpOutReverse:
@@ -661,6 +661,22 @@ sna_composite_rectangles(CARD8		 op,
 			op = PictOpOverReverse;
 			break;
 		}
+	}
+	if (color->alpha <= 0x00ff) {
+		switch (op) {
+		case PictOpOver:
+		case PictOpOutReverse:
+			return;
+		case  PictOpInReverse:
+			op = PictOpClear;
+			break;
+		case  PictOpAtopReverse:
+			op = PictOpOut;
+			break;
+		case  PictOpXor:
+			op = PictOpOverReverse;
+			break;
+		}
 	} else if (color->alpha >= 0xff00) {
 		switch (op) {
 		case PictOpOver:
commit 3f73cc706ff39cd4c10433791f12b5f829f62e6d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 08:54:24 2012 +0000

    sna/dri: Use a counter for the number of DRI drawables attached to a pixmap
    
    The root pixmap, for instance, may have unique DRI2Drawables for each
    inferior window. We only want to clear the flush flag on the last
    release, so we need to keep a count of how many DRI drawables remain
    attached rather than a solitary flag.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 119244d..8340345 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -129,12 +129,12 @@ struct sna_pixmap {
 
 	uint32_t stride;
 	uint32_t clear_color;
+	unsigned flush;
 
 #define SOURCE_BIAS 4
 	uint16_t source_count;
 	uint8_t pinned :1;
 	uint8_t mapped :1;
-	uint8_t flush :1;
 	uint8_t clear :1;
 	uint8_t undamaged :1;
 	uint8_t create :3;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 3909b84..92132d6 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -163,7 +163,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	if (priv == NULL)
 		return NULL;
 
-	if (priv->flush)
+	if (priv->flush++)
 		return priv->gpu_bo;
 
 	tiling = color_tiling(sna, &pixmap->drawable);
@@ -177,7 +177,6 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	 *
 	 * As we don't track which Client, we flush for all.
 	 */
-	priv->flush = 1;
 	sna_accel_watch_flush(sna, 1);
 
 	/* Don't allow this named buffer to be replaced */
@@ -324,10 +323,12 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 			struct sna_pixmap *priv = sna_pixmap(private->pixmap);
 
 			/* Undo the DRI markings on this pixmap */
-			list_del(&priv->list);
-			sna_accel_watch_flush(sna, -1);
-			priv->pinned = private->pixmap == sna->front;
-			priv->flush = 0;
+			assert(priv->flush > 0);
+			if (--priv->flush == 0) {
+				list_del(&priv->list);
+				sna_accel_watch_flush(sna, -1);
+				priv->pinned = private->pixmap == sna->front;
+			}
 
 			screen->DestroyPixmap(private->pixmap);
 		}
commit 49a80ce1ff336fb2fa7d214bd3fddbce5a62b77a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 15:52:41 2012 +0000

    sna/gen2+: Prefer not to fallback if the source is busy
    
    As if we try to perform the operation with outstanding operations on the
    source pixmaps, we will stall waiting for them to complete.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 57bb835..597d5f3 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1512,16 +1512,37 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return false;
+
+	if (priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		is_unhandled_gradient(p) ||
-		!gen2_check_filter(p) ||
-		!gen2_check_repeat(p) ||
-		need_upload(p));
+	if (is_unhandled_gradient(p) || !gen2_check_repeat(p))
+		return true;
+
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen2_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -1534,6 +1555,7 @@ gen2_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen2_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -1542,18 +1564,27 @@ gen2_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(mask, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = NULL;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -1566,34 +1597,28 @@ gen2_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (src_pixmap && !src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask_pixmap && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 78c7ea0..2a18631 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2511,16 +2511,37 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return false;
+
+	if (priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		!gen3_check_xformat(p) ||
-		!gen3_check_filter(p) ||
-		!gen3_check_repeat(p) ||
-		need_upload(p));
+	if (!gen3_check_xformat(p) || !gen3_check_repeat(p))
+		return true;
+
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen3_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -2534,6 +2555,7 @@ gen3_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen3_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -2542,18 +2564,27 @@ gen3_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(mask, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = false;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -2575,38 +2606,28 @@ gen3_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv &&
-		    ((priv->gpu_damage && !priv->cpu_damage) ||
-		     (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (src_pixmap && !src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv &&
-		    ((priv->gpu_damage && !priv->cpu_damage) ||
-		     (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask_pixmap && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c3a82a3..02454b2 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2122,11 +2122,8 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_gradient(PicturePtr picture)
+check_gradient(PicturePtr picture)
 {
-	if (picture->pDrawable)
-		return FALSE;
-
 	switch (picture->pSourcePict->type) {
 	case SourcePictTypeSolidFill:
 	case SourcePictTypeLinear:
@@ -2155,17 +2152,38 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return false;
+
+	if (priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		is_gradient(p) ||
-		!gen4_check_filter(p) ||
-		!gen4_check_repeat(p) ||
-		!gen4_check_format(p->format) ||
-		need_upload(p));
+	if (p->pSourcePict)
+		return check_gradient(p);
+
+	if (!gen4_check_repeat(p) || !gen4_check_format(p->format))
+		return true;
+
+	/* soft errors: perfer to upload/compute rather than readback */
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen4_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -2178,6 +2196,7 @@ gen4_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen4_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -2186,18 +2205,27 @@ gen4_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(mask, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = false;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -2210,34 +2238,28 @@ gen4_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (!src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index bce5a3c..6763edf 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2198,17 +2198,39 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL)
+		return false;
+
+	if (priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		is_gradient(p) ||
-		!gen5_check_filter(p) ||
-		!gen5_check_repeat(p) ||
-		!gen5_check_format(p->format) ||
-		need_upload(p));
+	if (is_gradient(p) ||
+	    !gen5_check_repeat(p) ||
+	    !gen5_check_format(p->format))
+		return true;
+
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen5_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -2221,6 +2243,7 @@ gen5_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen5_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -2229,18 +2252,27 @@ gen5_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(mask, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = false;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -2253,34 +2285,28 @@ gen5_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (src_pixmap && !src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask_pixmap && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 9eb4221..390cb0a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2374,7 +2374,7 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_gradient(PicturePtr picture)
+check_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
 		return FALSE;
@@ -2407,17 +2407,37 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL || priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		is_gradient(p) ||
-		!gen6_check_filter(p) ||
-		!gen6_check_repeat(p) ||
-		!gen6_check_format(p->format) ||
-		need_upload(p));
+	if (p->pSourcePict)
+		return check_gradient(p);
+
+	if (!gen6_check_repeat(p) || !gen6_check_format(p->format))
+		return true;
+
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen6_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -2430,6 +2450,7 @@ gen6_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen6_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -2438,18 +2459,27 @@ gen6_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(mask, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = false;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -2464,34 +2494,28 @@ gen6_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (src_pixmap && !src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask_pixmap && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5829ae3..2b3f67b 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2487,17 +2487,36 @@ need_upload(PicturePtr p)
 }
 
 static bool
-source_fallback(PicturePtr p)
+source_is_busy(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	if (priv == NULL || priv->clear)
+		return false;
+
+	if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+		return true;
+
+	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		return true;
+
+	return priv->gpu_damage && !priv->cpu_damage;
+}
+
+static bool
+source_fallback(PicturePtr p, PixmapPtr pixmap)
 {
 	if (sna_picture_is_solid(p, NULL))
 		return false;
 
-	return (has_alphamap(p) ||
-		is_gradient(p) ||
-		!gen7_check_filter(p) ||
-		!gen7_check_repeat(p) ||
-		!gen7_check_format(p->format) ||
-		need_upload(p));
+	if (is_gradient(p) ||
+	    !gen7_check_repeat(p) ||
+	    !gen7_check_format(p->format))
+		return true;
+
+	if (pixmap && source_is_busy(pixmap))
+		return false;
+
+	return has_alphamap(p) || !gen7_check_filter(p) || need_upload(p);
 }
 
 static bool
@@ -2510,6 +2529,7 @@ gen7_composite_fallback(struct sna *sna,
 	PixmapPtr src_pixmap;
 	PixmapPtr mask_pixmap;
 	PixmapPtr dst_pixmap;
+	bool src_fallback, mask_fallback;
 
 	if (!gen7_check_dst_format(dst->format)) {
 		DBG(("%s: unknown destination format: %d\n",
@@ -2518,18 +2538,27 @@ gen7_composite_fallback(struct sna *sna,
 	}
 
 	dst_pixmap = get_drawable_pixmap(dst->pDrawable);
+
 	src_pixmap = src->pDrawable ? get_drawable_pixmap(src->pDrawable) : NULL;
-	mask_pixmap = (mask && mask->pDrawable) ? get_drawable_pixmap(mask->pDrawable) : NULL;
+	src_fallback = source_fallback(src, src_pixmap);
+
+	if (mask) {
+		mask_pixmap = mask->pDrawable ? get_drawable_pixmap(mask->pDrawable) : NULL;
+		mask_fallback = source_fallback(src, mask_pixmap);
+	} else {
+		mask_pixmap = NULL;
+		mask_fallback = false;
+	}
 
 	/* If we are using the destination as a source and need to
 	 * readback in order to upload the source, do it all
 	 * on the cpu.
 	 */
-	if (src_pixmap == dst_pixmap && source_fallback(src)) {
+	if (src_pixmap == dst_pixmap && src_fallback) {
 		DBG(("%s: src is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
-	if (mask_pixmap == dst_pixmap && source_fallback(mask)) {
+	if (mask_pixmap == dst_pixmap && mask_fallback) {
 		DBG(("%s: mask is dst and will fallback\n",__FUNCTION__));
 		return TRUE;
 	}
@@ -2544,34 +2573,28 @@ gen7_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !source_fallback(src)) {
-		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: src is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (src_pixmap && !src_fallback) {
+		DBG(("%s: src is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
-	if (mask_pixmap && !source_fallback(mask)) {
-		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
-			DBG(("%s: mask is already on the GPU, try to use GPU\n",
-			     __FUNCTION__));
-			return FALSE;
-		}
+	if (mask_pixmap && !mask_fallback) {
+		DBG(("%s: mask is already on the GPU, try to use GPU\n",
+		     __FUNCTION__));
+		return FALSE;
 	}
 
 	/* However if the dst is not on the GPU and we need to
 	 * render one of the sources using the CPU, we may
 	 * as well do the entire operation in place onthe CPU.
 	 */
-	if (source_fallback(src)) {
+	if (src_fallback) {
 		DBG(("%s: dst is on the CPU and src will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
 	}
 
-	if (mask && source_fallback(mask)) {
+	if (mask && mask_fallback) {
 		DBG(("%s: dst is on the CPU and mask will fallback\n",
 		     __FUNCTION__));
 		return TRUE;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 96d945e..6c31f33 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -450,7 +450,6 @@ static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
 {
 	DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n",
 		 __FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL));
-	assert(bo->proxy == NULL);
 	return bo->rq;
 }
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ce3afae..e961c2c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -871,10 +871,10 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		return true;
 	}
 
-	DBG(("%s: gpu_bo=%d, gpu_damage=%p\n",
+	DBG(("%s: gpu_bo=%d, gpu_damage=%p, cpu_damage=%p, is-clear?=%d\n",
 	     __FUNCTION__,
 	     priv->gpu_bo ? priv->gpu_bo->handle : 0,
-	     priv->gpu_damage));
+	     priv->gpu_damage, priv->cpu_damage, priv->clear));
 
 	if ((flags & MOVE_READ) == 0) {
 		assert(flags & MOVE_WRITE);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 74c04af..421c7ff 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1418,7 +1418,7 @@ sna_render_picture_fixup(struct sna *sna,
 
 	if (picture->alphaMap) {
 		DBG(("%s: alphamap\n", __FUNCTION__));
-		if ((is_gpu(picture->pDrawable) || is_gpu(picture->alphaMap->pDrawable))) {
+		if (is_gpu(picture->pDrawable) || is_gpu(picture->alphaMap->pDrawable)) {
 			return sna_render_picture_flatten(sna, picture, channel,
 							  x, y, w, y, dst_x, dst_y);
 		}
@@ -1428,7 +1428,7 @@ sna_render_picture_fixup(struct sna *sna,
 
 	if (picture->filter == PictFilterConvolution) {
 		DBG(("%s: convolution\n", __FUNCTION__));
-		if (picture->pDrawable && is_gpu(picture->pDrawable)) {
+		if (is_gpu(picture->pDrawable)) {
 			return sna_render_picture_convolve(sna, picture, channel,
 							   x, y, w, h, dst_x, dst_y);
 		}
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 6c8f66a..a523fed 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -72,10 +72,10 @@ is_gpu(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
 
-	if (priv == NULL)
+	if (priv == NULL || priv->clear)
 		return false;
 
-	if (priv->gpu_damage)
+	if (priv->gpu_damage || (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo)))
 		return true;
 
 	return priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo);
commit 4899740f6f0f5b1a5b2b3490502ccdeb7b092877
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 8 00:57:12 2012 +0000

    sna: Do not reset partial buffers if they are not attached to the current batch
    
    As we may be holding on to them as an active mapping whilst they are
    executing; resetting the used counter back to zero in this case can cause
    corruption.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e4dcbb2..8e3de97 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1464,10 +1464,6 @@ static void kgem_finish_partials(struct kgem *kgem)
 		assert(bo->base.refcnt >= 1);
 
 		if (!bo->base.exec) {
-			if (bo->base.refcnt == 1 && bo->used) {
-				bo->used = 0;
-				bubble_sort_partial(&kgem->active_partials, bo);
-			}
 			DBG(("%s: skipping unattached handle=%d, used=%d\n",
 			     __FUNCTION__, bo->base.handle, bo->used));
 			continue;
commit 72a7538d4e7bcf0bd7455d9e67d8751e17739e6c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 15:45:21 2012 +0000

    sna: Convolution filter fixes
    
    A couple of typos made the convolution filter explode rather than
    convolve.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 404e12b..c3a82a3 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -681,7 +681,10 @@ static uint32_t gen4_check_filter(PicturePtr picture)
 	case PictFilterBilinear:
 		return TRUE;
 	default:
-		DBG(("%s: unknown filter: %d\n", __FUNCTION__, picture->filter));
+		DBG(("%s: unknown filter: %s [%d]\n",
+		     __FUNCTION__,
+		     PictureGetFilterName(picture->filter),
+		     picture->filter));
 		return FALSE;
 	}
 }
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index a345962..74c04af 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1234,15 +1234,18 @@ sna_render_picture_convolve(struct sna *sna,
 	PixmapPtr pixmap;
 	PicturePtr tmp;
 	pixman_fixed_t *params = picture->filter_params;
-	int x_off = (params[0] - pixman_fixed_1) >> 1;
-	int y_off = (params[1] - pixman_fixed_1) >> 1;
+	int x_off = -pixman_fixed_to_int((params[0] - pixman_fixed_1) >> 1);
+	int y_off = -pixman_fixed_to_int((params[1] - pixman_fixed_1) >> 1);
 	int cw = pixman_fixed_to_int(params[0]);
 	int ch = pixman_fixed_to_int(params[1]);
 	int i, j, error, depth;
+	struct kgem_bo *bo;
 
 	/* Lame multi-pass accumulation implementation of a general convolution
 	 * that works everywhere.
 	 */
+	DBG(("%s: origin=(%d,%d) kernel=%dx%d, size=%dx%d\n",
+	     __FUNCTION__, x_off, y_off, cw, ch, w, h));
 
 	assert(picture->pDrawable);
 	assert(picture->filter == PictFilterConvolution);
@@ -1267,8 +1270,10 @@ sna_render_picture_convolve(struct sna *sna,
 	if (tmp == NULL)
 		return 0;
 
-	if (!sna->render.fill_one(sna, pixmap, sna_pixmap_get_bo(pixmap), 0,
-				  0, 0, w, h, GXclear)) {
+	ValidatePicture(tmp);
+
+	bo = sna_pixmap_get_bo(pixmap);
+	if (!sna->render.clear(sna, pixmap, bo)) {
 		FreePicture(tmp, 0);
 		return 0;
 	}
@@ -1282,6 +1287,8 @@ sna_render_picture_convolve(struct sna *sna,
 
 			color.alpha = *params++;
 			color.red = color.green = color.blue = 0;
+			DBG(("%s: (%d, %d), alpha=%x\n",
+			     __FUNCTION__, i,j, color.alpha));
 
 			if (color.alpha <= 0x00ff)
 				continue;
@@ -1291,7 +1298,7 @@ sna_render_picture_convolve(struct sna *sna,
 				sna_composite(PictOpAdd, picture, alpha, tmp,
 					      x, y,
 					      0, 0,
-					      x_off-i, y_off-j,
+					      x_off+i, y_off+j,
 					      w, h);
 				FreePicture(alpha, 0);
 			}
@@ -1309,7 +1316,7 @@ sna_render_picture_convolve(struct sna *sna,
 	channel->scale[1] = 1.f / h;
 	channel->offset[0] = -dst_x;
 	channel->offset[1] = -dst_y;
-	channel->bo = kgem_bo_reference(sna_pixmap_get_bo(pixmap));
+	channel->bo = kgem_bo_reference(bo); /* transfer ownership */
 	FreePicture(tmp, 0);
 
 	return 1;
@@ -1423,7 +1430,7 @@ sna_render_picture_fixup(struct sna *sna,
 		DBG(("%s: convolution\n", __FUNCTION__));
 		if (picture->pDrawable && is_gpu(picture->pDrawable)) {
 			return sna_render_picture_convolve(sna, picture, channel,
-							   x, y, w, y, dst_x, dst_y);
+							   x, y, w, h, dst_x, dst_y);
 		}
 
 		goto do_fixup;
commit 34fe3cbb316c36c7022735cf9b03d8b655e04434
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 17:49:01 2012 +0000

    sna: Avoid recursive calls to kgem_retire_partials()
    
    Whilst iterating the partial list and uploading the buffers, we need to
    avoid triggering a recursive call into retire should we attempt to shrink a
    buffer. Such a recursive call will modify the list beneath us so that we
    chase a stale pointer and wreak havoc with memory corruption.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=47061
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index 366b9e8..cbadebf 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -207,8 +207,9 @@ list_append(struct list *entry, struct list *head)
 static inline void
 __list_del(struct list *prev, struct list *next)
 {
-    next->prev = prev;
-    prev->next = next;
+	assert(next->prev == prev->next);
+	next->prev = prev;
+	prev->next = next;
 }
 
 static inline void
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 279face..e4dcbb2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -121,22 +121,47 @@ static bool validate_partials(struct kgem *kgem)
 
 	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
 		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.refcnt >= 1);
 		assert(bo->base.io);
-		if (bo->base.list.next == &kgem->active_partials)
-			return true;
+
+		if (&next->base.list == &kgem->active_partials)
+			break;
+
 		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
-			ErrorF("this rem: %d, next rem: %d\n",
+			ErrorF("active error: this rem: %d, next rem: %d\n",
 			       bytes(&bo->base) - bo->used,
 			       bytes(&next->base) - next->used);
 			goto err;
 		}
 	}
+
+	list_for_each_entry_safe(bo, next, &kgem->inactive_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+		assert(bo->base.refcnt == 1);
+
+		if (&next->base.list == &kgem->inactive_partials)
+			break;
+
+		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
+			ErrorF("inactive error: this rem: %d, next rem: %d\n",
+			       bytes(&bo->base) - bo->used,
+			       bytes(&next->base) - next->used);
+			goto err;
+		}
+	}
+
 	return true;
 
 err:
+	ErrorF("active partials:\n");
 	list_for_each_entry(bo, &kgem->active_partials, base.list)
-		ErrorF("bo: used=%d / %d, rem=%d\n",
-		       bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
+		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
+		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
+	ErrorF("inactive partials:\n");
+	list_for_each_entry(bo, &kgem->inactive_partials, base.list)
+		ErrorF("bo handle=%d: used=%d / %d, rem=%d\n",
+		       bo->base.handle, bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
 	return false;
 }
 #else
@@ -1220,7 +1245,13 @@ static void kgem_retire_partials(struct kgem *kgem)
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
 		assert(bo->base.refcnt == 1);
-		assert(bo->mmapped);
+		assert(bo->base.exec == NULL);
+		if (!bo->mmapped) {
+			list_del(&bo->base.list);
+			kgem_bo_unref(kgem, &bo->base);
+			continue;
+		}
+
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
@@ -1229,6 +1260,7 @@ static void kgem_retire_partials(struct kgem *kgem)
 		list_move_tail(&bo->base.list, &kgem->inactive_partials);
 		bubble_sort_partial(&kgem->inactive_partials, bo);
 	}
+	assert(validate_partials(kgem));
 }
 
 bool kgem_retire(struct kgem *kgem)
@@ -1423,12 +1455,23 @@ static void kgem_finish_partials(struct kgem *kgem)
 	struct kgem_partial_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		DBG(("%s: partial handle=%d, used=%d, exec?=%d, write=%d, mmapped=%d\n",
+		     __FUNCTION__, bo->base.handle, bo->used, bo->base.exec!=NULL,
+		     bo->write, bo->mmapped));
+
 		assert(next->base.list.prev == &bo->base.list);
 		assert(bo->base.io);
 		assert(bo->base.refcnt >= 1);
 
-		if (!bo->base.exec)
+		if (!bo->base.exec) {
+			if (bo->base.refcnt == 1 && bo->used) {
+				bo->used = 0;
+				bubble_sort_partial(&kgem->active_partials, bo);
+			}
+			DBG(("%s: skipping unattached handle=%d, used=%d\n",
+			     __FUNCTION__, bo->base.handle, bo->used));
 			continue;
+		}
 
 		if (!bo->write) {
 			assert(bo->base.exec || bo->base.refcnt > 1);
@@ -1458,12 +1501,14 @@ static void kgem_finish_partials(struct kgem *kgem)
 		assert(bo->base.rq == kgem->next_request);
 		assert(bo->base.domain != DOMAIN_GPU);
 
-		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
+		if (bo->base.refcnt == 1 &&
+		    bo->base.size.pages.count > 1 &&
+		    bo->used < bytes(&bo->base) / 2) {
 			struct kgem_bo *shrink;
 
 			shrink = search_linear_cache(kgem,
 						     PAGE_ALIGN(bo->used),
-						     CREATE_INACTIVE);
+						     CREATE_INACTIVE | CREATE_NO_RETIRE);
 			if (shrink) {
 				int n;
 
@@ -1513,6 +1558,8 @@ static void kgem_finish_partials(struct kgem *kgem)
 		bo->need_io = 0;
 
 decouple:
+		DBG(("%s: releasing handle=%d\n",
+		     __FUNCTION__, bo->base.handle));
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
 	}
@@ -2018,6 +2065,11 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		DBG(("%s: inactive and cache bucket empty\n",
 		     __FUNCTION__));
 
+		if (flags & CREATE_NO_RETIRE) {
+			DBG(("%s: can not retire\n", __FUNCTION__));
+			return NULL;
+		}
+
 		if (!kgem->need_retire || !kgem_retire(kgem)) {
 			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 9abb72a..96d945e 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -222,6 +222,7 @@ enum {
 	CREATE_GTT_MAP = 0x8,
 	CREATE_SCANOUT = 0x10,
 	CREATE_TEMPORARY = 0x20,
+	CREATE_NO_RETIRE = 0x40,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
commit 46c79e4d59ec4f90a1fa97b24a3e7058fdbfa6ba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 13:17:21 2012 +0000

    sna: Restore checking for all-clipped-out for CompositeRectangles
    
    In the refactoring to avoid repeatedly applying the singular
    pCompositeClip, the check for the all-clipped state was lost.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 7f1d096..225008f 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -596,11 +596,9 @@ _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 			j++;
 	}
 
-	ret = TRUE;
+	ret = FALSE;
 	if (j)
 	    ret = pixman_region_init_rects(region, boxes, j);
-	else
-	    pixman_region_init(region);
 
 	if (boxes != stack_boxes)
 		free(boxes);
@@ -609,7 +607,7 @@ _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 	     __FUNCTION__, num_rects,
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2,
-	     pixman_region_n_rects(region)));
+	     j));
 	return ret;
 }
 
commit 9944f1834a9c53091e4415f928f917e9247f17c5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 10:47:42 2012 +0000

    sna/gen2: Fix transformation of linear gradients
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index c3a16cb..57bb835 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1189,33 +1189,30 @@ gen2_composite_linear_init(struct sna *sna,
 		struct pixman_f_vector p1, p2;
 		struct pixman_f_transform m, inv;
 
+		pixman_f_transform_from_pixman_transform(&m, picture->transform);
 		DBG(("%s: transform = [%f %f %f, %f %f %f, %f %f %f]\n",
 		     __FUNCTION__,
-		     pixman_fixed_to_double(picture->transform->matrix[0][0]),
-		     pixman_fixed_to_double(picture->transform->matrix[0][1]),
-		     pixman_fixed_to_double(picture->transform->matrix[0][2]),
-		     pixman_fixed_to_double(picture->transform->matrix[1][0]),
-		     pixman_fixed_to_double(picture->transform->matrix[1][1]),
-		     pixman_fixed_to_double(picture->transform->matrix[1][2]),
-		     pixman_fixed_to_double(picture->transform->matrix[2][0]),
-		     pixman_fixed_to_double(picture->transform->matrix[2][1]),
-		     pixman_fixed_to_double(picture->transform->matrix[2][2])));
-
-		pixman_f_transform_from_pixman_transform(&m,
-							 picture->transform);
+		     m.m[0][0], m.m[0][1], m.m[0][2],
+		     m.m[1][0], m.m[1][1], m.m[1][2],
+		     m.m[2][0], m.m[2][1], m.m[2][2]));
 		if (!pixman_f_transform_invert(&inv, &m))
 			return 0;
 
-		p1.v[0] = linear->p1.x;
-		p1.v[1] = linear->p1.y;
-		p1.v[2] = pixman_fixed_1;
+		p1.v[0] = pixman_fixed_to_double(linear->p1.x);
+		p1.v[1] = pixman_fixed_to_double(linear->p1.y);
+		p1.v[2] = 1.;
 		pixman_f_transform_point(&inv, &p1);
 
-		p2.v[0] = linear->p2.x;
-		p2.v[1] = linear->p2.y;
-		p2.v[2] = pixman_fixed_1;
+		p2.v[0] = pixman_fixed_to_double(linear->p2.x);
+		p2.v[1] = pixman_fixed_to_double(linear->p2.y);
+		p2.v[2] = 1.;
 		pixman_f_transform_point(&inv, &p2);
 
+		DBG(("%s: untransformed: p1=(%f, %f, %f), p2=(%f, %f, %f)\n",
+		     __FUNCTION__,
+		     p1.v[0], p1.v[1], p1.v[2],
+		     p2.v[0], p2.v[1], p2.v[2]));
+
 		dx = p2.v[0] - p1.v[0];
 		dy = p2.v[1] - p1.v[1];
 
@@ -1229,7 +1226,7 @@ gen2_composite_linear_init(struct sna *sna,
 
 	channel->u.gen2.linear_dx = dx;
 	channel->u.gen2.linear_dy = dy;
-	channel->u.gen2.linear_offset = -dx*(x0+x-dst_x) + -dy*(y0+y-dst_y);
+	channel->u.gen2.linear_offset = -dx*(x0+dst_x-x) + -dy*(y0+dst_y-y);
 
 	DBG(("%s: dx=%f, dy=%f, offset=%f\n",
 	     __FUNCTION__, dx, dy, channel->u.gen2.linear_offset));
commit df25495eaab5bcd5baf86047a2dd0149eea00d1e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 10:43:24 2012 +0000

    sna/gen4: Hook in the poor-man's linear gradient
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 97af7fc..404e12b 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1816,6 +1816,120 @@ gen4_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
+static Bool
+gen4_composite_linear_init(struct sna *sna,
+			   PicturePtr picture,
+			   struct sna_composite_channel *channel,
+			   int x, int y,
+			   int w, int h,
+			   int dst_x, int dst_y)
+{
+	PictLinearGradient *linear =
+		(PictLinearGradient *)picture->pSourcePict;
+	pixman_fixed_t tx, ty;
+	float x0, y0, sf;
+	float dx, dy;
+
+	DBG(("%s: p1=(%f, %f), p2=(%f, %f), src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n",
+	     __FUNCTION__,
+	     pixman_fixed_to_double(linear->p1.x), pixman_fixed_to_double(linear->p1.y),
+	     pixman_fixed_to_double(linear->p2.x), pixman_fixed_to_double(linear->p2.y),
+	     x, y, dst_x, dst_y, w, h));
+
+	if (linear->p2.x == linear->p1.x && linear->p2.y == linear->p1.y)
+		return 0;
+
+	if (!sna_transform_is_affine(picture->transform)) {
+		DBG(("%s: fallback due to projective transform\n",
+		     __FUNCTION__));
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, w, h, dst_x, dst_y);
+	}
+
+	channel->bo = sna_render_get_gradient(sna, (PictGradient *)linear);
+	if (!channel->bo)
+		return 0;
+
+	channel->filter = PictFilterBilinear;
+	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
+	channel->width  = channel->bo->pitch / 4;
+	channel->height = 1;
+	channel->pict_format = PICT_a8r8g8b8;
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+
+	if (sna_transform_is_translation(picture->transform, &tx, &ty)) {
+		dx = pixman_fixed_to_double(linear->p2.x - linear->p1.x);
+		dy = pixman_fixed_to_double(linear->p2.y - linear->p1.y);
+
+		x0 = pixman_fixed_to_double(linear->p1.x);
+		y0 = pixman_fixed_to_double(linear->p1.y);
+
+		if (tx | ty) {
+			x0 -= pixman_fixed_to_double(tx);
+			y0 -= pixman_fixed_to_double(ty);
+		}
+	} else {
+		struct pixman_f_vector p1, p2;
+		struct pixman_f_transform m, inv;
+
+		pixman_f_transform_from_pixman_transform(&m, picture->transform);
+		DBG(("%s: transform = [%f %f %f, %f %f %f, %f %f %f]\n",
+		     __FUNCTION__,
+		     m.m[0][0], m.m[0][1], m.m[0][2],
+		     m.m[1][0], m.m[1][1], m.m[1][2],
+		     m.m[2][0], m.m[2][1], m.m[2][2]));
+		if (!pixman_f_transform_invert(&inv, &m))
+			return 0;
+
+		p1.v[0] = pixman_fixed_to_double(linear->p1.x);
+		p1.v[1] = pixman_fixed_to_double(linear->p1.y);
+		p1.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p1);
+
+		p2.v[0] = pixman_fixed_to_double(linear->p2.x);
+		p2.v[1] = pixman_fixed_to_double(linear->p2.y);
+		p2.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p2);
+
+		DBG(("%s: untransformed: p1=(%f, %f, %f), p2=(%f, %f, %f)\n",
+		     __FUNCTION__,
+		     p1.v[0], p1.v[1], p1.v[2],
+		     p2.v[0], p2.v[1], p2.v[2]));
+
+		dx = p2.v[0] - p1.v[0];
+		dy = p2.v[1] - p1.v[1];
+
+		x0 = p1.v[0];
+		y0 = p1.v[1];
+	}
+
+	sf = dx*dx + dy*dy;
+	dx /= sf;
+	dy /= sf;
+
+	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
+	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
+
+	channel->embedded_transform.matrix[1][0] = 0;
+	channel->embedded_transform.matrix[1][1] = 0;
+	channel->embedded_transform.matrix[1][2] = pixman_double_to_fixed(.5);
+
+	channel->embedded_transform.matrix[2][0] = 0;
+	channel->embedded_transform.matrix[2][1] = 0;
+	channel->embedded_transform.matrix[2][2] = pixman_fixed_1;
+
+	channel->transform = &channel->embedded_transform;
+	channel->is_affine = 1;
+
+	DBG(("%s: dx=%f, dy=%f, offset=%f\n",
+	     __FUNCTION__, dx, dy, -dx*(x0-x+dst_x) + -dy*(y0-y+dst_y)));
+
+	return channel->bo != NULL;
+}
+
 static int
 gen4_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -1838,7 +1952,13 @@ gen4_composite_picture(struct sna *sna,
 		return gen4_composite_solid_init(sna, channel, color);
 
 	if (picture->pDrawable == NULL) {
-		DBG(("%s: procedural source fixup\n", __FUNCTION__));
+		if (picture->pSourcePict->type == SourcePictTypeLinear)
+			return gen4_composite_linear_init(sna, picture, channel,
+							  x, y,
+							  w, h,
+							  dst_x, dst_y);
+
+		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
 	}
@@ -2004,7 +2124,13 @@ is_gradient(PicturePtr picture)
 	if (picture->pDrawable)
 		return FALSE;
 
-	return picture->pSourcePict->type != SourcePictTypeSolidFill;
+	switch (picture->pSourcePict->type) {
+	case SourcePictTypeSolidFill:
+	case SourcePictTypeLinear:
+		return FALSE;
+	default:
+		return TRUE;
+	}
 }
 
 static bool
commit 69d6a0387b8eec8396631714a0564b7e28d187ab
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 10:43:24 2012 +0000

    sna/gen5: Hook in the poor-man's linear gradient
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 18325b5..bce5a3c 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1862,6 +1862,120 @@ gen5_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
+static Bool
+gen5_composite_linear_init(struct sna *sna,
+			   PicturePtr picture,
+			   struct sna_composite_channel *channel,
+			   int x, int y,
+			   int w, int h,
+			   int dst_x, int dst_y)
+{
+	PictLinearGradient *linear =
+		(PictLinearGradient *)picture->pSourcePict;
+	pixman_fixed_t tx, ty;
+	float x0, y0, sf;
+	float dx, dy;
+
+	DBG(("%s: p1=(%f, %f), p2=(%f, %f), src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n",
+	     __FUNCTION__,
+	     pixman_fixed_to_double(linear->p1.x), pixman_fixed_to_double(linear->p1.y),
+	     pixman_fixed_to_double(linear->p2.x), pixman_fixed_to_double(linear->p2.y),
+	     x, y, dst_x, dst_y, w, h));
+
+	if (linear->p2.x == linear->p1.x && linear->p2.y == linear->p1.y)
+		return 0;
+
+	if (!sna_transform_is_affine(picture->transform)) {
+		DBG(("%s: fallback due to projective transform\n",
+		     __FUNCTION__));
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, w, h, dst_x, dst_y);
+	}
+
+	channel->bo = sna_render_get_gradient(sna, (PictGradient *)linear);
+	if (!channel->bo)
+		return 0;
+
+	channel->filter = PictFilterBilinear;
+	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
+	channel->width  = channel->bo->pitch / 4;
+	channel->height = 1;
+	channel->pict_format = PICT_a8r8g8b8;
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+
+	if (sna_transform_is_translation(picture->transform, &tx, &ty)) {
+		dx = pixman_fixed_to_double(linear->p2.x - linear->p1.x);
+		dy = pixman_fixed_to_double(linear->p2.y - linear->p1.y);
+
+		x0 = pixman_fixed_to_double(linear->p1.x);
+		y0 = pixman_fixed_to_double(linear->p1.y);
+
+		if (tx | ty) {
+			x0 -= pixman_fixed_to_double(tx);
+			y0 -= pixman_fixed_to_double(ty);
+		}
+	} else {
+		struct pixman_f_vector p1, p2;
+		struct pixman_f_transform m, inv;
+
+		pixman_f_transform_from_pixman_transform(&m, picture->transform);
+		DBG(("%s: transform = [%f %f %f, %f %f %f, %f %f %f]\n",
+		     __FUNCTION__,
+		     m.m[0][0], m.m[0][1], m.m[0][2],
+		     m.m[1][0], m.m[1][1], m.m[1][2],
+		     m.m[2][0], m.m[2][1], m.m[2][2]));
+		if (!pixman_f_transform_invert(&inv, &m))
+			return 0;
+
+		p1.v[0] = pixman_fixed_to_double(linear->p1.x);
+		p1.v[1] = pixman_fixed_to_double(linear->p1.y);
+		p1.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p1);
+
+		p2.v[0] = pixman_fixed_to_double(linear->p2.x);
+		p2.v[1] = pixman_fixed_to_double(linear->p2.y);
+		p2.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p2);
+
+		DBG(("%s: untransformed: p1=(%f, %f, %f), p2=(%f, %f, %f)\n",
+		     __FUNCTION__,
+		     p1.v[0], p1.v[1], p1.v[2],
+		     p2.v[0], p2.v[1], p2.v[2]));
+
+		dx = p2.v[0] - p1.v[0];
+		dy = p2.v[1] - p1.v[1];
+
+		x0 = p1.v[0];
+		y0 = p1.v[1];
+	}
+
+	sf = dx*dx + dy*dy;
+	dx /= sf;
+	dy /= sf;
+
+	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
+	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
+
+	channel->embedded_transform.matrix[1][0] = 0;
+	channel->embedded_transform.matrix[1][1] = 0;
+	channel->embedded_transform.matrix[1][2] = pixman_double_to_fixed(.5);
+
+	channel->embedded_transform.matrix[2][0] = 0;
+	channel->embedded_transform.matrix[2][1] = 0;
+	channel->embedded_transform.matrix[2][2] = pixman_fixed_1;
+
+	channel->transform = &channel->embedded_transform;
+	channel->is_affine = 1;
+
+	DBG(("%s: dx=%f, dy=%f, offset=%f\n",
+	     __FUNCTION__, dx, dy, -dx*(x0-x+dst_x) + -dy*(y0-y+dst_y)));
+
+	return channel->bo != NULL;
+}
+
 static int
 gen5_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -1883,9 +1997,17 @@ gen5_composite_picture(struct sna *sna,
 	if (sna_picture_is_solid(picture, &color))
 		return gen5_composite_solid_init(sna, channel, color);
 
-	if (picture->pDrawable == NULL)
+	if (picture->pDrawable == NULL) {
+		if (picture->pSourcePict->type == SourcePictTypeLinear)
+			return gen5_composite_linear_init(sna, picture, channel,
+							  x, y,
+							  w, h,
+							  dst_x, dst_y);
+
+		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
+	}
 
 	if (picture->alphaMap) {
 		DBG(("%s -- fallback, alphamap\n", __FUNCTION__));
@@ -2048,7 +2170,13 @@ is_gradient(PicturePtr picture)
 	if (picture->pDrawable)
 		return FALSE;
 
-	return picture->pSourcePict->type != SourcePictTypeSolidFill;
+	switch (picture->pSourcePict->type) {
+	case SourcePictTypeSolidFill:
+	case SourcePictTypeLinear:
+		return FALSE;
+	default:
+		return TRUE;
+	}
 }
 
 static bool
commit b0d3c4f661bace84b78b194eaeee7bfa009a32cf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 10:43:24 2012 +0000

    sna/gen7: Hook in the poor-man's linear gradient
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index a401d94..5829ae3 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2150,6 +2150,120 @@ gen7_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
+static Bool
+gen7_composite_linear_init(struct sna *sna,
+			   PicturePtr picture,
+			   struct sna_composite_channel *channel,
+			   int x, int y,
+			   int w, int h,
+			   int dst_x, int dst_y)
+{
+	PictLinearGradient *linear =
+		(PictLinearGradient *)picture->pSourcePict;
+	pixman_fixed_t tx, ty;
+	float x0, y0, sf;
+	float dx, dy;
+
+	DBG(("%s: p1=(%f, %f), p2=(%f, %f), src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n",
+	     __FUNCTION__,
+	     pixman_fixed_to_double(linear->p1.x), pixman_fixed_to_double(linear->p1.y),
+	     pixman_fixed_to_double(linear->p2.x), pixman_fixed_to_double(linear->p2.y),
+	     x, y, dst_x, dst_y, w, h));
+
+	if (linear->p2.x == linear->p1.x && linear->p2.y == linear->p1.y)
+		return 0;
+
+	if (!sna_transform_is_affine(picture->transform)) {
+		DBG(("%s: fallback due to projective transform\n",
+		     __FUNCTION__));
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, w, h, dst_x, dst_y);
+	}
+
+	channel->bo = sna_render_get_gradient(sna, (PictGradient *)linear);
+	if (!channel->bo)
+		return 0;
+
+	channel->filter = PictFilterBilinear;
+	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
+	channel->width  = channel->bo->pitch / 4;
+	channel->height = 1;
+	channel->pict_format = PICT_a8r8g8b8;
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+
+	if (sna_transform_is_translation(picture->transform, &tx, &ty)) {
+		dx = pixman_fixed_to_double(linear->p2.x - linear->p1.x);
+		dy = pixman_fixed_to_double(linear->p2.y - linear->p1.y);
+
+		x0 = pixman_fixed_to_double(linear->p1.x);
+		y0 = pixman_fixed_to_double(linear->p1.y);
+
+		if (tx | ty) {
+			x0 -= pixman_fixed_to_double(tx);
+			y0 -= pixman_fixed_to_double(ty);
+		}
+	} else {
+		struct pixman_f_vector p1, p2;
+		struct pixman_f_transform m, inv;
+
+		pixman_f_transform_from_pixman_transform(&m, picture->transform);
+		DBG(("%s: transform = [%f %f %f, %f %f %f, %f %f %f]\n",
+		     __FUNCTION__,
+		     m.m[0][0], m.m[0][1], m.m[0][2],
+		     m.m[1][0], m.m[1][1], m.m[1][2],
+		     m.m[2][0], m.m[2][1], m.m[2][2]));
+		if (!pixman_f_transform_invert(&inv, &m))
+			return 0;
+
+		p1.v[0] = pixman_fixed_to_double(linear->p1.x);
+		p1.v[1] = pixman_fixed_to_double(linear->p1.y);
+		p1.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p1);
+
+		p2.v[0] = pixman_fixed_to_double(linear->p2.x);
+		p2.v[1] = pixman_fixed_to_double(linear->p2.y);
+		p2.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p2);
+
+		DBG(("%s: untransformed: p1=(%f, %f, %f), p2=(%f, %f, %f)\n",
+		     __FUNCTION__,
+		     p1.v[0], p1.v[1], p1.v[2],
+		     p2.v[0], p2.v[1], p2.v[2]));
+
+		dx = p2.v[0] - p1.v[0];
+		dy = p2.v[1] - p1.v[1];
+
+		x0 = p1.v[0];
+		y0 = p1.v[1];
+	}
+
+	sf = dx*dx + dy*dy;
+	dx /= sf;
+	dy /= sf;
+
+	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
+	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
+
+	channel->embedded_transform.matrix[1][0] = 0;
+	channel->embedded_transform.matrix[1][1] = 0;
+	channel->embedded_transform.matrix[1][2] = pixman_double_to_fixed(.5);
+
+	channel->embedded_transform.matrix[2][0] = 0;
+	channel->embedded_transform.matrix[2][1] = 0;
+	channel->embedded_transform.matrix[2][2] = pixman_fixed_1;
+
+	channel->transform = &channel->embedded_transform;
+	channel->is_affine = 1;
+
+	DBG(("%s: dx=%f, dy=%f, offset=%f\n",
+	     __FUNCTION__, dx, dy, -dx*(x0-x+dst_x) + -dy*(y0-y+dst_y)));
+
+	return channel->bo != NULL;
+}
+
 static int
 gen7_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -2171,9 +2285,17 @@ gen7_composite_picture(struct sna *sna,
 	if (sna_picture_is_solid(picture, &color))
 		return gen7_composite_solid_init(sna, channel, color);
 
-	if (picture->pDrawable == NULL)
+	if (picture->pDrawable == NULL) {
+		if (picture->pSourcePict->type == SourcePictTypeLinear)
+			return gen7_composite_linear_init(sna, picture, channel,
+							  x, y,
+							  w, h,
+							  dst_x, dst_y);
+
+		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
+	}
 
 	if (picture->alphaMap) {
 		DBG(("%s -- fallback, alphamap\n", __FUNCTION__));
@@ -2337,7 +2459,13 @@ is_gradient(PicturePtr picture)
 	if (picture->pDrawable)
 		return FALSE;
 
-	return picture->pSourcePict->type != SourcePictTypeSolidFill;
+	switch (picture->pSourcePict->type) {
+	case SourcePictTypeSolidFill:
+	case SourcePictTypeLinear:
+		return FALSE;
+	default:
+		return TRUE;
+	}
 }
 
 static bool
commit dcc364a7b190ce9cec2ff35aa22c10529f84e4c3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Mar 7 10:40:50 2012 +0000

    sna/gen6: Add poor-man's linear implementation
    
    Still no JIT, in the meantime we can at least cache the gradient ramps.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 71c0046..9eb4221 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1420,141 +1420,72 @@ gen6_emit_composite_primitive_identity_source_mask(struct sna *sna,
 	v[14] = msk_y * op->mask.scale[1];
 }
 
-fastcall static void
-gen6_emit_composite_primitive(struct sna *sna,
-			      const struct sna_composite_op *op,
-			      const struct sna_composite_rectangles *r)
+inline static void
+gen6_emit_composite_texcoord(struct sna *sna,
+			     const struct sna_composite_channel *channel,
+			     int16_t x, int16_t y)
 {
-	float src_x[3], src_y[3], src_w[3], mask_x[3], mask_y[3], mask_w[3];
-	Bool is_affine = op->is_affine;
-	const float *src_sf = op->src.scale;
-	const float *mask_sf = op->mask.scale;
-
-	if (is_affine) {
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
-						r->src.y + op->src.offset[1],
-						op->src.transform,
-						&src_x[0],
-						&src_y[0]);
-
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0],
-						r->src.y + op->src.offset[1] + r->height,
-						op->src.transform,
-						&src_x[1],
-						&src_y[1]);
-
-		sna_get_transformed_coordinates(r->src.x + op->src.offset[0] + r->width,
-						r->src.y + op->src.offset[1] + r->height,
-						op->src.transform,
-						&src_x[2],
-						&src_y[2]);
-	} else {
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1],
-							op->src.transform,
-							&src_x[0],
-							&src_y[0],
-							&src_w[0]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0],
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[1],
-							&src_y[1],
-							&src_w[1]))
-			return;
-
-		if (!sna_get_transformed_coordinates_3d(r->src.x + op->src.offset[0] + r->width,
-							r->src.y + op->src.offset[1] + r->height,
-							op->src.transform,
-							&src_x[2],
-							&src_y[2],
-							&src_w[2]))
-			return;
-	}
+	x += channel->offset[0];
+	y += channel->offset[1];
 
-	if (op->mask.bo) {
-		if (is_affine) {
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
-							r->mask.y + op->mask.offset[1],
-							op->mask.transform,
-							&mask_x[0],
-							&mask_y[0]);
-
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0],
-							r->mask.y + op->mask.offset[1] + r->height,
-							op->mask.transform,
-							&mask_x[1],
-							&mask_y[1]);
-
-			sna_get_transformed_coordinates(r->mask.x + op->mask.offset[0] + r->width,
-							r->mask.y + op->mask.offset[1] + r->height,
-							op->mask.transform,
-							&mask_x[2],
-							&mask_y[2]);
-		} else {
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1],
-								op->mask.transform,
-								&mask_x[0],
-								&mask_y[0],
-								&mask_w[0]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0],
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[1],
-								&mask_y[1],
-								&mask_w[1]))
-				return;
-
-			if (!sna_get_transformed_coordinates_3d(r->mask.x + op->mask.offset[0] + r->width,
-								r->mask.y + op->mask.offset[1] + r->height,
-								op->mask.transform,
-								&mask_x[2],
-								&mask_y[2],
-								&mask_w[2]))
-				return;
-		}
-	}
+	if (channel->is_affine) {
+		float s, t;
 
-	OUT_VERTEX(r->dst.x + r->width, r->dst.y + r->height);
-	OUT_VERTEX_F(src_x[2] * src_sf[0]);
-	OUT_VERTEX_F(src_y[2] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[2]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[2] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[2] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[2]);
-	}
+		sna_get_transformed_coordinates(x, y,
+						channel->transform,
+						&s, &t);
+		OUT_VERTEX_F(s * channel->scale[0]);
+		OUT_VERTEX_F(t * channel->scale[1]);
+	} else {
+		float s, t, w;
 
-	OUT_VERTEX(r->dst.x, r->dst.y + r->height);
-	OUT_VERTEX_F(src_x[1] * src_sf[0]);
-	OUT_VERTEX_F(src_y[1] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[1]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[1] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[1] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[1]);
+		sna_get_transformed_coordinates_3d(x, y,
+						   channel->transform,
+						   &s, &t, &w);
+		OUT_VERTEX_F(s * channel->scale[0]);
+		OUT_VERTEX_F(t * channel->scale[1]);
+		OUT_VERTEX_F(w);
 	}
+}
 
-	OUT_VERTEX(r->dst.x, r->dst.y);
-	OUT_VERTEX_F(src_x[0] * src_sf[0]);
-	OUT_VERTEX_F(src_y[0] * src_sf[1]);
-	if (!is_affine)
-		OUT_VERTEX_F(src_w[0]);
-	if (op->mask.bo) {
-		OUT_VERTEX_F(mask_x[0] * mask_sf[0]);
-		OUT_VERTEX_F(mask_y[0] * mask_sf[1]);
-		if (!is_affine)
-			OUT_VERTEX_F(mask_w[0]);
-	}
+static void
+gen6_emit_composite_vertex(struct sna *sna,
+			   const struct sna_composite_op *op,
+			   int16_t srcX, int16_t srcY,
+			   int16_t mskX, int16_t mskY,
+			   int16_t dstX, int16_t dstY)
+{
+	OUT_VERTEX(dstX, dstY);
+	gen6_emit_composite_texcoord(sna, &op->src, srcX, srcY);
+	gen6_emit_composite_texcoord(sna, &op->mask, mskX, mskY);
+}
+
+fastcall static void
+gen6_emit_composite_primitive(struct sna *sna,
+			      const struct sna_composite_op *op,
+			      const struct sna_composite_rectangles *r)
+{
+	gen6_emit_composite_vertex(sna, op,
+				   r->src.x + r->width,
+				   r->src.y + r->height,
+				   r->mask.x + r->width,
+				   r->mask.y + r->height,
+				   op->dst.x + r->dst.x + r->width,
+				   op->dst.y + r->dst.y + r->height);
+	gen6_emit_composite_vertex(sna, op,
+				   r->src.x,
+				   r->src.y + r->height,
+				   r->mask.x,
+				   r->mask.y + r->height,
+				   op->dst.x + r->dst.x,
+				   op->dst.y + r->dst.y + r->height);
+	gen6_emit_composite_vertex(sna, op,
+				   r->src.x,
+				   r->src.y,
+				   r->mask.x,
+				   r->mask.y,
+				   op->dst.x + r->dst.x,
+				   op->dst.y + r->dst.y);
 }
 
 static void gen6_emit_vertex_buffer(struct sna *sna,
@@ -2123,6 +2054,120 @@ gen6_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
+static Bool
+gen6_composite_linear_init(struct sna *sna,
+			   PicturePtr picture,
+			   struct sna_composite_channel *channel,
+			   int x, int y,
+			   int w, int h,
+			   int dst_x, int dst_y)
+{
+	PictLinearGradient *linear =
+		(PictLinearGradient *)picture->pSourcePict;
+	pixman_fixed_t tx, ty;
+	float x0, y0, sf;
+	float dx, dy;
+
+	DBG(("%s: p1=(%f, %f), p2=(%f, %f), src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n",
+	     __FUNCTION__,
+	     pixman_fixed_to_double(linear->p1.x), pixman_fixed_to_double(linear->p1.y),
+	     pixman_fixed_to_double(linear->p2.x), pixman_fixed_to_double(linear->p2.y),
+	     x, y, dst_x, dst_y, w, h));
+
+	if (linear->p2.x == linear->p1.x && linear->p2.y == linear->p1.y)
+		return 0;
+
+	if (!sna_transform_is_affine(picture->transform)) {
+		DBG(("%s: fallback due to projective transform\n",
+		     __FUNCTION__));
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, w, h, dst_x, dst_y);
+	}
+
+	channel->bo = sna_render_get_gradient(sna, (PictGradient *)linear);
+	if (!channel->bo)
+		return 0;
+
+	channel->filter = PictFilterBilinear;
+	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
+	channel->width  = channel->bo->pitch / 4;
+	channel->height = 1;
+	channel->pict_format = PICT_a8r8g8b8;
+
+	channel->scale[0]  = channel->scale[1]  = 1;
+	channel->offset[0] = channel->offset[1] = 0;
+
+	if (sna_transform_is_translation(picture->transform, &tx, &ty)) {
+		dx = pixman_fixed_to_double(linear->p2.x - linear->p1.x);
+		dy = pixman_fixed_to_double(linear->p2.y - linear->p1.y);
+
+		x0 = pixman_fixed_to_double(linear->p1.x);
+		y0 = pixman_fixed_to_double(linear->p1.y);
+
+		if (tx | ty) {
+			x0 -= pixman_fixed_to_double(tx);
+			y0 -= pixman_fixed_to_double(ty);
+		}
+	} else {
+		struct pixman_f_vector p1, p2;
+		struct pixman_f_transform m, inv;
+
+		pixman_f_transform_from_pixman_transform(&m, picture->transform);
+		DBG(("%s: transform = [%f %f %f, %f %f %f, %f %f %f]\n",
+		     __FUNCTION__,
+		     m.m[0][0], m.m[0][1], m.m[0][2],
+		     m.m[1][0], m.m[1][1], m.m[1][2],
+		     m.m[2][0], m.m[2][1], m.m[2][2]));
+		if (!pixman_f_transform_invert(&inv, &m))
+			return 0;
+
+		p1.v[0] = pixman_fixed_to_double(linear->p1.x);
+		p1.v[1] = pixman_fixed_to_double(linear->p1.y);
+		p1.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p1);
+
+		p2.v[0] = pixman_fixed_to_double(linear->p2.x);
+		p2.v[1] = pixman_fixed_to_double(linear->p2.y);
+		p2.v[2] = 1.;
+		pixman_f_transform_point(&inv, &p2);
+
+		DBG(("%s: untransformed: p1=(%f, %f, %f), p2=(%f, %f, %f)\n",
+		     __FUNCTION__,
+		     p1.v[0], p1.v[1], p1.v[2],
+		     p2.v[0], p2.v[1], p2.v[2]));
+
+		dx = p2.v[0] - p1.v[0];
+		dy = p2.v[1] - p1.v[1];
+
+		x0 = p1.v[0];
+		y0 = p1.v[1];
+	}
+
+	sf = dx*dx + dy*dy;
+	dx /= sf;
+	dy /= sf;
+
+	channel->embedded_transform.matrix[0][0] = pixman_double_to_fixed(dx);
+	channel->embedded_transform.matrix[0][1] = pixman_double_to_fixed(dy);
+	channel->embedded_transform.matrix[0][2] = -pixman_double_to_fixed(dx*(x0+dst_x-x) + dy*(y0+dst_y-y));
+
+	channel->embedded_transform.matrix[1][0] = 0;
+	channel->embedded_transform.matrix[1][1] = 0;
+	channel->embedded_transform.matrix[1][2] = pixman_double_to_fixed(.5);
+
+	channel->embedded_transform.matrix[2][0] = 0;
+	channel->embedded_transform.matrix[2][1] = 0;
+	channel->embedded_transform.matrix[2][2] = pixman_fixed_1;
+
+	channel->transform = &channel->embedded_transform;
+	channel->is_affine = 1;
+
+	DBG(("%s: dx=%f, dy=%f, offset=%f\n",
+	     __FUNCTION__, dx, dy, -dx*(x0-x+dst_x) + -dy*(y0-y+dst_y)));
+
+	return channel->bo != NULL;
+}
+
 static int
 gen6_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -2144,12 +2189,20 @@ gen6_composite_picture(struct sna *sna,
 	if (sna_picture_is_solid(picture, &color))
 		return gen6_composite_solid_init(sna, channel, color);
 
-	if (picture->pDrawable == NULL)
+	if (picture->pDrawable == NULL) {
+		if (picture->pSourcePict->type == SourcePictTypeLinear)
+			return gen6_composite_linear_init(sna, picture, channel,
+							  x, y,
+							  w, h,
+							  dst_x, dst_y);
+
+		DBG(("%s -- fixup, gradient\n", __FUNCTION__));
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
+	}
 
 	if (picture->alphaMap) {
-		DBG(("%s -- fallback, alphamap\n", __FUNCTION__));
+		DBG(("%s -- fixup, alphamap\n", __FUNCTION__));
 		return sna_render_picture_fixup(sna, picture, channel,
 						x, y, w, h, dst_x, dst_y);
 	}
@@ -2326,7 +2379,13 @@ is_gradient(PicturePtr picture)
 	if (picture->pDrawable)
 		return FALSE;
 
-	return picture->pSourcePict->type != SourcePictTypeSolidFill;
+	switch (picture->pSourcePict->type) {
+	case SourcePictTypeSolidFill:
+	case SourcePictTypeLinear:
+		return FALSE;
+	default:
+		return TRUE;
+	}
 }
 
 static bool
@@ -2740,32 +2799,6 @@ gen6_composite_alpha_gradient_init(struct sna *sna,
 }
 
 inline static void
-gen6_emit_composite_texcoord(struct sna *sna,
-			     const struct sna_composite_channel *channel,
-			     int16_t x, int16_t y)
-{
-	float t[3];
-
-	if (channel->is_affine) {
-		sna_get_transformed_coordinates(x + channel->offset[0],
-						y + channel->offset[1],
-						channel->transform,
-						&t[0], &t[1]);
-		OUT_VERTEX_F(t[0] * channel->scale[0]);
-		OUT_VERTEX_F(t[1] * channel->scale[1]);
-	} else {
-		t[0] = t[1] = 0; t[2] = 1;
-		sna_get_transformed_coordinates_3d(x + channel->offset[0],
-						   y + channel->offset[1],
-						   channel->transform,
-						   &t[0], &t[1], &t[2]);
-		OUT_VERTEX_F(t[0] * channel->scale[0]);
-		OUT_VERTEX_F(t[1] * channel->scale[1]);
-		OUT_VERTEX_F(t[2]);
-	}
-}
-
-inline static void
 gen6_emit_composite_texcoord_affine(struct sna *sna,
 				    const struct sna_composite_channel *channel,
 				    int16_t x, int16_t y)
commit 232972c0e5bd833c1d0b33432fa8092c601fd0e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Mar 6 12:17:03 2012 +0000

    sna: Remove the 2-step damage flush
    
    The idea was to reduce the number of unnecessary flushes by checking for
    outgoing damage (could be refined further by inspecting the reply/event
    callback for a XDamageNotifyEvent). However, it does not flush
    sufficiently for the compositors' liking. As it doesn't appear to restore
    performance to near uncomposited levels anyway, remove the complication.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a8737b5..ce3afae 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11459,31 +11459,20 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 }
 
 static void
-sna_accel_reply_callback(CallbackListPtr *list,
-			 pointer user_data, pointer call_data)
-{
-	struct sna *sna = user_data;
-
-	if (sna->flush)
-		return;
-
-	/* Assume each callback corresponds to a new request. The use
-	 * of continuation WriteToClients in the server is relatively rare,
-	 * and we err on the side of safety.
-	 */
-	sna->flush = (sna->kgem.flush || sna->kgem.sync ||
-		      !list_is_empty(&sna->dirty_pixmaps));
-}
-
-static void
 sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
 	struct list preserve;
 
-	if (!sna->flush)
-		return;
+	/* XXX we should be able to reduce the frequency of flushes further
+	 * by checking for outgoing damage events or sync replies. Tricky,
+	 * and doesn't appear to mitigate the performance loss.
+	 */
+	if (!(sna->kgem.flush ||
+	      sna->kgem.sync ||
+	      !list_is_empty(&sna->dirty_pixmaps)))
+	    return;
 
 	DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__,
 	     sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps)));
@@ -11934,8 +11923,7 @@ void sna_accel_watch_flush(struct sna *sna, int enable)
 	if (sna->watch_flush == 0) {
 		DBG(("%s: installing watchers\n", __FUNCTION__));
 		assert(enable > 0);
-		if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna) ||
-		    !AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) {
+		if (!AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) {
 			xf86DrvMsg(sna->scrn->scrnIndex, X_Error,
 				   "Failed to attach ourselves to the flush callbacks, expect missing synchronisation with DRI clients (e.g a compositor)\n");
 		}
@@ -11959,7 +11947,6 @@ void sna_accel_close(struct sna *sna)
 	sna_glyphs_close(sna);
 
 	DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
-	DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
 
 	kgem_cleanup_cache(&sna->kgem);
 }
@@ -11989,7 +11976,6 @@ void sna_accel_block_handler(struct sna *sna)
 
 	if (sna->flush == 0 && sna->watch_flush == 1) {
 		DBG(("%s: removing watchers\n", __FUNCTION__));
-		DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
 		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
 		sna->watch_flush = 0;
 	}
commit eb10ade0fc8e21cd01c0d8acba7891fd7f2aa222
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 5 22:55:57 2012 +0000

    sna: Defer the FlushCallback removal until after the next flush
    
    Try to reduce the amount of Add/Delete ping-pong, in particular around
    the recreation of the DRI2 attachment to the scanout after pageflipping.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1dc0b99..a8737b5 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11482,7 +11482,6 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	struct sna *sna = user_data;
 	struct list preserve;
 
-	assert(sna->watch_flush);
 	if (!sna->flush)
 		return;
 
@@ -11929,22 +11928,21 @@ Bool sna_accel_create(struct sna *sna)
 
 void sna_accel_watch_flush(struct sna *sna, int enable)
 {
+	DBG(("%s: enable=%d\n", __FUNCTION__, enable));
+	assert(enable);
+
 	if (sna->watch_flush == 0) {
+		DBG(("%s: installing watchers\n", __FUNCTION__));
 		assert(enable > 0);
 		if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna) ||
-		    AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) {
+		    !AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) {
 			xf86DrvMsg(sna->scrn->scrnIndex, X_Error,
 				   "Failed to attach ourselves to the flush callbacks, expect missing synchronisation with DRI clients (e.g a compositor)\n");
 		}
+		sna->watch_flush++;
 	}
 
 	sna->watch_flush += enable;
-
-	if (sna->watch_flush == 0) {
-		assert(enable < 0);
-		DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
-		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
-	}
 }
 
 void sna_accel_close(struct sna *sna)
@@ -11989,6 +11987,13 @@ void sna_accel_block_handler(struct sna *sna)
 	if (sna_accel_do_inactive(sna))
 		sna_accel_inactive(sna);
 
+	if (sna->flush == 0 && sna->watch_flush == 1) {
+		DBG(("%s: removing watchers\n", __FUNCTION__));
+		DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
+		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
+		sna->watch_flush = 0;
+	}
+
 	sna->timer_ready = 0;
 }
 
commit 60dacdb127599606db13ad111af8ce26c1141da5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 5 22:46:20 2012 +0000

    sna: Only install the flush callback for the duration of the foreign buffer
    
    After we are no longer sharing the bo with foreign clients, we no longer
    need to keep flushing before every X_Reply and so we can remove the
    callbacks to remove the overhead of having to check every time.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 1196cce..119244d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -221,6 +221,7 @@ struct sna {
 #define SNA_NO_THROTTLE		0x1
 #define SNA_NO_DELAYED_FLUSH	0x2
 
+	unsigned watch_flush;
 	unsigned flush;
 
 	int timer[NUM_TIMERS];
@@ -560,6 +561,7 @@ Bool sna_accel_pre_init(struct sna *sna);
 Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
 void sna_accel_block_handler(struct sna *sna);
 void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready);
+void sna_accel_watch_flush(struct sna *sna, int enable);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 460fbb3..1dc0b99 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -345,8 +345,10 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
-		if (priv->cpu_bo->sync)
+		if (priv->cpu_bo->sync) {
 			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+			sna_accel_watch_flush(sna, -1);
+		}
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
 	} else
@@ -617,6 +619,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 					      bpp, pitch, addr);
 	}
 	kgem_bo_set_sync(&sna->kgem, priv->cpu_bo);
+	sna_accel_watch_flush(sna, 1);
 	priv->cpu_bo->pitch = pitch;
 
 	priv->header = true;
@@ -11479,6 +11482,7 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	struct sna *sna = user_data;
 	struct list preserve;
 
+	assert(sna->watch_flush);
 	if (!sna->flush)
 		return;
 
@@ -11828,11 +11832,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 {
 	const char *backend;
 
-	if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna))
-		return FALSE;
-	if (!AddCallback(&FlushCallback, sna_accel_flush_callback, sna))
-		return FALSE;
-
 	sna_font_key = AllocateFontPrivateIndex();
 	screen->RealizeFont = sna_realize_font;
 	screen->UnrealizeFont = sna_unrealize_font;
@@ -11928,6 +11927,26 @@ Bool sna_accel_create(struct sna *sna)
 	return TRUE;
 }
 
+void sna_accel_watch_flush(struct sna *sna, int enable)
+{
+	if (sna->watch_flush == 0) {
+		assert(enable > 0);
+		if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna) ||
+		    AddCallback(&FlushCallback, sna_accel_flush_callback, sna)) {
+			xf86DrvMsg(sna->scrn->scrnIndex, X_Error,
+				   "Failed to attach ourselves to the flush callbacks, expect missing synchronisation with DRI clients (e.g a compositor)\n");
+		}
+	}
+
+	sna->watch_flush += enable;
+
+	if (sna->watch_flush == 0) {
+		assert(enable < 0);
+		DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
+		DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
+	}
+}
+
 void sna_accel_close(struct sna *sna)
 {
 	if (sna->freed_pixmap) {
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index fe3d1cf..3909b84 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -178,6 +178,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	 * As we don't track which Client, we flush for all.
 	 */
 	priv->flush = 1;
+	sna_accel_watch_flush(sna, 1);
 
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
@@ -324,6 +325,7 @@ static void _sna_dri_destroy_buffer(struct sna *sna, DRI2Buffer2Ptr buffer)
 
 			/* Undo the DRI markings on this pixmap */
 			list_del(&priv->list);
+			sna_accel_watch_flush(sna, -1);
 			priv->pinned = private->pixmap == sna->front;
 			priv->flush = 0;
 
commit b39d9f9166547effe066acfc3327dd88a019d273
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 5 22:29:38 2012 +0000

    sna: Check for flush at the start of every WriteToClient
    
    The goal is to simply avoid the flush before going to sleep when we have
    no pending events. That is we only want to flush when we know there will
     be at least one X_Reply sent to a Client. (Preferably, it would be a Damage
     reply!) We can safely assume that every WriteToClient marks the beginning
    of a new reply added to the Client output queue and thus know that upon
    the next flush event we will emitting a Reply and so need to submit our
    batches.
    
    Second attempt to fix a438e4ac.
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 709f29d..460fbb3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11460,11 +11460,14 @@ sna_accel_reply_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
-	ReplyInfoRec *info = call_data;
 
-	if (sna->flush || !info->startOfReply)
+	if (sna->flush)
 		return;
 
+	/* Assume each callback corresponds to a new request. The use
+	 * of continuation WriteToClients in the server is relatively rare,
+	 * and we err on the side of safety.
+	 */
 	sna->flush = (sna->kgem.flush || sna->kgem.sync ||
 		      !list_is_empty(&sna->dirty_pixmaps));
 }
commit f30b0beea4f5657a60eb5b286f41105298fa451a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 4 22:23:39 2012 +0000

    sna/trapezoids: Elide empty cells
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2d8b3b9..33ea3bb 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -1249,18 +1249,19 @@ tor_blt(struct sna *sna,
 		       cell->x, cell->covered_height, cell->uncovered_area,
 		       cover, xmax));
 
-		box.x2 = x;
-		if (box.x2 > box.x1 && (unbounded || cover)) {
-			__DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
-			       box.x1, box.y1,
-			       box.x2 - box.x1,
-			       box.y2 - box.y1,
-			       cover));
-			span(sna, op, clip, &box, cover);
+		if (cell->covered_height || cell->uncovered_area) {
+			box.x2 = x;
+			if (box.x2 > box.x1 && (unbounded || cover)) {
+				__DBG(("%s: span (%d, %d)x(%d, %d) @ %d\n", __FUNCTION__,
+				       box.x1, box.y1,
+				       box.x2 - box.x1,
+				       box.y2 - box.y1,
+				       cover));
+				span(sna, op, clip, &box, cover);
+			}
+			box.x1 = box.x2;
+			cover += cell->covered_height*FAST_SAMPLES_X*2;
 		}
-		box.x1 = box.x2;
-
-		cover += cell->covered_height*FAST_SAMPLES_X*2;
 
 		if (cell->uncovered_area) {
 			int area = cover - cell->uncovered_area;
commit b69c9dfae128ed69a397a066b8fbe62012742bf1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 5 21:05:34 2012 +0000

    sna/composite: Skip clipping the rectangle region against the singular clip
    
    As we will already have taken it into account when constructing the
    region from the rectangles.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 55a496e..7f1d096 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -563,7 +563,7 @@ _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 				       unsigned int num_rects,
 				       xRectangle *rects,
 				       int tx, int ty,
-				       int maxx, int maxy)
+				       BoxPtr extents)
 {
 	pixman_box16_t stack_boxes[64], *boxes = stack_boxes;
 	pixman_bool_t ret;
@@ -576,25 +576,21 @@ _pixman_region_init_clipped_rectangles(pixman_region16_t *region,
 	}
 
 	for (i = j = 0; i < num_rects; i++) {
-		boxes[j].x1 = rects[i].x;
-		if (boxes[j].x1 < 0)
-			boxes[j].x1 = 0;
-		boxes[j].x1 += tx;
-
-		boxes[j].y1 = rects[i].y;
-		if (boxes[j].y1 < 0)
-			boxes[j].y1 = 0;
-		boxes[j].y1 += ty;
-
-		boxes[j].x2 = bound(rects[i].x, rects[i].width);
-		if (boxes[j].x2 > maxx)
-			boxes[j].x2 = maxx;
-		boxes[j].x2 += tx;
-
-		boxes[j].y2 = bound(rects[i].y, rects[i].height);
-		if (boxes[j].y2 > maxy)
-			boxes[j].y2 = maxy;
-		boxes[j].y2 += ty;
+		boxes[j].x1 = rects[i].x + tx;
+		if (boxes[j].x1 < extents->x1)
+			boxes[j].x1 = extents->x1;
+
+		boxes[j].y1 = rects[i].y + ty;
+		if (boxes[j].y1 < extents->y1)
+			boxes[j].y1 = extents->y1;
+
+		boxes[j].x2 = bound(rects[i].x + tx, rects[i].width);
+		if (boxes[j].x2 > extents->x2)
+			boxes[j].x2 = extents->x2;
+
+		boxes[j].y2 = bound(rects[i].y + ty, rects[i].height);
+		if (boxes[j].y2 > extents->y2)
+			boxes[j].y2 = extents->y2;
 
 		if (boxes[j].x2 > boxes[j].x1 && boxes[j].y2 > boxes[j].y1)
 			j++;
@@ -689,8 +685,9 @@ sna_composite_rectangles(CARD8		 op,
 
 	if (!_pixman_region_init_clipped_rectangles(&region,
 						    num_rects, rects,
-						    dst->pDrawable->x, dst->pDrawable->y,
-						    dst->pDrawable->width, dst->pDrawable->height))
+						    dst->pDrawable->x,
+						    dst->pDrawable->y,
+						    &dst->pCompositeClip->extents))
 	{
 		DBG(("%s: allocation failed for region\n", __FUNCTION__));
 		return;
@@ -702,8 +699,9 @@ sna_composite_rectangles(CARD8		 op,
 	     RegionExtents(&region)->x2, RegionExtents(&region)->y2,
 	     RegionNumRects(&region)));
 
-	if (!pixman_region_intersect(&region, &region, dst->pCompositeClip) ||
-	    region_is_empty(&region)) {
+	if (dst->pCompositeClip->data &&
+	    (!pixman_region_intersect(&region, &region, dst->pCompositeClip) ||
+	     region_is_empty(&region))) {
 		DBG(("%s: zero-intersection between rectangles and clip\n",
 		     __FUNCTION__));
 		pixman_region_fini(&region);
commit f4846168a60feca7437f0039612ba9986c5f8d77
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Mar 5 21:04:25 2012 +0000

    sna: Flush dirty CPU damage before notifying the compositor
    
    Fixes regression from a438e4ac (sna: Revamp vmap support)
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 595b834..709f29d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11465,7 +11465,8 @@ sna_accel_reply_callback(CallbackListPtr *list,
 	if (sna->flush || !info->startOfReply)
 		return;
 
-	sna->flush = sna->kgem.flush || sna->kgem.sync;
+	sna->flush = (sna->kgem.flush || sna->kgem.sync ||
+		      !list_is_empty(&sna->dirty_pixmaps));
 }
 
 static void
commit d7600e4e7726bb8bd6c7256ad2baf195d4427c60
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 4 19:12:29 2012 +0000

    sna: Add some assertions to partial buffer list tracking
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f913369..279face 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1425,6 +1425,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
 		assert(next->base.list.prev == &bo->base.list);
 		assert(bo->base.io);
+		assert(bo->base.refcnt >= 1);
 
 		if (!bo->base.exec)
 			continue;
@@ -3366,6 +3367,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		flags &= ~KGEM_BUFFER_INPLACE;
 
 	list_for_each_entry(bo, &kgem->active_partials, base.list) {
+		assert(bo->base.io);
+		assert(bo->base.refcnt >= 1);
+
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
@@ -3415,6 +3419,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 
 	if (flags & KGEM_BUFFER_WRITE) {
 		list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
+			assert(bo->base.io);
+			assert(bo->base.refcnt == 1);
+
 			if (size > bytes(&bo->base))
 				continue;
 
@@ -3691,6 +3698,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 init:
 	bo->base.reusable = false;
 	assert(num_pages(&bo->base) == alloc);
+	assert(bo->base.io);
 	assert(!bo->need_io || !bo->base.needs_flush);
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
 
commit 3b5d556a93ac0afebf6c1dd02e0c92f6eb73f633
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Mar 4 15:48:33 2012 +0000

    sna: Fix assertion for checking inactive shadow buffers
    
    We may have an ordinary malloc with no CPU bo attached so check before
    dereferencing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index fc44b47..595b834 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11766,7 +11766,7 @@ static void sna_accel_inactive(struct sna *sna)
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 
-			assert(!priv->cpu_bo->sync);
+			assert(priv->cpu_bo == NULL || !priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 
commit aaed9e9722aa30a3d6dc9a3f07309655de65b6bd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 23:31:24 2012 +0000

    sna: Encourage promotion of snooped CPU bo to real GPU bo
    
    This fixes the regression in performance of fishietank on gen2. As
    the texture atlas is too large to be tiled, one might presume that it
    has the same performance characteristics as the snooped linear CPU
    buffer. It does not. Therefore if we attempt to reuse a vmap bo, promote
    it to a full GPU bo. This hopefully gains the benefit of avoiding the
    copy for single shot sources, but still gives us the benefit of avoiding
    the clflushes.
    
    On the plus side, it does prove that gen2 handles snoopable memory from
    both the blitter and the sampler!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 0266ea4..a345962 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -323,7 +323,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		}
 
 		if (priv->gpu_bo->tiling != I915_TILING_NONE &&
-		    priv->cpu_bo->pitch >= 4096) {
+		    (priv->cpu_bo->vmap || priv->cpu_bo->pitch >= 4096)) {
 			DBG(("%s: GPU bo exists and is tiled [%d], upload\n",
 			     __FUNCTION__, priv->gpu_bo->tiling));
 			return NULL;
@@ -332,21 +332,23 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
-		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
-			goto done;
+		if (priv->cpu_bo->vmap && priv->source_count > SOURCE_BIAS) {
+			DBG(("%s: promoting snooped CPU bo due to reuse\n",
+			     __FUNCTION__));
+			return NULL;
+		}
 
-		if (priv->source_count*w*h >= pixmap->drawable.width * pixmap->drawable.height &&
-		    I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_X,
-							   pixmap->drawable.width,
-							   pixmap->drawable.height,
-							   pixmap->drawable.bitsPerPixel)) {
+		if (priv->source_count++*w*h >= (int)pixmap->drawable.width * pixmap->drawable.height &&
+		     I915_TILING_NONE != kgem_choose_tiling(&sna->kgem, I915_TILING_X,
+							    pixmap->drawable.width,
+							    pixmap->drawable.height,
+							    pixmap->drawable.bitsPerPixel)) {
 			DBG(("%s: pitch (%d) requires tiling\n",
 			     __FUNCTION__, priv->cpu_bo->pitch));
 			return NULL;
 		}
 	}
 
-done:
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
 	return priv->cpu_bo;
@@ -528,7 +530,7 @@ sna_render_pixmap_bo(struct sna *sna,
 
 		if (priv->cpu_bo &&
 		    (DAMAGE_IS_ALL(priv->cpu_damage) || !priv->gpu_damage) &&
-		    priv->cpu_bo->pitch < 4096) {
+		    !priv->cpu_bo->vmap && priv->cpu_bo->pitch < 4096) {
 			channel->bo = kgem_bo_reference(priv->cpu_bo);
 			return 1;
 		}
commit 599cd0e8ef3080fc735860bef4e47107c1c05f9a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 20:18:32 2012 +0000

    sna: Align allocations with partial buffers to 64 bytes.
    
    A magic number required for so many functions of the GPU. In this
    particular case it is likely to be that the offset of a texture in the
    GTT has to have a minimum alignment of 64 bytes.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=46415
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cc80278..f913369 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3391,6 +3391,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				     __FUNCTION__, bo->write, flags));
 				continue;
 			}
+			assert(bo->mmapped || bo->need_io);
 		} else {
 			if (bo->write & KGEM_BUFFER_WRITE) {
 				DBG(("%s: skip write %x buffer, need %x\n",
@@ -3548,6 +3549,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			list_init(&bo->base.list);
 			free(old);
 
+			assert(bo->base.tiling == I915_TILING_NONE);
+			assert(num_pages(&bo->base) >= NUM_PAGES(size));
+
 			bo->mem = kgem_bo_map(kgem, &bo->base);
 			if (bo->mem) {
 				bo->need_io = false;
@@ -3564,11 +3568,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 #else
-	alloc = ALIGN(size, 64*1024) / PAGE_SIZE;
+	flags &= ~KGEM_BUFFER_INPLACE;
 #endif
 	/* Be more parsimonious with pwrite/pread buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
-		alloc = PAGE_ALIGN(size) / PAGE_SIZE;
+		alloc = NUM_PAGES(size);
 	flags &= ~KGEM_BUFFER_INPLACE;
 
 	if (kgem->has_vmap) {
@@ -3700,6 +3704,7 @@ init:
 	     __FUNCTION__, alloc, bo->base.handle));
 
 done:
+	bo->used = ALIGN(bo->used, 64);
 	/* adjust the position within the list to maintain decreasing order */
 	alloc = bytes(&bo->base) - bo->used;
 	{
commit 4918e309dfadaf14da6997468f08db03ac0a6327
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 18:18:48 2012 +0000

    sna: Silence an assertion failure during shutdown
    
    Clear the scanout flag on the front buffer during teardown to silence
    the debugger.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index bcd1191..e53b75f 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -784,6 +784,9 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 
 	sna_mode_remove_fb(sna);
 	if (sna->front) {
+		struct kgem_bo *bo = sna_pixmap_get_bo(sna->front);
+		if (bo)
+			kgem_bo_clear_scanout(&sna->kgem, bo); /* valgrind */
 		screen->DestroyPixmap(sna->front);
 		sna->front = NULL;
 	}
commit f890fc25c6c2ca358323be5a0d636c3f2ab4b298
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 18:11:56 2012 +0000

    sna: And fix compilation for last commit
    
    I skipped a GCC warning about the implicit function declaration, which
    of course results in a runtime silent death. Oops.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 5b81596..55a496e 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -740,15 +740,18 @@ sna_composite_rectangles(CARD8		 op,
 		goto fallback;
 	}
 
+	priv = sna_pixmap(pixmap);
+	if (priv == NULL) {
+		DBG(("%s: fallback, not attached\n", __FUNCTION__));
+		goto fallback;
+	}
+
 	/* If we going to be overwriting any CPU damage with a subsequent
 	 * operation, then we may as well delete it without moving it
 	 * first to the GPU.
 	 */
-	if (op <= PictOpSrc) {
-		priv = sna_pixmap_attach(pixmap);
-		if (priv)
-			sna_damage_subtract(&priv->cpu_damage, &region);
-	}
+	if (op <= PictOpSrc)
+		sna_damage_subtract(&priv->cpu_damage, &region);
 
 	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL) {
commit 4f853acfeccb92885f154b03125d5716591a87bd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 17:36:50 2012 +0000

    sna: Prevent backing pixmaps being created later
    
    We used to allow the backing pixmap to be created later in order to
    accommodate ShmPixmaps and ShmPutImage. However, they are now correctly
    handled upfront if we choose to accelerate those paths, and so all
    choice over whether to attach to a pixmap are made during creation and
    are invariant.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index c772d7d..1196cce 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -415,18 +415,6 @@ static inline Bool pixmap_is_scanout(PixmapPtr pixmap)
 	return pixmap == screen->GetScreenPixmap(screen);
 }
 
-struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap);
-inline static struct sna_pixmap *sna_pixmap_attach(PixmapPtr pixmap)
-{
-	struct sna_pixmap *priv;
-
-	priv = sna_pixmap(pixmap);
-	if (priv)
-		return priv;
-
-	return _sna_pixmap_attach(pixmap);
-}
-
 PixmapPtr sna_pixmap_create_upload(ScreenPtr screen,
 				   int width, int height, int depth,
 				   unsigned flags);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c427808..fc44b47 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -523,8 +523,7 @@ _sna_pixmap_reset(PixmapPtr pixmap)
 	return _sna_pixmap_init(priv, pixmap);
 }
 
-static struct sna_pixmap *__sna_pixmap_attach(struct sna *sna,
-					      PixmapPtr pixmap)
+static struct sna_pixmap *sna_pixmap_attach(struct sna *sna, PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
 
@@ -536,50 +535,6 @@ static struct sna_pixmap *__sna_pixmap_attach(struct sna *sna,
 	return _sna_pixmap_init(priv, pixmap);
 }
 
-struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
-{
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv;
-
-	DBG(("%s: serial=%ld, %dx%d, usage=%d\n",
-	     __FUNCTION__,
-	     pixmap->drawable.serialNumber,
-	     pixmap->drawable.width,
-	     pixmap->drawable.height,
-	     pixmap->usage_hint));
-
-	switch (pixmap->usage_hint) {
-	case CREATE_PIXMAP_USAGE_GLYPH_PICTURE:
-		DBG(("%s: not attaching due to crazy usage: %d\n",
-		     __FUNCTION__, pixmap->usage_hint));
-		return NULL;
-
-	case SNA_CREATE_FB:
-		/* We assume that the Screen pixmap will be pre-validated */
-		break;
-
-	default:
-		if (!kgem_can_create_2d(&sna->kgem,
-					pixmap->drawable.width,
-					pixmap->drawable.height,
-					pixmap->drawable.depth))
-			return NULL;
-		break;
-	}
-
-	priv = __sna_pixmap_attach(sna, pixmap);
-	if (priv == NULL)
-		return NULL;
-
-	DBG(("%s: created priv and marking all cpu damaged\n", __FUNCTION__));
-
-	sna_damage_all(&priv->cpu_damage,
-		       pixmap->drawable.width,
-		       pixmap->drawable.height);
-
-	return priv;
-}
-
 static inline PixmapPtr
 create_pixmap(struct sna *sna, ScreenPtr screen,
 	      int width, int height, int depth,
@@ -647,7 +602,7 @@ sna_pixmap_create_shm(ScreenPtr screen,
 		pixmap->drawable.depth = depth;
 		pixmap->drawable.bitsPerPixel = bpp;
 
-		priv = __sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(sna, pixmap);
 		if (!priv) {
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
@@ -729,7 +684,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 		pixmap->drawable.depth = depth;
 		pixmap->drawable.bitsPerPixel = bpp;
 
-		priv = __sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(sna, pixmap);
 		if (!priv) {
 			fbDestroyPixmap(pixmap);
 			return NullPixmap;
@@ -770,9 +725,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		usage = -1;
 		goto fallback;
 	}
-
-	if (!sna->have_render)
-		goto fallback;
+	assert(width && height);
 
 	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
 	if (flags == 0) {
@@ -811,7 +764,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		if (pixmap == NullPixmap)
 			return NullPixmap;
 
-		__sna_pixmap_attach(sna, pixmap);
+		sna_pixmap_attach(sna, pixmap);
 	} else {
 		struct sna_pixmap *priv;
 
@@ -827,7 +780,7 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		pixmap->devKind = pad;
 		pixmap->devPrivate.ptr = NULL;
 
-		priv = __sna_pixmap_attach(sna, pixmap);
+		priv = sna_pixmap_attach(sna, pixmap);
 		if (priv == NULL) {
 			free(pixmap);
 			goto fallback;
@@ -2080,7 +2033,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	DBG(("%s(pixmap=%p)\n", __FUNCTION__, pixmap));
 
-	priv = sna_pixmap_attach(pixmap);
+	priv = sna_pixmap(pixmap);
 	if (priv == NULL)
 		return NULL;
 
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 8c51a77..8420730 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1395,7 +1395,7 @@ prepare_blt_put(struct sna *sna,
 	op->done = nop_done;
 
 	src_bo = NULL;
-	priv = _sna_pixmap_attach(src);
+	priv = sna_pixmap(src);
 	if (priv)
 		src_bo = priv->cpu_bo;
 	if (src_bo) {
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 572d6ea..0266ea4 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -375,7 +375,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 	if (w == pixmap->drawable.width && h == pixmap->drawable.height) {
 		bool upload;
 
-		priv = sna_pixmap_attach(pixmap);
+		priv = sna_pixmap(pixmap);
 		if (!priv)
 			return false;
 
@@ -400,7 +400,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 	if (64*w*h < pixmap->drawable.width * pixmap->drawable.height)
 		return FALSE;
 
-	priv = sna_pixmap_attach(pixmap);
+	priv = sna_pixmap(pixmap);
 	if (!priv)
 		return FALSE;
 
commit 866a61a2590f0c5ae6592a13d4e3de3e68f5e373
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 16:06:59 2012 +0000

    sna: Disable vmap on 965gm
    
    The sampler just dies if it encounters a snoopable page, for no apparent
    reason. Whilst I encountered the bug on Crestline, disable it for the
    rest of gen4 just to be safe.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1ec9fb4..cc80278 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -599,6 +599,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 #if defined(USE_VMAP)
 	if (!DBG_NO_VMAP)
 		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
+	if (gen == 40)
+		kgem->has_vmap = false; /* sampler dies with snoopable memory */
 #endif
 	DBG(("%s: using vmap=%d\n", __FUNCTION__, kgem->has_vmap));
 
commit 1c653786895fc30be0e88455ce5f9caf9adc835d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 14:34:23 2012 +0000

    sna: Pass usage hint for creating linear buffers
    
    As we wish to immediately map the vertex buffers, it is beneficial to
    search the linear cache for an existing mapping to reuse first.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index bd1eddd..78c7ea0 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1642,7 +1642,8 @@ static int gen3_vertex_finish(struct sna *sna)
 	}
 
 	sna->render.vertices = NULL;
-	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	sna->render.vbo = kgem_create_linear(&sna->kgem,
+					     256*1024, CREATE_GTT_MAP);
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
@@ -1702,7 +1703,7 @@ static void gen3_vertex_close(struct sna *sna)
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
 			bo = kgem_create_linear(&sna->kgem,
-						4*sna->render.vertex_used);
+						4*sna->render.vertex_used, 0);
 			if (bo)
 				kgem_bo_write(&sna->kgem, bo,
 					      sna->render.vertex_data,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6ba59ee..97af7fc 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -398,7 +398,8 @@ static int gen4_vertex_finish(struct sna *sna)
 	}
 
 	sna->render.vertices = NULL;
-	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	sna->render.vbo = kgem_create_linear(&sna->kgem,
+					     256*1024, CREATE_GTT_MAP);
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
@@ -442,7 +443,8 @@ static void gen4_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used, 0);
 			if (bo && !kgem_bo_write(&sna->kgem, bo,
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index bccd343..18325b5 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -390,7 +390,8 @@ static int gen5_vertex_finish(struct sna *sna)
 	}
 
 	sna->render.vertices = NULL;
-	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	sna->render.vbo = kgem_create_linear(&sna->kgem,
+					     256*1024, CREATE_GTT_MAP);
 	if (sna->render.vbo)
 		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
@@ -447,7 +448,8 @@ static void gen5_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used, 0);
 			if (bo && !kgem_bo_write(&sna->kgem, bo,
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 439fb52..71c0046 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -964,9 +964,10 @@ static int gen6_vertex_finish(struct sna *sna)
 	}
 
 	sna->render.vertices = NULL;
-	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	sna->render.vbo = kgem_create_linear(&sna->kgem,
+					     256*1024, CREATE_GTT_MAP);
 	if (sna->render.vbo)
-		sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
 		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
@@ -1024,7 +1025,8 @@ static void gen6_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used, 0);
 			if (bo && !kgem_bo_write(&sna->kgem, bo,
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e3d9757..a401d94 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1065,9 +1065,10 @@ static int gen7_vertex_finish(struct sna *sna)
 	}
 
 	sna->render.vertices = NULL;
-	sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024);
+	sna->render.vbo = kgem_create_linear(&sna->kgem,
+					     256*1024, CREATE_GTT_MAP);
 	if (sna->render.vbo)
-		sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo);
+		sna->render.vertices = kgem_bo_map(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertices == NULL) {
 		kgem_bo_destroy(&sna->kgem, sna->render.vbo);
 		sna->render.vbo = NULL;
@@ -1121,7 +1122,8 @@ static void gen7_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used);
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used, 0);
 			if (bo && !kgem_bo_write(&sna->kgem, bo,
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5776a4f..1ec9fb4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1687,7 +1687,7 @@ void _kgem_submit(struct kgem *kgem)
 		size = compact_batch_surface(kgem);
 	else
 		size = kgem->nbatch * sizeof(kgem->batch[0]);
-	rq->bo = kgem_create_linear(kgem, size);
+	rq->bo = kgem_create_linear(kgem, size, 0);
 	if (rq->bo) {
 		uint32_t handle = rq->bo->handle;
 		int i;
@@ -2188,15 +2188,20 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 	return bo;
 }
 
-struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
+struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
 {
 	struct kgem_bo *bo;
 	uint32_t handle;
 
 	DBG(("%s(%d)\n", __FUNCTION__, size));
 
+	if (flags & CREATE_GTT_MAP && kgem->has_llc) {
+		flags &= ~CREATE_GTT_MAP;
+		flags |= CREATE_CPU_MAP;
+	}
+
 	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-	bo = search_linear_cache(kgem, size, CREATE_INACTIVE);
+	bo = search_linear_cache(kgem, size, CREATE_INACTIVE | flags);
 	if (bo)
 		return kgem_bo_reference(bo);
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 446ac68..9abb72a 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -192,7 +192,7 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 
 struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
 
-struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size);
+struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
 struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 				  int offset, int length);
 
diff --git a/src/sna/sna_gradient.c b/src/sna/sna_gradient.c
index 96841dd..943cbf9 100644
--- a/src/sna/sna_gradient.c
+++ b/src/sna/sna_gradient.c
@@ -168,7 +168,7 @@ sna_render_get_gradient(struct sna *sna,
 	     width/2, pixman_image_get_data(image)[width/2],
 	     width-1, pixman_image_get_data(image)[width-1]));
 
-	bo = kgem_create_linear(&sna->kgem, width*4);
+	bo = kgem_create_linear(&sna->kgem, width*4, 0);
 	if (!bo) {
 		pixman_image_unref(image);
 		return NULL;
@@ -248,7 +248,7 @@ sna_render_finish_solid(struct sna *sna, bool force)
 
 	DBG(("sna_render_finish_solid reset\n"));
 
-	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
+	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
 	cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
 	cache->bo[0]->pitch = 4;
 	if (force)
@@ -316,7 +316,7 @@ static Bool sna_alpha_cache_init(struct sna *sna)
 
 	DBG(("%s\n", __FUNCTION__));
 
-	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(color));
+	cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(color), 0);
 	if (!cache->cache_bo)
 		return FALSE;
 
@@ -338,7 +338,7 @@ static Bool sna_solid_cache_init(struct sna *sna)
 	DBG(("%s\n", __FUNCTION__));
 
 	cache->cache_bo =
-		kgem_create_linear(&sna->kgem, sizeof(cache->color));
+		kgem_create_linear(&sna->kgem, sizeof(cache->color), 0);
 	if (!cache->cache_bo)
 		return FALSE;
 
diff --git a/src/sna/sna_stream.c b/src/sna/sna_stream.c
index d6d817d..7f05d21 100644
--- a/src/sna/sna_stream.c
+++ b/src/sna/sna_stream.c
@@ -87,7 +87,7 @@ struct kgem_bo *sna_static_stream_fini(struct sna *sna,
 
 	DBG(("uploaded %d bytes of static state\n", stream->used));
 
-	bo = kgem_create_linear(&sna->kgem, stream->used);
+	bo = kgem_create_linear(&sna->kgem, stream->used, 0);
 	if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) {
 		kgem_bo_destroy(&sna->kgem, bo);
 		return NULL;
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index ebc3860..56cf260 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -104,7 +104,8 @@ sna_video_buffer(struct sna *sna,
 		sna_video_free_buffers(sna, video);
 
 	if (video->buf == NULL)
-		video->buf = kgem_create_linear(&sna->kgem, frame->size);
+		video->buf = kgem_create_linear(&sna->kgem, frame->size,
+						CREATE_GTT_MAP);
 
 	return video->buf;
 }
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 1aaf972..a71751c 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -273,7 +273,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 
 		assert(kgem_bo_size(frame.bo) >= frame.size);
 	} else {
-		frame.bo = kgem_create_linear(&sna->kgem, frame.size);
+		frame.bo = kgem_create_linear(&sna->kgem, frame.size,
+					      CREATE_GTT_MAP);
 		if (frame.bo == NULL) {
 			DBG(("%s: failed to allocate bo\n", __FUNCTION__));
 			return BadAlloc;
commit 29ec36ff063472e0744af99aa81ed5ad8e291a36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 10:01:07 2012 +0000

    sna: Only discard the inplace flag for LLC partial buffers
    
    KGEM_BUFFER_WRITE_INPLACE is WRITE | INPLACE and so the typo prevented
    uploading of partial data through the pwrite paths.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e4ff6a7..5776a4f 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3356,7 +3356,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	assert(size <= kgem->max_object_size);
 
 	if (kgem->has_llc)
-		flags &= ~KGEM_BUFFER_WRITE_INPLACE;
+		flags &= ~KGEM_BUFFER_INPLACE;
 
 	list_for_each_entry(bo, &kgem->active_partials, base.list) {
 		/* We can reuse any write buffer which we can fit */
commit f039ccf9587eb07528034c3247a6e700c87a5500
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Mar 2 09:47:10 2012 +0000

    sna: Be careful not to discard the clear operation for move-region-to-cpu
    
    When moving only a region to the CPU and we detect a pending clear, we
    transform the operation into a move whole pixmap. In such situations, we
    only have a partial damage area and so need to or in MOVE_READ to
    prevent the pending clear of the whole pixmap from being discarded.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46792
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 52e75c7..c427808 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -922,6 +922,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 
 	if ((flags & MOVE_READ) == 0) {
 		assert(flags & MOVE_WRITE);
+		DBG(("%s: no readbck, discarding gpu damage [%d], pending clear[%d]\n",
+		     __FUNCTION__, priv->gpu_damage != NULL, priv->clear));
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->clear = false;
 
@@ -1273,9 +1275,6 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		goto out;
 	}
 
-	if (priv->clear)
-		return _sna_pixmap_move_to_cpu(pixmap, flags);
-
 	if (priv->gpu_bo == NULL &&
 	    (priv->create & KGEM_CAN_CREATE_GPU) == 0 &&
 	    flags & MOVE_WRITE)
@@ -1293,6 +1292,13 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 	}
 
+	if (priv->clear) {
+		DBG(("%s: pending clear, moving whole pixmap\n", __FUNCTION__));
+		if (dx | dy)
+			RegionTranslate(region, -dx, -dy);
+		return _sna_pixmap_move_to_cpu(pixmap, flags | MOVE_READ);
+	}
+
 	if ((flags & MOVE_READ) == 0) {
 		DBG(("%s: no read, checking to see if we can stream the write into the GPU bo\n",
 		     __FUNCTION__));
commit 392593e61dac3ac65ee8e32de492c4439413ee85
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 17:54:51 2012 +0000

    sna/gen5: Help the compiler avoid an uncached read
    
    Debug builds are excruciatingly slow as the compiler doesn't store the
    temporary in a register but uses an uncached readback instead. Maybe
    this will help...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index bcba0d8..bccd343 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -849,16 +849,14 @@ gen5_emit_composite_primitive_identity_source(struct sna *sna,
 	dst.p.y = r->dst.y + r->height;
 	v[0] = dst.f;
 	v[1] = (sx + r->width) * sf[0];
-	v[2] = (sy + r->height) * sf[1];
+	v[5] = v[2] = (sy + r->height) * sf[1];
 
 	dst.p.x = r->dst.x;
 	v[3] = dst.f;
-	v[4] = sx * sf[0];
-	v[5] = v[2];
+	v[7] = v[4] = sx * sf[0];
 
 	dst.p.y = r->dst.y;
 	v[6] = dst.f;
-	v[7] = v[4];
 	v[8] = sy * sf[1];
 }
 
commit 9c0c04cac245db046ef17ff24c32e6ab93535f48
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 14:52:39 2012 +0000

    sna: Split storage of inactive partials
    
    As we now attempt to retain partial buffers after execution, we can
    end up with lots of inactive buffers sitting on the partial buffer list.
    In any one batch, we wish to minimise the number of buffers used, so
    keep all the inactive buffers on a separate list and only pull from them
    as required.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index 8e8c612..366b9e8 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -214,6 +214,8 @@ __list_del(struct list *prev, struct list *next)
 static inline void
 _list_del(struct list *entry)
 {
+    assert(entry->prev->next == entry);
+    assert(entry->next->prev == entry);
     __list_del(entry->prev, entry->next);
 }
 
@@ -327,6 +329,11 @@ list_is_empty(struct list *head)
 	 &pos->member != (head);					\
 	 pos = __container_of(pos->member.next, pos, member))
 
+#define list_for_each_entry_reverse(pos, head, member)				\
+    for (pos = __container_of((head)->prev, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = __container_of(pos->member.prev, pos, member))
+
 /**
  * Loop through the list, keeping a backup pointer to the element. This
  * macro allows for the deletion of a list element while looping through the
@@ -353,6 +360,8 @@ list_add_tail(struct list *entry, struct list *head)
 static inline void
 _list_del(struct list *entry)
 {
+    assert(entry->prev->next == entry);
+    assert(entry->next->prev == entry);
     __list_del(entry->prev, entry->next);
 }
 
@@ -382,6 +391,11 @@ static inline void list_move_tail(struct list *list, struct list *head)
 #define list_last_entry(ptr, type, member) \
     list_entry((ptr)->prev, type, member)
 
+#define list_for_each_entry_reverse(pos, head, member)				\
+    for (pos = __container_of((head)->prev, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = __container_of(pos->member.prev, pos, member))
+
 #endif
 
 #undef container_of
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4fcdf37..e4ff6a7 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -119,8 +119,10 @@ static bool validate_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->base.list.next == &kgem->partial)
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+		if (bo->base.list.next == &kgem->active_partials)
 			return true;
 		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
 			ErrorF("this rem: %d, next rem: %d\n",
@@ -132,7 +134,7 @@ static bool validate_partials(struct kgem *kgem)
 	return true;
 
 err:
-	list_for_each_entry(bo, &kgem->partial, base.list)
+	list_for_each_entry(bo, &kgem->active_partials, base.list)
 		ErrorF("bo: used=%d / %d, rem=%d\n",
 		       bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
 	return false;
@@ -573,7 +575,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->half_cpu_cache_pages = cpu_cache_size() >> 13;
 
-	list_init(&kgem->partial);
+	list_init(&kgem->active_partials);
+	list_init(&kgem->inactive_partials);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
 	list_init(&kgem->sync_list);
@@ -1173,11 +1176,11 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
-static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
+static void bubble_sort_partial(struct list *head, struct kgem_partial_bo *bo)
 {
 	int remain = bytes(&bo->base) - bo->used;
 
-	while (bo->base.list.prev != &kgem->partial) {
+	while (bo->base.list.prev != head) {
 		struct kgem_partial_bo *p;
 
 		p = list_entry(bo->base.list.prev,
@@ -1204,21 +1207,25 @@ static void kgem_retire_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->used == 0 || !bo->mmapped)
-			continue;
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+
 		if (bo->base.refcnt != 1 || bo->base.rq)
 			continue;
 
 		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
+		assert(bo->base.refcnt == 1);
+		assert(bo->mmapped);
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
 		bo->used = 0;
 
-		bubble_sort_partial(kgem, bo);
+		list_move_tail(&bo->base.list, &kgem->inactive_partials);
+		bubble_sort_partial(&kgem->inactive_partials, bo);
 	}
 }
 
@@ -1343,6 +1350,8 @@ static void kgem_commit(struct kgem *kgem)
 	struct kgem_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &rq->buffers, request) {
+		assert(next->request.prev == &bo->request);
+
 		DBG(("%s: release handle=%d (proxy? %d), dirty? %d flush? %d -> offset=%x\n",
 		     __FUNCTION__, bo->handle, bo->proxy != NULL,
 		     bo->dirty, bo->needs_flush, (unsigned)bo->exec->offset));
@@ -1411,15 +1420,18 @@ static void kgem_finish_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
 
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
+	list_for_each_entry_safe(bo, next, &kgem->active_partials, base.list) {
+		assert(next->base.list.prev == &bo->base.list);
+		assert(bo->base.io);
+
+		if (!bo->base.exec)
+			continue;
+
 		if (!bo->write) {
 			assert(bo->base.exec || bo->base.refcnt > 1);
 			goto decouple;
 		}
 
-		if (!bo->base.exec)
-			continue;
-
 		if (bo->mmapped) {
 			assert(!bo->need_io);
 			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
@@ -1439,9 +1451,9 @@ static void kgem_finish_partials(struct kgem *kgem)
 			goto decouple;
 		}
 
+		assert(bo->need_io);
 		assert(bo->base.rq == kgem->next_request);
 		assert(bo->base.domain != DOMAIN_GPU);
-		assert(bo->need_io);
 
 		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
 			struct kgem_bo *shrink;
@@ -1485,8 +1497,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 				bo->base.needs_flush = false;
 				bo->used = 0;
 
-				bubble_sort_partial(kgem, bo);
-				continue;
+				goto decouple;
 			}
 		}
 
@@ -1508,16 +1519,6 @@ decouple:
 
 static void kgem_cleanup(struct kgem *kgem)
 {
-	while (!list_is_empty(&kgem->partial)) {
-		struct kgem_bo *bo;
-
-		bo = list_first_entry(&kgem->partial,
-				      struct kgem_bo,
-				      list);
-		list_del(&bo->list);
-		kgem_bo_unref(kgem, bo);
-	}
-
 	while (!list_is_empty(&kgem->requests)) {
 		struct kgem_request *rq;
 
@@ -1844,14 +1845,17 @@ void kgem_throttle(struct kgem *kgem)
 
 static void kgem_expire_partial(struct kgem *kgem)
 {
-	struct kgem_partial_bo *bo, *next;
-
-	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->base.refcnt > 1 || bo->base.rq)
-			continue;
-
-		DBG(("%s: discarding unused partial buffer: %d/%d, write? %d\n",
-		     __FUNCTION__, bo->used, bytes(&bo->base), bo->write));
+	while (!list_is_empty(&kgem->inactive_partials)) {
+		struct kgem_partial_bo *bo =
+			list_first_entry(&kgem->inactive_partials,
+					 struct kgem_partial_bo,
+					 base.list);
+
+		DBG(("%s: discarding unused partial buffer: %d, last write? %d\n",
+		     __FUNCTION__, bytes(&bo->base), bo->write));
+		assert(bo->base.list.prev == &kgem->inactive_partials);
+		assert(bo->base.io);
+		assert(bo->base.refcnt == 1);
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
 	}
@@ -2785,7 +2789,7 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 
 	if (bo->delta + bo->size.bytes == io->used) {
 		io->used = bo->delta;
-		bubble_sort_partial(kgem, io);
+		bubble_sort_partial(&kgem->active_partials, io);
 	}
 }
 
@@ -3210,10 +3214,16 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	return bo;
 }
 #else
+static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
+{
+	assert(0);
+	return 0;
+}
 struct kgem_bo *kgem_create_map(struct kgem *kgem,
 				void *ptr, uint32_t size,
 				bool read_only)
 {
+	assert(0);
 	return 0;
 }
 #endif
@@ -3243,6 +3253,7 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
 {
 	assert(!bo->reusable);
+	assert(list_is_empty(&bo->list));
 	list_add(&bo->list, &kgem->sync_list);
 	bo->sync = true;
 }
@@ -3342,17 +3353,16 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
 	/* we should never be asked to create anything TOO large */
-	assert(size <= kgem->max_cpu_size);
+	assert(size <= kgem->max_object_size);
 
 	if (kgem->has_llc)
 		flags &= ~KGEM_BUFFER_WRITE_INPLACE;
 
-	list_for_each_entry(bo, &kgem->partial, base.list) {
+	list_for_each_entry(bo, &kgem->active_partials, base.list) {
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
 		    bo->write == KGEM_BUFFER_WRITE &&
-		    bo->base.exec && !bo->mmapped &&
-		    size <= bytes(&bo->base)) {
+		    !bo->mmapped && size <= bytes(&bo->base)) {
 			assert(bo->base.refcnt == 1);
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
@@ -3362,7 +3372,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->write = 0;
 			offset = 0;
 			bo->used = size;
-			bubble_sort_partial(kgem, bo);
+			bubble_sort_partial(&kgem->active_partials, bo);
 			goto done;
 		}
 
@@ -3395,6 +3405,27 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		break;
 	}
 
+	if (flags & KGEM_BUFFER_WRITE) {
+		list_for_each_entry_reverse(bo, &kgem->inactive_partials, base.list) {
+			if (size > bytes(&bo->base))
+				continue;
+
+			if (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
+			    !bo->base.vmap) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
+
+			DBG(("%s: reusing inactive partial buffer? size=%d, total=%d\n",
+			     __FUNCTION__, size, bytes(&bo->base)));
+			offset = 0;
+			bo->used = size;
+			list_move(&bo->base.list, &kgem->active_partials);
+			goto done;
+		}
+	}
+
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
@@ -3656,7 +3687,8 @@ init:
 	bo->write = flags & KGEM_BUFFER_WRITE_INPLACE;
 	offset = 0;
 
-	list_add(&bo->base.list, &kgem->partial);
+	assert(list_is_empty(&bo->base.list));
+	list_add(&bo->base.list, &kgem->active_partials);
 	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
 
@@ -3669,7 +3701,7 @@ done:
 		first = p = list_first_entry(&bo->base.list,
 					     struct kgem_partial_bo,
 					     base.list);
-		while (&p->base.list != &kgem->partial &&
+		while (&p->base.list != &kgem->active_partials &&
 		       alloc < bytes(&p->base) - p->used) {
 			DBG(("%s: this=%d, right=%d\n",
 			     __FUNCTION__, alloc, bytes(&p->base) -p->used));
@@ -3720,7 +3752,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		if (io->used)
 			io->used -= stride;
 		bo->size.bytes -= stride;
-		bubble_sort_partial(kgem, io);
+		bubble_sort_partial(&kgem->active_partials, io);
 	}
 
 	bo->pitch = stride;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 58316dc..446ac68 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -125,7 +125,7 @@ struct kgem {
 	struct list large;
 	struct list active[NUM_CACHE_BUCKETS][3];
 	struct list inactive[NUM_CACHE_BUCKETS];
-	struct list partial;
+	struct list active_partials, inactive_partials;
 	struct list requests;
 	struct list sync_list;
 	struct kgem_request *next_request;
commit a438e4ac9ba162e870fb22bc54024d35daa2121e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 19:15:34 2012 +0000

    sna: Revamp vmap support
    
    Dust off the kernel patches and update to reflect the changes made to
    support LLC CPU bo, in particular to support the unsynchronized shadow
    buffers.
    
    However, due to the forced synchronisation required for strict client
    coherency we prefer not to use the vmap for shared pixmaps unless we are
    already busy (i.e. sync afterwards rather than before in the hope that
    we can squash a few operations into one). Being able to block the reply
    to the client until the request is actually complete and so avoid the
    sync remains a dream.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3ba2ec3..4fcdf37 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -76,8 +76,23 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 #define MAX_CPU_VMA_CACHE INT16_MAX
 #define MAP_PRESERVE_TIME 10
 
-#define CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) & ~1))
+#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
 #define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1))
+#define MAKE_VMAP_MAP(ptr) ((void*)((uintptr_t)(ptr) | 3))
+#define IS_VMAP_MAP(ptr) ((uintptr_t)(ptr) & 2)
+
+#if defined(USE_VMAP) && !defined(I915_PARAM_HAS_VMAP)
+#define DRM_I915_GEM_VMAP       0x2c
+#define DRM_IOCTL_I915_GEM_VMAP DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_VMAP, struct drm_i915_gem_vmap)
+#define I915_PARAM_HAS_VMAP              18
+struct drm_i915_gem_vmap {
+	uint64_t user_ptr;
+	uint32_t user_size;
+	uint32_t flags;
+#define I915_VMAP_READ_ONLY 0x1
+	uint32_t handle;
+};
+#endif
 
 struct kgem_partial_bo {
 	struct kgem_bo base;
@@ -561,6 +576,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->partial);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
+	list_init(&kgem->sync_list);
 	list_init(&kgem->large);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
@@ -577,7 +593,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->next_request = __kgem_request_alloc();
 
-#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP)
+#if defined(USE_VMAP)
 	if (!DBG_NO_VMAP)
 		kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0;
 #endif
@@ -605,9 +621,8 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		}
 		kgem->has_llc = has_llc;
 	}
-	kgem->has_cpu_bo = kgem->has_llc;
-	DBG(("%s: cpu bo enabled %d: llc? %d\n", __FUNCTION__,
-	     kgem->has_cpu_bo, kgem->has_llc));
+	DBG(("%s: cpu bo enabled %d: llc? %d, vmap? %d\n", __FUNCTION__,
+	     kgem->has_llc | kgem->has_vmap, kgem->has_llc, kgem->has_vmap));
 
 	kgem->has_semaphores = false;
 	if (gen >= 60 && semaphores_enabled())
@@ -688,10 +703,13 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->max_upload_tile_size = half_gpu_max;
 
 	kgem->large_object_size = MAX_CACHE_SIZE;
-	if (kgem->large_object_size > kgem->max_cpu_size)
-		kgem->large_object_size = kgem->max_cpu_size;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
+	if (kgem->has_llc | kgem->has_vmap) {
+		if (kgem->large_object_size > kgem->max_cpu_size)
+			kgem->large_object_size = kgem->max_cpu_size;
+	} else
+		kgem->max_cpu_size = 0;
 
 	DBG(("%s: large object thresold=%d\n",
 	     __FUNCTION__, kgem->large_object_size));
@@ -887,8 +905,10 @@ void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
 
 	/* XXX is it worth working around gcc here? */
 	kgem->flush |= bo->flush;
-	kgem->sync |= bo->sync;
 	kgem->scanout |= bo->scanout;
+
+	if (bo->sync)
+		kgem->sync = kgem->next_request;
 }
 
 static uint32_t kgem_end_batch(struct kgem *kgem)
@@ -932,12 +952,14 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	int type = IS_CPU_MAP(bo->map);
 
+	assert(!IS_VMAP_MAP(bo->map));
+
 	DBG(("%s: releasing %s vma for handle=%d, count=%d\n",
 	     __FUNCTION__, type ? "CPU" : "GTT",
 	     bo->handle, kgem->vma[type].count));
 
-	VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-	munmap(CPU_MAP(bo->map), bytes(bo));
+	VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+	munmap(MAP(bo->map), bytes(bo));
 	bo->map = NULL;
 
 	if (!list_is_empty(&bo->vma)) {
@@ -951,9 +973,15 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
 	DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle));
 	assert(bo->refcnt == 0);
 	assert(bo->exec == NULL);
+	assert(!bo->vmap || bo->rq == NULL);
 
 	kgem_bo_binding_free(kgem, bo);
 
+	if (IS_VMAP_MAP(bo->map)) {
+		assert(bo->rq == NULL);
+		free(MAP(bo->map));
+		bo->map = NULL;
+	}
 	if (bo->map)
 		kgem_bo_release_map(kgem, bo);
 	assert(list_is_empty(&bo->vma));
@@ -978,6 +1006,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(!bo->needs_flush);
 	assert(bo->rq == NULL);
 	assert(bo->domain != DOMAIN_GPU);
+	assert(bo->reusable);
 
 	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
 		kgem_bo_free(kgem, bo);
@@ -990,7 +1019,7 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
 		    (!type && !kgem_bo_is_mappable(kgem, bo))) {
 			list_del(&bo->vma);
-			munmap(CPU_MAP(bo->map), bytes(bo));
+			munmap(MAP(bo->map), bytes(bo));
 			bo->map = NULL;
 		}
 		if (bo->map) {
@@ -1035,6 +1064,19 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (NO_CACHE)
 		goto destroy;
 
+	if (bo->vmap) {
+		if (bo->rq == NULL) {
+			if (bo->needs_flush && kgem_busy(kgem, bo->handle)) {
+				list_add(&bo->request, &kgem->flushing);
+				bo->rq = &_kgem_static_request;
+			} else
+				kgem_bo_free(kgem, bo);
+		} else {
+			assert(!bo->sync);
+		}
+		return;
+	}
+
 	if (bo->io) {
 		struct kgem_bo *base;
 
@@ -1065,7 +1107,7 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
-	assert(bo->vmap == false && bo->sync == false);
+	assert(bo->vmap == false);
 	assert(bo->io == false);
 	assert(bo->scanout == false);
 	assert(bo->flush == false);
@@ -1197,7 +1239,7 @@ bool kgem_retire(struct kgem *kgem)
 
 		DBG(("%s: moving %d from flush to inactive\n",
 		     __FUNCTION__, bo->handle));
-		if (kgem_bo_set_purgeable(kgem, bo)) {
+		if (bo->reusable && kgem_bo_set_purgeable(kgem, bo)) {
 			bo->needs_flush = false;
 			bo->domain = DOMAIN_NONE;
 			bo->rq = NULL;
@@ -1278,6 +1320,9 @@ bool kgem_retire(struct kgem *kgem)
 			kgem_bo_free(kgem, rq->bo);
 		}
 
+		if (kgem->sync == rq)
+			kgem->sync = NULL;
+
 		_list_del(&rq->list);
 		free(rq);
 	}
@@ -1308,7 +1353,7 @@ static void kgem_commit(struct kgem *kgem)
 		bo->presumed_offset = bo->exec->offset;
 		bo->exec = NULL;
 
-		if (!bo->refcnt && !bo->reusable) {
+		if (!bo->refcnt && !bo->reusable && !bo->vmap) {
 			kgem_bo_free(kgem, bo);
 			continue;
 		}
@@ -1708,8 +1753,10 @@ void _kgem_submit(struct kgem *kgem)
 #if !NDEBUG
 			if (ret < 0) {
 				int i;
-				ErrorF("batch (end=%d, size=%d) submit failed: %d\n",
-				       batch_end, size, errno);
+
+				ErrorF("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d: errno=%d\n",
+				       kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface,
+				       kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture, errno);
 
 				i = open("/tmp/batchbuffer", O_WRONLY | O_CREAT | O_APPEND, 0666);
 				if (i != -1) {
@@ -1746,8 +1793,7 @@ void _kgem_submit(struct kgem *kgem)
 					       kgem->reloc[i].write_domain,
 					       (int)kgem->reloc[i].presumed_offset);
 				}
-				FatalError("SNA: failed to submit batchbuffer: ret=%d\n",
-					   errno);
+				FatalError("SNA: failed to submit batchbuffer\n");
 			}
 #endif
 
@@ -2594,6 +2640,7 @@ search_inactive:
 	cache = &kgem->inactive[bucket];
 	list_for_each_entry_safe(bo, next, cache, list) {
 		assert(bucket(bo) == bucket);
+		assert(bo->reusable);
 
 		if (size > num_pages(bo)) {
 			DBG(("inactive too small: %d < %d\n",
@@ -2673,6 +2720,59 @@ create:
 	return bo;
 }
 
+struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
+				   int width,
+				   int height,
+				   int bpp,
+				   uint32_t flags)
+{
+	struct kgem_bo *bo;
+
+	DBG(("%s(%dx%d, bpp=%d)\n", __FUNCTION__, width, height, bpp));
+
+	if (kgem->has_llc) {
+		bo = kgem_create_2d(kgem, width, height, bpp,
+				    I915_TILING_NONE, flags);
+		if (bo == NULL)
+			return bo;
+
+		if (kgem_bo_map__cpu(kgem, bo) == NULL) {
+			_kgem_bo_destroy(kgem, bo);
+			return NULL;
+		}
+
+		return bo;
+	}
+
+	if (kgem->has_vmap) {
+		int stride, size;
+		void *ptr;
+
+		stride = ALIGN(width, 2) * bpp >> 3;
+		stride = ALIGN(stride, 4);
+		size = ALIGN(height, 2) * stride;
+
+		assert(size >= PAGE_SIZE);
+
+		/* XXX */
+		//if (posix_memalign(&ptr, 64, ALIGN(size, 64)))
+		if (posix_memalign(&ptr, PAGE_SIZE, ALIGN(size, PAGE_SIZE)))
+			return NULL;
+
+		bo = kgem_create_map(kgem, ptr, size, false);
+		if (bo == NULL) {
+			free(ptr);
+			return NULL;
+		}
+
+		bo->map = MAKE_VMAP_MAP(ptr);
+		bo->pitch = stride;
+		return bo;
+	}
+
+	return NULL;
+}
+
 static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
@@ -2702,9 +2802,6 @@ void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		return;
 	}
 
-	if (bo->vmap)
-		kgem_bo_sync__cpu(kgem, bo);
-
 	__kgem_bo_destroy(kgem, bo);
 }
 
@@ -2915,8 +3012,8 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		assert(bo->map);
 		assert(bo->rq == NULL);
 
-		VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-		munmap(CPU_MAP(bo->map), bytes(bo));
+		VG(if (type) VALGRIND_FREELIKE_BLOCK(MAP(bo->map), 0));
+		munmap(MAP(bo->map), bytes(bo));
 		bo->map = NULL;
 		list_del(&bo->vma);
 		kgem->vma[type].count--;
@@ -2996,7 +3093,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (bo->map)
-		return CPU_MAP(bo->map);
+		return MAP(bo->map);
 
 	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
@@ -3012,7 +3109,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	assert(list_is_empty(&bo->list));
 
 	if (IS_CPU_MAP(bo->map))
-		return CPU_MAP(bo->map);
+		return MAP(bo->map);
 
 	if (bo->map)
 		kgem_bo_release_map(kgem, bo);
@@ -3067,7 +3164,7 @@ uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo)
 	return flink.name;
 }
 
-#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP)
+#if defined(USE_VMAP)
 static uint32_t gem_vmap(int fd, void *ptr, int size, int read_only)
 {
 	struct drm_i915_gem_vmap vmap;
@@ -3095,14 +3192,11 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	if (!kgem->has_vmap)
 		return NULL;
 
-	if (size >= MAX_CACHE_SIZE)
-		return NULL;
-
 	handle = gem_vmap(kgem->fd, ptr, size, read_only);
 	if (handle == 0)
 		return NULL;
 
-	bo = __kgem_bo_alloc(handle, size);
+	bo = __kgem_bo_alloc(handle, NUM_PAGES(size));
 	if (bo == NULL) {
 		gem_close(kgem->fd, handle);
 		return NULL;
@@ -3110,10 +3204,9 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 
 	bo->reusable = false;
 	bo->vmap = true;
-	bo->sync = true;
 
-	DBG(("%s(ptr=%p, size=%d, read_only=%d) => handle=%d\n",
-	     __FUNCTION__, ptr, size, read_only, handle));
+	DBG(("%s(ptr=%p, size=%d, pages=%d, read_only=%d) => handle=%d\n",
+	     __FUNCTION__, ptr, size, NUM_PAGES(size), read_only, handle));
 	return bo;
 }
 #else
@@ -3129,7 +3222,6 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
 	kgem_bo_submit(kgem, bo);
 
-	/* XXX assumes bo is snoopable */
 	if (bo->domain != DOMAIN_CPU) {
 		struct drm_i915_gem_set_domain set_domain;
 
@@ -3148,28 +3240,40 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	}
 }
 
+void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo)
+{
+	assert(!bo->reusable);
+	list_add(&bo->list, &kgem->sync_list);
+	bo->sync = true;
+}
+
 void kgem_sync(struct kgem *kgem)
 {
+	struct drm_i915_gem_set_domain set_domain;
+	struct kgem_request *rq;
+	struct kgem_bo *bo;
+
 	DBG(("%s\n", __FUNCTION__));
 
-	if (!list_is_empty(&kgem->requests)) {
-		struct drm_i915_gem_set_domain set_domain;
-		struct kgem_request *rq;
+	rq = kgem->sync;
+	if (rq == NULL)
+		return;
 
-		rq = list_first_entry(&kgem->requests,
-				      struct kgem_request,
-				      list);
+	if (rq == kgem->next_request)
+		_kgem_submit(kgem);
 
-		VG_CLEAR(set_domain);
-		set_domain.handle = rq->bo->handle;
-		set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-		set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+	VG_CLEAR(set_domain);
+	set_domain.handle = rq->bo->handle;
+	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
 
-		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-		kgem_retire(kgem);
-	}
+	drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+	kgem_retire(kgem);
+
+	list_for_each_entry(bo, &kgem->sync_list, list)
+		kgem_bo_sync__cpu(kgem, bo);
 
-	kgem->sync = false;
+	assert (kgem->sync == NULL);
 }
 
 void kgem_clear_dirty(struct kgem *kgem)
@@ -3262,11 +3366,20 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			goto done;
 		}
 
-		if ((bo->write & KGEM_BUFFER_WRITE) != (flags & KGEM_BUFFER_WRITE) ||
-		    (bo->write & ~flags) & KGEM_BUFFER_INPLACE) {
-			DBG(("%s: skip write %x buffer, need %x\n",
-			     __FUNCTION__, bo->write, flags));
-			continue;
+		if (flags & KGEM_BUFFER_WRITE) {
+			if ((bo->write & KGEM_BUFFER_WRITE) == 0 ||
+			    (((bo->write & ~flags) & KGEM_BUFFER_INPLACE) &&
+			     !bo->base.vmap)) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
+		} else {
+			if (bo->write & KGEM_BUFFER_WRITE) {
+				DBG(("%s: skip write %x buffer, need %x\n",
+				     __FUNCTION__, bo->write, flags));
+				continue;
+			}
 		}
 
 		if (bo->used + size <= bytes(&bo->base)) {
@@ -3290,7 +3403,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (alloc > MAX_CACHE_SIZE)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
-	if (kgem->has_cpu_bo) {
+	if (kgem->has_llc) {
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
 			return NULL;
@@ -3420,6 +3533,29 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		alloc = PAGE_ALIGN(size) / PAGE_SIZE;
 	flags &= ~KGEM_BUFFER_INPLACE;
 
+	if (kgem->has_vmap) {
+		bo = partial_bo_alloc(alloc);
+		if (bo) {
+			if (!__kgem_bo_init(&bo->base,
+					    gem_vmap(kgem->fd, bo->mem,
+						     alloc * PAGE_SIZE, false),
+					    alloc)) {
+				free(bo);
+				return NULL;
+			}
+
+			DBG(("%s: created vmap handle=%d for buffer\n",
+			     __FUNCTION__, bo->base.handle));
+
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->base.vmap = true;
+			bo->mmapped = true;
+
+			goto init;
+		}
+	}
+
 	old = NULL;
 	if ((flags & KGEM_BUFFER_WRITE) == 0)
 		old = search_linear_cache(kgem, alloc, 0);
@@ -3561,7 +3697,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	assert(width > 0 && height > 0);
 	assert(ret != NULL);
 	stride = ALIGN(width, 2) * bpp >> 3;
-	stride = ALIGN(stride, kgem->min_alignment);
+	stride = ALIGN(stride, 4);
 
 	DBG(("%s: %dx%d, %d bpp, stride=%d\n",
 	     __FUNCTION__, width, height, bpp, stride));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 30303ce..58316dc 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -127,7 +127,9 @@ struct kgem {
 	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list partial;
 	struct list requests;
+	struct list sync_list;
 	struct kgem_request *next_request;
+	struct kgem_request *sync;
 
 	struct {
 		struct list inactive[NUM_CACHE_BUCKETS];
@@ -142,7 +144,6 @@ struct kgem {
 	uint16_t max_batch_size;
 
 	uint32_t flush:1;
-	uint32_t sync:1;
 	uint32_t need_expire:1;
 	uint32_t need_purge:1;
 	uint32_t need_retire:1;
@@ -154,7 +155,6 @@ struct kgem {
 	uint32_t has_relaxed_fencing :1;
 	uint32_t has_semaphores :1;
 	uint32_t has_llc :1;
-	uint32_t has_cpu_bo :1;
 
 	uint16_t fence_max;
 	uint16_t half_cpu_cache_pages;
@@ -229,6 +229,11 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int bpp,
 			       int tiling,
 			       uint32_t flags);
+struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
+				   int width,
+				   int height,
+				   int bpp,
+				   uint32_t flags);
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
 void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);
@@ -359,6 +364,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
 void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
 void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_set_sync(struct kgem *kgem, struct kgem_bo *bo);
 uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
 
 Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 00fc80a..c772d7d 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -115,11 +115,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "sna_damage.h"
 #include "sna_render.h"
 
-#ifndef CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-#define FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER 1
-#define CREATE_PIXMAP_USAGE_SCRATCH_HEADER (unsigned)-1
-#endif
-
 #define SNA_CURSOR_X			64
 #define SNA_CURSOR_Y			SNA_CURSOR_X
 
@@ -226,13 +221,14 @@ struct sna {
 #define SNA_NO_THROTTLE		0x1
 #define SNA_NO_DELAYED_FLUSH	0x2
 
+	unsigned flush;
+
 	int timer[NUM_TIMERS];
 	uint16_t timer_active;
 	uint16_t timer_ready;
 
 	int vblank_interval;
 
-	struct list deferred_free;
 	struct list dirty_pixmaps;
 	struct list active_pixmaps;
 	struct list inactive_clock[2];
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8fa59a6..52e75c7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -44,6 +44,7 @@
 #include <fbpict.h>
 #endif
 #include <miline.h>
+#include <shmint.h>
 
 #include <sys/time.h>
 #include <sys/mman.h>
@@ -61,7 +62,8 @@
 #define USE_INPLACE 1
 #define USE_WIDE_SPANS 1 /* -1 force CPU, 1 force GPU */
 #define USE_ZERO_SPANS 1 /* -1 force CPU, 1 force GPU */
-#define USE_BO_FOR_SCRATCH_PIXMAP 1
+#define USE_SHM_VMAP 0
+#define PREFER_VMAP 0
 
 #define MIGRATE_ALL 0
 
@@ -302,27 +304,21 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-	if ((sna->kgem.has_cpu_bo || (priv->create & KGEM_CAN_CREATE_GPU) == 0) &&
-	    (priv->create & KGEM_CAN_CREATE_CPU)) {
+	if (priv->create & KGEM_CAN_CREATE_CPU) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
-		priv->cpu_bo = kgem_create_2d(&sna->kgem,
-					      pixmap->drawable.width,
-					      pixmap->drawable.height,
-					      pixmap->drawable.bitsPerPixel,
-					      I915_TILING_NONE,
-					      from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
+		priv->cpu_bo = kgem_create_cpu_2d(&sna->kgem,
+						  pixmap->drawable.width,
+						  pixmap->drawable.height,
+						  pixmap->drawable.bitsPerPixel,
+						  from_gpu ? 0 : CREATE_CPU_MAP | CREATE_INACTIVE);
 		DBG(("%s: allocated CPU handle=%d\n", __FUNCTION__,
 		     priv->cpu_bo->handle));
 
 		if (priv->cpu_bo) {
 			priv->ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
-			if (priv->ptr == NULL) {
-				kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-				priv->cpu_bo = NULL;
-			} else
-				priv->stride = priv->cpu_bo->pitch;
+			priv->stride = priv->cpu_bo->pitch;
 		}
 	}
 
@@ -349,7 +345,8 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
 		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
-
+		if (priv->cpu_bo->sync)
+			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
 	} else
@@ -377,14 +374,6 @@ static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
 	if (priv->ptr)
 		sna_pixmap_free_cpu(sna, priv);
 
-	if (priv->cpu_bo) {
-		if (priv->cpu_bo->vmap && kgem_bo_is_busy(priv->cpu_bo)) {
-			list_add_tail(&priv->list, &sna->deferred_free);
-			return false;
-		}
-		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-	}
-
 	if (!sna->freed_pixmap && priv->header) {
 		sna->freed_pixmap = pixmap;
 		assert(priv->ptr == NULL);
@@ -524,7 +513,6 @@ _sna_pixmap_reset(PixmapPtr pixmap)
 
 	assert(pixmap->drawable.type == DRAWABLE_PIXMAP);
 	assert(pixmap->drawable.class == 0);
-	assert(pixmap->drawable.id == 0);
 	assert(pixmap->drawable.x == 0);
 	assert(pixmap->drawable.y == 0);
 
@@ -561,11 +549,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 	     pixmap->usage_hint));
 
 	switch (pixmap->usage_hint) {
-	case CREATE_PIXMAP_USAGE_SCRATCH_HEADER:
-#if !FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-		if (sna->kgem.has_vmap)
-			break;
-#endif
 	case CREATE_PIXMAP_USAGE_GLYPH_PICTURE:
 		DBG(("%s: not attaching due to crazy usage: %d\n",
 		     __FUNCTION__, pixmap->usage_hint));
@@ -594,15 +577,6 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 		       pixmap->drawable.width,
 		       pixmap->drawable.height);
 
-	if (pixmap->usage_hint == CREATE_PIXMAP_USAGE_SCRATCH_HEADER) {
-		priv->cpu_bo = kgem_create_map(&sna->kgem,
-					       pixmap->devPrivate.ptr,
-					       pixmap_size(pixmap),
-					       0);
-		if (priv->cpu_bo)
-			priv->cpu_bo->pitch = pixmap->devKind;
-	}
-
 	return priv;
 }
 
@@ -630,6 +604,75 @@ create_pixmap(struct sna *sna, ScreenPtr screen,
 }
 
 static PixmapPtr
+sna_pixmap_create_shm(ScreenPtr screen,
+		      int width, int height, int depth,
+		      char *addr)
+{
+	struct sna *sna = to_sna_from_screen(screen);
+	int bpp = BitsPerPixel(depth);
+	int pitch = PixmapBytePad(width, depth);
+	struct sna_pixmap *priv;
+	PixmapPtr pixmap;
+
+	DBG(("%s(%d, %d, %d)\n", __FUNCTION__,
+	     width, height, depth));
+
+	if (sna->freed_pixmap) {
+		pixmap = sna->freed_pixmap;
+		sna->freed_pixmap = NULL;
+
+		pixmap->usage_hint = -1;
+		pixmap->refcnt = 1;
+
+		pixmap->drawable.width = width;
+		pixmap->drawable.height = height;
+		pixmap->drawable.depth = depth;
+		pixmap->drawable.bitsPerPixel = bpp;
+		pixmap->drawable.serialNumber = NEXT_SERIAL_NUMBER;
+
+		DBG(("%s: serial=%ld, %dx%d\n",
+		     __FUNCTION__,
+		     pixmap->drawable.serialNumber,
+		     pixmap->drawable.width,
+		     pixmap->drawable.height));
+
+		priv = _sna_pixmap_reset(pixmap);
+	} else {
+		pixmap = create_pixmap(sna, screen, 0, 0, depth, -1);
+		if (pixmap == NullPixmap)
+			return NullPixmap;
+
+		pixmap->drawable.width = width;
+		pixmap->drawable.height = height;
+		pixmap->drawable.depth = depth;
+		pixmap->drawable.bitsPerPixel = bpp;
+
+		priv = __sna_pixmap_attach(sna, pixmap);
+		if (!priv) {
+			fbDestroyPixmap(pixmap);
+			return NullPixmap;
+		}
+	}
+
+	priv->cpu_bo = kgem_create_map(&sna->kgem, addr, pitch*height, false);
+	if (priv->cpu_bo == NULL) {
+		free(priv);
+		fbDestroyPixmap(pixmap);
+		return GetScratchPixmapHeader(screen, width, height, depth,
+					      bpp, pitch, addr);
+	}
+	kgem_bo_set_sync(&sna->kgem, priv->cpu_bo);
+	priv->cpu_bo->pitch = pitch;
+
+	priv->header = true;
+	sna_damage_all(&priv->cpu_damage, width, height);
+
+	pixmap->devKind = pitch;
+	pixmap->devPrivate.ptr = addr;
+	return pixmap;
+}
+
+static PixmapPtr
 sna_pixmap_create_scratch(ScreenPtr screen,
 			  int width, int height, int depth,
 			  uint32_t tiling)
@@ -723,6 +766,11 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
+	if ((width|height) == 0) {
+		usage = -1;
+		goto fallback;
+	}
+
 	if (!sna->have_render)
 		goto fallback;
 
@@ -733,11 +781,6 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		goto fallback;
 	}
 
-#if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
-	if (width == 0 || height == 0)
-		goto fallback;
-#endif
-
 	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
 		if (flags & KGEM_CAN_CREATE_GPU)
 			return sna_pixmap_create_scratch(screen,
@@ -919,14 +962,16 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 			sna_damage_destroy(&priv->cpu_damage);
 			priv->undamaged = false;
 			list_del(&priv->list);
-			if (priv->cpu_bo)
+			if (priv->cpu_bo) {
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
+			}
 
 			return true;
 		}
 
 skip_inplace_map:
-		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
+		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo)) {
 			if (priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 
@@ -941,6 +986,7 @@ skip_inplace_map:
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -982,7 +1028,7 @@ skip_inplace_map:
 	}
 
 	if (priv->clear) {
-		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+		if (priv->cpu_bo && !priv->cpu_bo->sync && kgem_bo_is_busy(priv->cpu_bo))
 			sna_pixmap_free_cpu(sna, priv);
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = true;
@@ -1282,7 +1328,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			}
 		}
 
-		if (priv->cpu_bo && !priv->cpu_bo->vmap) {
+		if (priv->cpu_bo && !priv->cpu_bo->sync) {
 			if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 				kgem_retire(&sna->kgem);
 			if (sync_will_stall(priv->cpu_bo)) {
@@ -1372,7 +1418,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 			assert(pixmap_contains_damage(pixmap, priv->gpu_damage));
 
 			ok = FALSE;
-			if (priv->cpu_bo && sna->kgem.gen >= 60)
+			if (priv->cpu_bo && sna->kgem.gen >= 30)
 				ok = sna->render.copy_boxes(sna, GXcopy,
 							    pixmap, priv->gpu_bo, 0, 0,
 							    pixmap, priv->cpu_bo, 0, 0,
@@ -1910,6 +1956,9 @@ use_cpu_bo:
 	if (priv->cpu_bo == NULL)
 		return NULL;
 
+	if (priv->cpu_bo->sync && !kgem_bo_is_busy(priv->cpu_bo))
+		return NULL;
+
 	/* Continue to use the shadow pixmap once mapped */
 	if (pixmap->devPrivate.ptr) {
 		/* But only if we do not need to sync the CPU bo */
@@ -2084,6 +2133,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		sna_damage_destroy(&priv->cpu_damage);
 		priv->undamaged = false;
 		list_del(&priv->list);
+		assert(!priv->cpu_bo->sync);
 		sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv);
 	}
 
@@ -2098,8 +2148,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	BoxPtr box;
 	int n;
 
-	assert(pixmap->usage_hint != CREATE_PIXMAP_USAGE_SCRATCH_HEADER);
-
 	DBG(("%s(pixmap=%ld, usage=%d)\n",
 	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
 
@@ -2209,8 +2257,10 @@ done:
 			      pixmap->drawable.height);
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
-		if (priv->ptr)
+		if (priv->ptr) {
+			assert(!priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
+		}
 	}
 active:
 	return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
@@ -2410,8 +2460,6 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	struct kgem_bo *src_bo;
-	Bool ok = FALSE;
 	BoxPtr box;
 	int nbox;
 	int16_t dx, dy;
@@ -2423,14 +2471,16 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	     __FUNCTION__, nbox,
 	     box->x1, box->y1, box->x2, box->y2));
 
+	if (gc->alu != GXcopy)
+		return FALSE;
+
 	if (priv->gpu_bo == NULL &&
 	    !sna_pixmap_create_mappable_gpu(pixmap))
 		return FALSE;
 
 	assert(priv->gpu_bo);
 
-	if (gc->alu == GXcopy &&
-	    !priv->pinned && nbox == 1 &&
+	if (!priv->pinned && nbox == 1 &&
 	    box->x1 <= 0 && box->y1 <= 0 &&
 	    box->x2 >= pixmap->drawable.width &&
 	    box->y2 >= pixmap->drawable.height)
@@ -2440,25 +2490,10 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	x += dx + drawable->x;
 	y += dy + drawable->y;
 
-	src_bo = kgem_create_map(&sna->kgem, bits, stride*h, 1);
-	if (src_bo) {
-		src_bo->pitch = stride;
-		ok = sna->render.copy_boxes(sna, gc->alu,
-					    pixmap, src_bo, -x, -y,
-					    pixmap, priv->gpu_bo, 0, 0,
-					    box, nbox);
-		kgem_bo_destroy(&sna->kgem, src_bo);
-	}
-
-	if (!ok && gc->alu == GXcopy)
-		ok = sna_write_boxes(sna, pixmap,
-				     priv->gpu_bo, 0, 0,
-				     bits,
-				     stride,
-				     -x, -y,
-				     box, nbox);
-
-	return ok;
+	return sna_write_boxes(sna, pixmap,
+			       priv->gpu_bo, 0, 0,
+			       bits, stride, -x, -y,
+			       box, nbox);
 }
 
 static bool upload_inplace(struct sna *sna,
@@ -2561,7 +2596,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (sync_will_stall(priv->cpu_bo) && priv->cpu_bo->exec == NULL)
 			kgem_retire(&sna->kgem);
 		if (sync_will_stall(priv->cpu_bo)) {
-			if (priv->cpu_bo->vmap) {
+			if (priv->cpu_bo->sync) {
 				if (sna_put_image_upload_blt(drawable, gc, region,
 							     x, y, w, h, bits, stride)) {
 					if (!DAMAGE_IS_ALL(priv->gpu_damage)) {
@@ -2603,6 +2638,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 					list_del(&priv->list);
 					priv->undamaged = false;
 				}
+				assert(!priv->cpu_bo->sync);
 				sna_pixmap_free_cpu(sna, priv);
 			}
 		}
@@ -3236,6 +3272,22 @@ static bool copy_use_gpu_bo(struct sna *sna,
 	return kgem_bo_is_busy(priv->cpu_bo);
 }
 
+static bool
+copy_use_cpu_bo(struct sna_pixmap *priv, struct kgem_bo *dst_bo)
+{
+	if (priv == NULL || priv->cpu_bo == NULL)
+		return false;
+
+	if (PREFER_VMAP) {
+		return true;
+	} else {
+		if (kgem_bo_is_busy(priv->cpu_bo) || kgem_bo_is_busy(dst_bo))
+			return true;
+
+		return !priv->cpu_bo->sync;
+	}
+}
+
 static void
 sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	       BoxPtr box, int n,
@@ -3433,7 +3485,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 					RegionTranslate(&region, -dst_dx, -dst_dy);
 				}
 			}
-		} else if (src_priv && src_priv->cpu_bo) {
+		} else if (copy_use_cpu_bo(src_priv, dst_priv->gpu_bo)) {
 			if (!sna->render.copy_boxes(sna, alu,
 						    src_pixmap, src_priv->cpu_bo, src_dx, src_dy,
 						    dst_pixmap, dst_priv->gpu_bo, dst_dx, dst_dy,
@@ -11445,17 +11497,30 @@ static Bool sna_change_window_attributes(WindowPtr win, unsigned long mask)
 }
 
 static void
+sna_accel_reply_callback(CallbackListPtr *list,
+			 pointer user_data, pointer call_data)
+{
+	struct sna *sna = user_data;
+	ReplyInfoRec *info = call_data;
+
+	if (sna->flush || !info->startOfReply)
+		return;
+
+	sna->flush = sna->kgem.flush || sna->kgem.sync;
+}
+
+static void
 sna_accel_flush_callback(CallbackListPtr *list,
 			 pointer user_data, pointer call_data)
 {
 	struct sna *sna = user_data;
 	struct list preserve;
 
-	if ((sna->kgem.sync|sna->kgem.flush) == 0 &&
-	    list_is_empty(&sna->dirty_pixmaps))
+	if (!sna->flush)
 		return;
 
-	DBG(("%s\n", __FUNCTION__));
+	DBG(("%s: need_sync=%d, need_flush=%d, dirty? %d\n", __FUNCTION__,
+	     sna->kgem.sync!=NULL, sna->kgem.flush, !list_is_empty(&sna->dirty_pixmaps)));
 
 	/* flush any pending damage from shadow copies to tfp clients */
 	list_init(&preserve);
@@ -11476,35 +11541,9 @@ sna_accel_flush_callback(CallbackListPtr *list,
 	kgem_submit(&sna->kgem);
 	sna->kgem.flush_now = 0;
 
-	if (sna->kgem.sync) {
-		kgem_sync(&sna->kgem);
+	kgem_sync(&sna->kgem);
 
-		while (!list_is_empty(&sna->deferred_free)) {
-			struct sna_pixmap *priv =
-				list_first_entry(&sna->deferred_free,
-						 struct sna_pixmap,
-						 list);
-			list_del(&priv->list);
-			kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-			fbDestroyPixmap(priv->pixmap);
-			free(priv);
-		}
-	}
-}
-
-static void sna_deferred_free(struct sna *sna)
-{
-	struct sna_pixmap *priv, *next;
-
-	list_for_each_entry_safe(priv, next, &sna->deferred_free, list) {
-		if (kgem_bo_is_busy(priv->cpu_bo))
-			continue;
-
-		list_del(&priv->list);
-		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
-		fbDestroyPixmap(priv->pixmap);
-		free(priv);
-	}
+	sna->flush = false;
 }
 
 static struct sna_pixmap *sna_accel_scanout(struct sna *sna)
@@ -11768,6 +11807,7 @@ static void sna_accel_inactive(struct sna *sna)
 			sna_damage_destroy(&priv->cpu_damage);
 			list_del(&priv->list);
 
+			assert(!priv->cpu_bo->sync);
 			sna_pixmap_free_cpu(sna, priv);
 			priv->undamaged = false;
 
@@ -11819,10 +11859,14 @@ Bool sna_accel_pre_init(struct sna *sna)
 	return TRUE;
 }
 
+static ShmFuncs shm_funcs = { sna_pixmap_create_shm, NULL };
+
 Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 {
 	const char *backend;
 
+	if (!AddCallback(&ReplyCallback, sna_accel_reply_callback, sna))
+		return FALSE;
 	if (!AddCallback(&FlushCallback, sna_accel_flush_callback, sna))
 		return FALSE;
 
@@ -11830,7 +11874,6 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	screen->RealizeFont = sna_realize_font;
 	screen->UnrealizeFont = sna_unrealize_font;
 
-	list_init(&sna->deferred_free);
 	list_init(&sna->dirty_pixmaps);
 	list_init(&sna->active_pixmaps);
 	list_init(&sna->inactive_clock[0]);
@@ -11866,6 +11909,9 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
 	}
 #endif
 
+	if (USE_SHM_VMAP && sna->kgem.has_vmap)
+		ShmRegisterFuncs(screen, &shm_funcs);
+
 	backend = "no";
 	sna->have_render = false;
 	sna->default_tiling = I915_TILING_X;
@@ -11933,6 +11979,7 @@ void sna_accel_close(struct sna *sna)
 	sna_glyphs_close(sna);
 
 	DeleteCallback(&FlushCallback, sna_accel_flush_callback, sna);
+	DeleteCallback(&ReplyCallback, sna_accel_reply_callback, sna);
 
 	kgem_cleanup_cache(&sna->kgem);
 }
@@ -11976,8 +12023,6 @@ void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 	for (id = 0; id < NUM_TIMERS; id++)
 		if (active & (1 << id) && FD_ISSET(sna->timer[id], ready))
 			sna->timer_ready |= 1 << id;
-
-	sna_deferred_free(sna);
 }
 
 void sna_accel_free(struct sna *sna)
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index a9ec899..8c51a77 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -907,7 +907,7 @@ prepare_blt_clear(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	op->blt   = blt_composite_fill;
+	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
 		op->boxes = blt_composite_fill_boxes;
@@ -915,7 +915,7 @@ prepare_blt_clear(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = nop_done;
+	op->done = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt,
 				 op->dst.bo,
@@ -930,7 +930,7 @@ prepare_blt_fill(struct sna *sna,
 {
 	DBG(("%s\n", __FUNCTION__));
 
-	op->blt   = blt_composite_fill;
+	op->blt = blt_composite_fill;
 	if (op->dst.x|op->dst.y) {
 		op->box   = blt_composite_fill_box;
 		op->boxes = blt_composite_fill_boxes;
@@ -938,7 +938,7 @@ prepare_blt_fill(struct sna *sna,
 		op->box   = blt_composite_fill_box_no_offset;
 		op->boxes = blt_composite_fill_boxes_no_offset;
 	}
-	op->done  = nop_done;
+	op->done = nop_done;
 
 	return sna_blt_fill_init(sna, &op->u.blt, op->dst.bo,
 				 op->dst.pixmap->drawable.bitsPerPixel,
@@ -1126,9 +1126,9 @@ prepare_blt_copy(struct sna *sna,
 	DBG(("%s\n", __FUNCTION__));
 
 	if (sna->kgem.gen >= 60)
-		op->done  = gen6_blt_copy_done;
+		op->done = gen6_blt_copy_done;
 	else
-		op->done  = nop_done;
+		op->done = nop_done;
 
 	if (alpha_fixup) {
 		op->blt   = blt_composite_copy_with_alpha;
@@ -1153,14 +1153,6 @@ prepare_blt_copy(struct sna *sna,
 	}
 }
 
-static void blt_vmap_done(struct sna *sna, const struct sna_composite_op *op)
-{
-	struct kgem_bo *bo = (struct kgem_bo *)op->u.blt.src_pixmap;
-
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-}
-
 fastcall static void
 blt_put_composite(struct sna *sna,
 		  const struct sna_composite_op *op,
@@ -1395,26 +1387,18 @@ prepare_blt_put(struct sna *sna,
 		uint32_t alpha_fixup)
 {
 	PixmapPtr src = op->u.blt.src_pixmap;
-	struct sna_pixmap *priv = sna_pixmap_attach(src);
-	struct kgem_bo *src_bo = NULL;
-	struct kgem_bo *free_bo = NULL;
+	struct sna_pixmap *priv;
+	struct kgem_bo *src_bo;
 
 	DBG(("%s\n", __FUNCTION__));
 
-	if (priv) {
+	op->done = nop_done;
+
+	src_bo = NULL;
+	priv = _sna_pixmap_attach(src);
+	if (priv)
 		src_bo = priv->cpu_bo;
-	} else {
-		src_bo = kgem_create_map(&sna->kgem,
-					 src->devPrivate.ptr,
-					 pixmap_size(src),
-					 0);
-		free_bo = src_bo;
-	}
 	if (src_bo) {
-		op->u.blt.src_pixmap = (void *)free_bo;
-		op->done = blt_vmap_done;
-
-		src_bo->pitch = src->devKind;
 		if (alpha_fixup) {
 			op->blt   = blt_composite_copy_with_alpha;
 			op->box   = blt_composite_copy_box_with_alpha;
@@ -1435,12 +1419,15 @@ prepare_blt_put(struct sna *sna,
 						 GXcopy);
 		}
 	} else {
-		if (alpha_fixup)
-			return FALSE; /* XXX */
-
 		if (!sna_pixmap_move_to_cpu(src, MOVE_READ))
 			return FALSE;
 
+		assert(src->devKind);
+		assert(src->devPrivate.ptr);
+
+		if (alpha_fixup)
+			return FALSE; /* XXX */
+
 		if (alpha_fixup) {
 			op->u.blt.pixel = alpha_fixup;
 			op->blt   = blt_put_composite_with_alpha;
@@ -1451,7 +1438,6 @@ prepare_blt_put(struct sna *sna,
 			op->box   = blt_put_composite_box;
 			op->boxes = blt_put_composite_boxes;
 		}
-		op->done  = nop_done;
 	}
 
 	return TRUE;
commit 272f5d9f8407d8084846b429c1722bddb3e861e9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Mar 1 11:10:03 2012 +0000

    sna: Discard use of inplace GTT uploads on LLC architectures
    
    As the buffer is cache-coherent, we can read as well as write to any
    partial buffer so the distinction is irrelevant.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ee81c1b..3ba2ec3 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1171,7 +1171,6 @@ static void kgem_retire_partials(struct kgem *kgem)
 		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
 		     bo->base.handle, bo->used, bytes(&bo->base)));
 
-		assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
 		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
 		bo->base.dirty = false;
 		bo->base.needs_flush = false;
@@ -1377,7 +1376,6 @@ static void kgem_finish_partials(struct kgem *kgem)
 			continue;
 
 		if (bo->mmapped) {
-			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
 			assert(!bo->need_io);
 			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
@@ -3242,10 +3240,14 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	/* we should never be asked to create anything TOO large */
 	assert(size <= kgem->max_cpu_size);
 
+	if (kgem->has_llc)
+		flags &= ~KGEM_BUFFER_WRITE_INPLACE;
+
 	list_for_each_entry(bo, &kgem->partial, base.list) {
 		/* We can reuse any write buffer which we can fit */
 		if (flags == KGEM_BUFFER_LAST &&
-		    bo->write == KGEM_BUFFER_WRITE && bo->base.exec &&
+		    bo->write == KGEM_BUFFER_WRITE &&
+		    bo->base.exec && !bo->mmapped &&
 		    size <= bytes(&bo->base)) {
 			assert(bo->base.refcnt == 1);
 			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
commit 43b1a717bae047c7ebbf99e6fa4c03b7a67896b8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 13:37:14 2012 +0000

    sna: Sort the partial buffers after stealing a write buffer
    
    It will be decoupled and not used again, but this keeps the sanity
    checks happy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3dd7863..ee81c1b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3254,7 +3254,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				  0, bo->used, bo->mem);
 			bo->need_io = 0;
 			bo->write = 0;
-			offset = bo->used = 0;
+			offset = 0;
+			bo->used = size;
+			bubble_sort_partial(kgem, bo);
 			goto done;
 		}
 
commit 8198e5872c3771e2aefabe1e3e93afa94d2ea0ec
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 28 10:42:19 2012 +0000

    sna/gen3: Tweak glyph rendering fast paths
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 8f597cf..bd1eddd 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -524,6 +524,31 @@ gen3_emit_composite_primitive_identity_source(struct sna *sna,
 }
 
 fastcall static void
+gen3_emit_composite_primitive_identity_source_no_offset(struct sna *sna,
+							const struct sna_composite_op *op,
+							const struct sna_composite_rectangles *r)
+{
+	float w = r->width;
+	float h = r->height;
+	float *v;
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	v[8] = v[4] = r->dst.x;
+	v[9] = r->dst.y;
+
+	v[0] = v[4] + w;
+	v[5] = v[1] = v[9] + h;
+
+	v[10] = v[6] = r->src.x * op->src.scale[0];
+	v[11] = r->src.y * op->src.scale[1];
+
+	v[2] = v[6] + w * op->src.scale[0];
+	v[7] = v[3] = v[11] + h * op->src.scale[1];
+}
+
+fastcall static void
 gen3_emit_composite_primitive_affine_source(struct sna *sna,
 					    const struct sna_composite_op *op,
 					    const struct sna_composite_rectangles *r)
@@ -584,6 +609,31 @@ gen3_emit_composite_primitive_constant_identity_mask(struct sna *sna,
 }
 
 fastcall static void
+gen3_emit_composite_primitive_constant_identity_mask_no_offset(struct sna *sna,
+							       const struct sna_composite_op *op,
+							       const struct sna_composite_rectangles *r)
+{
+	float w = r->width;
+	float h = r->height;
+	float *v;
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 12;
+
+	v[8] = v[4] = r->dst.x;
+	v[9] = r->dst.y;
+
+	v[0] = v[4] + w;
+	v[5] = v[1] = v[9] + h;
+
+	v[10] = v[6] = r->mask.x * op->mask.scale[0];
+	v[11] = r->mask.y * op->mask.scale[1];
+
+	v[2] = v[6] + w * op->mask.scale[0];
+	v[7] = v[3] = v[11] + h * op->mask.scale[1];
+}
+
+fastcall static void
 gen3_emit_composite_primitive_identity_source_mask(struct sna *sna,
 						   const struct sna_composite_op *op,
 						   const struct sna_composite_rectangles *r)
@@ -2831,17 +2881,23 @@ gen3_render_composite(struct sna *sna,
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_gradient;
 			break;
 		case SHADER_TEXTURE:
-			if (tmp->src.transform == NULL)
-				tmp->prim_emit = gen3_emit_composite_primitive_identity_source;
-			else if (tmp->src.is_affine)
+			if (tmp->src.transform == NULL) {
+				if ((tmp->src.offset[0]|tmp->src.offset[1]|tmp->dst.x|tmp->dst.y) == 0)
+					tmp->prim_emit = gen3_emit_composite_primitive_identity_source_no_offset;
+				else
+					tmp->prim_emit = gen3_emit_composite_primitive_identity_source;
+			} else if (tmp->src.is_affine)
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_source;
 			break;
 		}
 	} else if (tmp->mask.u.gen3.type == SHADER_TEXTURE) {
 		if (tmp->mask.transform == NULL) {
-			if (is_constant_ps(tmp->src.u.gen3.type))
-				tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask;
-			else if (tmp->src.transform == NULL)
+			if (is_constant_ps(tmp->src.u.gen3.type)) {
+				if ((tmp->mask.offset[0]|tmp->mask.offset[1]|tmp->dst.x|tmp->dst.y) == 0)
+					tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask_no_offset;
+				else
+					tmp->prim_emit = gen3_emit_composite_primitive_constant_identity_mask;
+			} else if (tmp->src.transform == NULL)
 				tmp->prim_emit = gen3_emit_composite_primitive_identity_source_mask;
 			else if (tmp->src.is_affine)
 				tmp->prim_emit = gen3_emit_composite_primitive_affine_source_mask;
commit 3c4f29820bca336af2c997bafc7ef288b455813c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 27 16:29:38 2012 +0000

    uxa/gen3: Remove special casing of solid pictures
    
    Fixes use of alpha-groups and opacity masks in cairo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/i915_render.c b/src/i915_render.c
index 87d2336..6210035 100644
--- a/src/i915_render.c
+++ b/src/i915_render.c
@@ -257,11 +257,8 @@ i915_check_composite_texture(ScreenPtr screen, PicturePtr picture)
 		return FALSE;
 	}
 
-	if (picture->pSourcePict) {
-		SourcePict *source = picture->pSourcePict;
-		if (source->type == SourcePictTypeSolidFill)
-			return TRUE;
-	}
+	if (picture->pSourcePict)
+		return FALSE;
 
 	if (picture->pDrawable) {
 		int w, h, i;
@@ -387,23 +384,6 @@ static Bool i915_texture_setup(PicturePtr picture, PixmapPtr pixmap, int unit)
 }
 
 static void
-i915_emit_composite_primitive_constant(intel_screen_private *intel,
-				       int srcX, int srcY,
-				       int maskX, int maskY,
-				       int dstX, int dstY,
-				       int w, int h)
-{
-	OUT_VERTEX(dstX + w);
-	OUT_VERTEX(dstY + h);
-
-	OUT_VERTEX(dstX);
-	OUT_VERTEX(dstY + h);
-
-	OUT_VERTEX(dstX);
-	OUT_VERTEX(dstY);
-}
-
-static void
 i915_emit_composite_primitive_identity_source(intel_screen_private *intel,
 					      int srcX, int srcY,
 					      int maskX, int maskY,
@@ -470,29 +450,6 @@ i915_emit_composite_primitive_affine_source(intel_screen_private *intel,
 }
 
 static void
-i915_emit_composite_primitive_constant_identity_mask(intel_screen_private *intel,
-						     int srcX, int srcY,
-						     int maskX, int maskY,
-						     int dstX, int dstY,
-						     int w, int h)
-{
-	OUT_VERTEX(dstX + w);
-	OUT_VERTEX(dstY + h);
-	OUT_VERTEX((maskX + w) * intel->scale_units[0][0]);
-	OUT_VERTEX((maskY + h) * intel->scale_units[0][1]);
-
-	OUT_VERTEX(dstX);
-	OUT_VERTEX(dstY + h);
-	OUT_VERTEX(maskX * intel->scale_units[0][0]);
-	OUT_VERTEX((maskY + h) * intel->scale_units[0][1]);
-
-	OUT_VERTEX(dstX);
-	OUT_VERTEX(dstY);
-	OUT_VERTEX(maskX * intel->scale_units[0][0]);
-	OUT_VERTEX(maskY * intel->scale_units[0][1]);
-}
-
-static void
 i915_emit_composite_primitive_identity_source_mask(intel_screen_private *intel,
 						   int srcX, int srcY,
 						   int maskX, int maskY,
@@ -536,63 +493,61 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 
 	per_vertex = 2;		/* dest x/y */
 
-	if (! intel->render_source_is_solid) {
-		src_unit = tex_unit++;
-
-		is_affine_src = intel_transform_is_affine(intel->transform[src_unit]);
-		if (is_affine_src) {
-			if (!intel_get_transformed_coordinates(srcX, srcY,
-							      intel->
-							      transform[src_unit],
-							      &src_x[0],
-							      &src_y[0]))
-				return;
-
-			if (!intel_get_transformed_coordinates(srcX, srcY + h,
-							      intel->
-							      transform[src_unit],
-							      &src_x[1],
-							      &src_y[1]))
-				return;
-
-			if (!intel_get_transformed_coordinates(srcX + w, srcY + h,
-							      intel->
-							      transform[src_unit],
-							      &src_x[2],
-							      &src_y[2]))
-				return;
-
-			per_vertex += 2;	/* src x/y */
-		} else {
-			if (!intel_get_transformed_coordinates_3d(srcX, srcY,
-								 intel->
-								 transform[src_unit],
-								 &src_x[0],
-								 &src_y[0],
-								 &src_w[0]))
-				return;
-
-			if (!intel_get_transformed_coordinates_3d(srcX, srcY + h,
-								 intel->
-								 transform[src_unit],
-								 &src_x[1],
-								 &src_y[1],
-								 &src_w[1]))
-				return;
-
-			if (!intel_get_transformed_coordinates_3d(srcX + w, srcY + h,
-								 intel->
-								 transform[src_unit],
-								 &src_x[2],
-								 &src_y[2],
-								 &src_w[2]))
-				return;
-
-			per_vertex += 4;	/* src x/y/z/w */
-		}
+	src_unit = tex_unit++;
+
+	is_affine_src = intel_transform_is_affine(intel->transform[src_unit]);
+	if (is_affine_src) {
+		if (!intel_get_transformed_coordinates(srcX, srcY,
+						      intel->
+						      transform[src_unit],
+						      &src_x[0],
+						      &src_y[0]))
+			return;
+
+		if (!intel_get_transformed_coordinates(srcX, srcY + h,
+						      intel->
+						      transform[src_unit],
+						      &src_x[1],
+						      &src_y[1]))
+			return;
+
+		if (!intel_get_transformed_coordinates(srcX + w, srcY + h,
+						      intel->
+						      transform[src_unit],
+						      &src_x[2],
+						      &src_y[2]))
+			return;
+
+		per_vertex += 2;	/* src x/y */
+	} else {
+		if (!intel_get_transformed_coordinates_3d(srcX, srcY,
+							 intel->
+							 transform[src_unit],
+							 &src_x[0],
+							 &src_y[0],
+							 &src_w[0]))
+			return;
+
+		if (!intel_get_transformed_coordinates_3d(srcX, srcY + h,
+							 intel->
+							 transform[src_unit],
+							 &src_x[1],
+							 &src_y[1],
+							 &src_w[1]))
+			return;
+
+		if (!intel_get_transformed_coordinates_3d(srcX + w, srcY + h,
+							 intel->
+							 transform[src_unit],
+							 &src_x[2],
+							 &src_y[2],
+							 &src_w[2]))
+			return;
+
+		per_vertex += 4;	/* src x/y/z/w */
 	}
 
-	if (intel->render_mask && ! intel->render_mask_is_solid) {
+	if (intel->render_mask) {
 		mask_unit = tex_unit++;
 
 		is_affine_mask = intel_transform_is_affine(intel->transform[mask_unit]);
@@ -650,15 +605,13 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 
 	OUT_VERTEX(dstX + w);
 	OUT_VERTEX(dstY + h);
-	if (! intel->render_source_is_solid) {
-	    OUT_VERTEX(src_x[2] * intel->scale_units[src_unit][0]);
-	    OUT_VERTEX(src_y[2] * intel->scale_units[src_unit][1]);
-	    if (!is_affine_src) {
+	OUT_VERTEX(src_x[2] * intel->scale_units[src_unit][0]);
+	OUT_VERTEX(src_y[2] * intel->scale_units[src_unit][1]);
+	if (!is_affine_src) {
 		OUT_VERTEX(0.0);
 		OUT_VERTEX(src_w[2]);
-	    }
 	}
-	if (intel->render_mask && ! intel->render_mask_is_solid) {
+	if (intel->render_mask) {
 		OUT_VERTEX(mask_x[2] * intel->scale_units[mask_unit][0]);
 		OUT_VERTEX(mask_y[2] * intel->scale_units[mask_unit][1]);
 		if (!is_affine_mask) {
@@ -669,15 +622,13 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 
 	OUT_VERTEX(dstX);
 	OUT_VERTEX(dstY + h);
-	if (! intel->render_source_is_solid) {
-	    OUT_VERTEX(src_x[1] * intel->scale_units[src_unit][0]);
-	    OUT_VERTEX(src_y[1] * intel->scale_units[src_unit][1]);
-	    if (!is_affine_src) {
+	OUT_VERTEX(src_x[1] * intel->scale_units[src_unit][0]);
+	OUT_VERTEX(src_y[1] * intel->scale_units[src_unit][1]);
+	if (!is_affine_src) {
 		OUT_VERTEX(0.0);
 		OUT_VERTEX(src_w[1]);
-	    }
 	}
-	if (intel->render_mask && ! intel->render_mask_is_solid) {
+	if (intel->render_mask) {
 		OUT_VERTEX(mask_x[1] * intel->scale_units[mask_unit][0]);
 		OUT_VERTEX(mask_y[1] * intel->scale_units[mask_unit][1]);
 		if (!is_affine_mask) {
@@ -688,15 +639,13 @@ i915_emit_composite_primitive(intel_screen_private *intel,
 
 	OUT_VERTEX(dstX);
 	OUT_VERTEX(dstY);
-	if (! intel->render_source_is_solid) {
-	    OUT_VERTEX(src_x[0] * intel->scale_units[src_unit][0]);
-	    OUT_VERTEX(src_y[0] * intel->scale_units[src_unit][1]);
-	    if (!is_affine_src) {
+	OUT_VERTEX(src_x[0] * intel->scale_units[src_unit][0]);
+	OUT_VERTEX(src_y[0] * intel->scale_units[src_unit][1]);
+	if (!is_affine_src) {
 		OUT_VERTEX(0.0);
 		OUT_VERTEX(src_w[0]);
-	    }
 	}
-	if (intel->render_mask && ! intel->render_mask_is_solid) {
+	if (intel->render_mask) {
 		OUT_VERTEX(mask_x[0] * intel->scale_units[mask_unit][0]);
 		OUT_VERTEX(mask_y[0] * intel->scale_units[mask_unit][1]);
 		if (!is_affine_mask) {
@@ -729,29 +678,11 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 	intel->render_dest_picture = dest_picture;
 	intel->render_dest = dest;
 
-	intel->render_source_is_solid = FALSE;
-	if (source_picture->pSourcePict) {
-		SourcePict *source = source_picture->pSourcePict;
-		if (source->type == SourcePictTypeSolidFill) {
-			intel->render_source_is_solid = TRUE;
-			intel->render_source_solid = source->solidFill.color;
-		}
-	}
-	if (!intel->render_source_is_solid && !intel_check_pitch_3d(source))
+	if (!intel_check_pitch_3d(source))
 		return FALSE;
 
-	intel->render_mask_is_solid = FALSE;
-	if (mask) {
-		if (mask_picture->pSourcePict) {
-			SourcePict *source = mask_picture->pSourcePict;
-			if (source->type == SourcePictTypeSolidFill) {
-				intel->render_mask_is_solid = TRUE;
-				intel->render_mask_solid = source->solidFill.color;
-			}
-		}
-		if (!intel->render_mask_is_solid && !intel_check_pitch_3d(mask))
-			return FALSE;
-	}
+	if (mask && !intel_check_pitch_3d(mask))
+		return FALSE;
 
 	if (!intel_check_pitch_3d(dest))
 		return FALSE;
@@ -787,31 +718,27 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 	intel->scale_units[1][1] = -1;
 
 	floats_per_vertex = 2;		/* dest x/y */
-	if (! intel->render_source_is_solid) {
-		if (!i915_texture_setup(source_picture, source, tex_unit++)) {
-			intel_debug_fallback(scrn, "fail to setup src texture\n");
-			return FALSE;
-		}
-
-		if (intel_transform_is_affine(source_picture->transform))
-			floats_per_vertex += 2;	/* src x/y */
-		else
-			floats_per_vertex += 4;	/* src x/y/z/w */
+	if (!i915_texture_setup(source_picture, source, tex_unit++)) {
+		intel_debug_fallback(scrn, "fail to setup src texture\n");
+		return FALSE;
 	}
 
-	if (mask != NULL) {
-		if (! intel->render_mask_is_solid) {
-			if (!i915_texture_setup(mask_picture, mask, tex_unit++)) {
-				intel_debug_fallback(scrn,
-						"fail to setup mask texture\n");
-				return FALSE;
-			}
+	if (intel_transform_is_affine(source_picture->transform))
+		floats_per_vertex += 2;	/* src x/y */
+	else
+		floats_per_vertex += 4;	/* src x/y/z/w */
 
-			if (intel_transform_is_affine(mask_picture->transform))
-				floats_per_vertex += 2;	/* mask x/y */
-			else
-				floats_per_vertex += 4;	/* mask x/y/z/w */
+	if (mask != NULL) {
+		if (!i915_texture_setup(mask_picture, mask, tex_unit++)) {
+			intel_debug_fallback(scrn,
+					     "fail to setup mask texture\n");
+			return FALSE;
 		}
+
+		if (intel_transform_is_affine(mask_picture->transform))
+			floats_per_vertex += 2;	/* mask x/y */
+		else
+			floats_per_vertex += 4;	/* mask x/y/z/w */
 	}
 
 	intel->i915_render_state.op = op;
@@ -827,17 +754,13 @@ i915_prepare_composite(int op, PicturePtr source_picture,
 
 	intel->prim_emit = i915_emit_composite_primitive;
 	if (!mask) {
-		if (intel->render_source_is_solid)
-			intel->prim_emit = i915_emit_composite_primitive_constant;
-		else if (intel->transform[0] == NULL)
+		if (intel->transform[0] == NULL)
 			intel->prim_emit = i915_emit_composite_primitive_identity_source;
 		else if (intel_transform_is_affine(intel->transform[0]))
 			intel->prim_emit = i915_emit_composite_primitive_affine_source;
 	} else {
 		if (intel->transform[0] == NULL) {
-			if (intel->render_source_is_solid)
-				intel->prim_emit = i915_emit_composite_primitive_constant_identity_mask;
-			else if (intel->transform[1] == NULL)
+			if (intel->transform[1] == NULL)
 				intel->prim_emit = i915_emit_composite_primitive_identity_source_mask;
 		}
 	}
@@ -856,39 +779,25 @@ i915_composite_emit_shader(intel_screen_private *intel, CARD8 op)
 	PicturePtr mask_picture = intel->render_mask_picture;
 	PixmapPtr mask = intel->render_mask;
 	int src_reg, mask_reg;
-	Bool is_solid_src, is_solid_mask;
 	Bool dest_is_alpha = PIXMAN_FORMAT_RGB(intel->render_dest_picture->format) == 0;
-	int tex_unit, t;
 	FS_LOCALS();
 
-	is_solid_src = intel->render_source_is_solid;
-	is_solid_mask = intel->render_mask_is_solid;
-
 	FS_BEGIN();
 
 	/* Declare the registers necessary for our program.  */
-	t = 0;
-	if (is_solid_src) {
-		i915_fs_dcl(FS_T8);
-		src_reg = FS_T8;
-	} else {
-		i915_fs_dcl(FS_T0);
-		i915_fs_dcl(FS_S0);
-		t++;
-	}
+	i915_fs_dcl(FS_T0);
+	i915_fs_dcl(FS_S0);
 	if (!mask) {
 		/* No mask, so load directly to output color */
-		if (! is_solid_src) {
-			if (dest_is_alpha)
-				src_reg = FS_R0;
-			else
-				src_reg = FS_OC;
+		if (dest_is_alpha)
+			src_reg = FS_R0;
+		else
+			src_reg = FS_OC;
 
-			if (intel_transform_is_affine(intel->transform[0]))
-				i915_fs_texld(src_reg, FS_S0, FS_T0);
-			else
-				i915_fs_texldp(src_reg, FS_S0, FS_T0);
-		}
+		if (intel_transform_is_affine(intel->transform[0]))
+			i915_fs_texld(src_reg, FS_S0, FS_T0);
+		else
+			i915_fs_texldp(src_reg, FS_S0, FS_T0);
 
 		if (src_reg != FS_OC) {
 			if (dest_is_alpha)
@@ -897,35 +806,24 @@ i915_composite_emit_shader(intel_screen_private *intel, CARD8 op)
 				i915_fs_mov(FS_OC, i915_fs_operand_reg(src_reg));
 		}
 	} else {
-		if (is_solid_mask) {
-			i915_fs_dcl(FS_T9);
-			mask_reg = FS_T9;
-		} else {
-			i915_fs_dcl(FS_T0 + t);
-			i915_fs_dcl(FS_S0 + t);
-		}
+		i915_fs_dcl(FS_T1);
+		i915_fs_dcl(FS_S1);
 
-		tex_unit = 0;
-		if (! is_solid_src) {
-			/* Load the source_picture texel */
-			if (intel_transform_is_affine(intel->transform[tex_unit]))
-				i915_fs_texld(FS_R0, FS_S0, FS_T0);
-			else
-				i915_fs_texldp(FS_R0, FS_S0, FS_T0);
+		/* Load the source_picture texel */
+		if (intel_transform_is_affine(intel->transform[0]))
+			i915_fs_texld(FS_R0, FS_S0, FS_T0);
+		else
+			i915_fs_texldp(FS_R0, FS_S0, FS_T0);
 
-			src_reg = FS_R0;
-			tex_unit++;
-		}
+		src_reg = FS_R0;
 
-		if (! is_solid_mask) {
-			/* Load the mask_picture texel */
-			if (intel_transform_is_affine(intel->transform[tex_unit]))
-				i915_fs_texld(FS_R1, FS_S0 + t, FS_T0 + t);
-			else
-				i915_fs_texldp(FS_R1, FS_S0 + t, FS_T0 + t);
+		/* Load the mask_picture texel */
+		if (intel_transform_is_affine(intel->transform[1]))
+			i915_fs_texld(FS_R1, FS_S1, FS_T1);
+		else
+			i915_fs_texldp(FS_R1, FS_S1, FS_T1);
 
-			mask_reg = FS_R1;
-		}
+		mask_reg = FS_R1;
 
 		if (dest_is_alpha) {
 			i915_fs_mul(FS_OC,
@@ -972,7 +870,6 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 	PicturePtr dest_picture = intel->render_dest_picture;
 	PixmapPtr mask = intel->render_mask;
 	PixmapPtr dest = intel->render_dest;
-	Bool is_solid_src, is_solid_mask;
 	int tex_count, t;
 
 	intel->needs_render_state_emit = FALSE;
@@ -980,12 +877,7 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 	IntelEmitInvarientState(scrn);
 	intel->last_3d = LAST_3D_RENDER;
 
-	is_solid_src = intel->render_source_is_solid;
-	is_solid_mask = intel->render_mask_is_solid;
-
-	tex_count = 0;
-	tex_count += ! is_solid_src;
-	tex_count += mask && ! is_solid_mask;
+	tex_count = 1 + (mask != NULL);
 
 	assert(intel->in_batch_atomic);
 
@@ -1007,15 +899,6 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 	    }
 	}
 
-	if (is_solid_src) {
-	    OUT_BATCH (_3DSTATE_DFLT_DIFFUSE_CMD);
-	    OUT_BATCH (intel->render_source_solid);
-	}
-	if (mask && is_solid_mask) {
-	    OUT_BATCH (_3DSTATE_DFLT_SPEC_CMD);
-	    OUT_BATCH (intel->render_mask_solid);
-	}
-
 	/* BUF_INFO is an implicit flush, so avoid if the target has not changed.
 	 * XXX However for reasons unfathomed, correct rendering in KDE requires
 	 * at least a MI_FLUSH | INHIBIT_RENDER_CACHE_FLUSH here.
@@ -1058,20 +941,15 @@ static void i915_emit_composite_setup(ScrnInfoPtr scrn)
 		uint32_t ss2;
 
 		ss2 = ~0;
-		t = 0;
-		if (! is_solid_src) {
-		    ss2 &= ~S2_TEXCOORD_FMT(t, TEXCOORDFMT_NOT_PRESENT);
-		    ss2 |= S2_TEXCOORD_FMT(t,
-					   intel_transform_is_affine(intel->transform[t]) ?
-					   TEXCOORDFMT_2D : TEXCOORDFMT_4D);
-		    t++;
-		}
-		if (mask && ! is_solid_mask) {
-		    ss2 &= ~S2_TEXCOORD_FMT(t, TEXCOORDFMT_NOT_PRESENT);
-		    ss2 |= S2_TEXCOORD_FMT(t,
-					   intel_transform_is_affine(intel->transform[t]) ?
+		ss2 &= ~S2_TEXCOORD_FMT(0, TEXCOORDFMT_NOT_PRESENT);
+		ss2 |= S2_TEXCOORD_FMT(0,
+				       intel_transform_is_affine(intel->transform[0]) ?
+				       TEXCOORDFMT_2D : TEXCOORDFMT_4D);
+		if (mask) {
+		    ss2 &= ~S2_TEXCOORD_FMT(1, TEXCOORDFMT_NOT_PRESENT);
+		    ss2 |= S2_TEXCOORD_FMT(1,
+					   intel_transform_is_affine(intel->transform[1]) ?
 					   TEXCOORDFMT_2D : TEXCOORDFMT_4D);
-		    t++;
 		}
 
 		if (intel->needs_render_ca_pass) {
commit 8f3066f0c70654f04f4acadf140e3c5c8dda051d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 27 13:58:58 2012 +0000

    sna/gen2: Initialise channel->is_opaque for fills
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 64b4e7c..c3a16cb 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1116,10 +1116,7 @@ gen2_composite_solid_init(struct sna *sna,
 {
 	channel->filter = PictFilterNearest;
 	channel->repeat = RepeatNormal;
-	channel->is_affine = TRUE;
 	channel->is_solid  = TRUE;
-	channel->is_linear = FALSE;
-	channel->transform = NULL;
 	channel->width  = 1;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1169,11 +1166,7 @@ gen2_composite_linear_init(struct sna *sna,
 
 	channel->filter = PictFilterNearest;
 	channel->repeat = picture->repeat ? picture->repeatType : RepeatNone;
-	channel->is_affine = TRUE;
-	channel->is_opaque = FALSE;
-	channel->is_solid  = FALSE;
 	channel->is_linear = TRUE;
-	channel->transform = NULL;
 	channel->width  = channel->bo->pitch / 4;
 	channel->height = 1;
 	channel->pict_format = PICT_a8r8g8b8;
@@ -1331,6 +1324,9 @@ gen2_composite_picture(struct sna *sna,
 
 	channel->is_solid = FALSE;
 	channel->is_linear = FALSE;
+	channel->is_opaque = FALSE;
+	channel->is_affine = TRUE;
+	channel->transform = NULL;
 
 	if (sna_picture_is_solid(picture, &color))
 		return gen2_composite_solid_init(sna, channel, color);
@@ -1726,7 +1722,7 @@ gen2_render_composite(struct sna *sna,
 		return FALSE;
 	}
 
-	if (mask == NULL && sna->kgem.mode == KGEM_BLT  &&
+	if (mask == NULL && sna->kgem.mode == KGEM_BLT &&
 	    sna_blt_composite(sna, op,
 			      src, dst,
 			      src_x, src_y,
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index a672c46..a9ec899 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -667,7 +667,7 @@ is_solid(PicturePtr picture)
 	}
 
 	if (picture->pDrawable) {
-		if (picture->pDrawable->width == 1 &&
+		if (picture->pDrawable->width  == 1 &&
 		    picture->pDrawable->height == 1 &&
 		    picture->repeat)
 			return TRUE;
commit 3640a0d4cb9e0f115fda9ea36212670f6ccafb22
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 27 12:28:22 2012 +0000

    Revert "meh"
    
    This reverts commit 4adb6967a84af8a04769c2d936a41f4a49ed1428.
    
    Oops, this debugging commit was not intended to be pushed along with the
    bugfix. :(
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 27b9327..3dd7863 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1376,10 +1376,15 @@ static void kgem_finish_partials(struct kgem *kgem)
 		if (!bo->base.exec)
 			continue;
 
-		if (bo->write & KGEM_BUFFER_WRITE_INPLACE) {
-			DBG(("%s: retaining partial upload buffer (%d/%d)\n",
-			     __FUNCTION__, bo->used, bytes(&bo->base)));
-			continue;
+		if (bo->mmapped) {
+			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
+			assert(!bo->need_io);
+			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
+				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
+				     __FUNCTION__, bo->used, bytes(&bo->base)));
+				continue;
+			}
+			goto decouple;
 		}
 
 		if (!bo->used) {
commit 6fd8d74a6aedb6484e53e704d9f44e0bf83ae9ab
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 27 12:06:50 2012 +0000

    sna: Upload the ordinary partial buffers!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 48c131b..27b9327 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1393,6 +1393,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 		assert(bo->base.rq == kgem->next_request);
 		assert(bo->base.domain != DOMAIN_GPU);
+		assert(bo->need_io);
 
 		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
 			struct kgem_bo *shrink;
@@ -1439,16 +1440,16 @@ static void kgem_finish_partials(struct kgem *kgem)
 				bubble_sort_partial(kgem, bo);
 				continue;
 			}
-
-			DBG(("%s: handle=%d, uploading %d/%d\n",
-			     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
-			assert(!kgem_busy(kgem, bo->base.handle));
-			assert(bo->used <= bytes(&bo->base));
-			gem_write(kgem->fd, bo->base.handle,
-				  0, bo->used, bo->mem);
-			bo->need_io = 0;
 		}
 
+		DBG(("%s: handle=%d, uploading %d/%d\n",
+		     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
+		assert(!kgem_busy(kgem, bo->base.handle));
+		assert(bo->used <= bytes(&bo->base));
+		gem_write(kgem->fd, bo->base.handle,
+			  0, bo->used, bo->mem);
+		bo->need_io = 0;
+
 decouple:
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
commit 4adb6967a84af8a04769c2d936a41f4a49ed1428
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 27 11:36:35 2012 +0000

    meh

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 2097994..48c131b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1376,15 +1376,10 @@ static void kgem_finish_partials(struct kgem *kgem)
 		if (!bo->base.exec)
 			continue;
 
-		if (bo->mmapped) {
-			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
-			assert(!bo->need_io);
-			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
-				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
-				     __FUNCTION__, bo->used, bytes(&bo->base)));
-				continue;
-			}
-			goto decouple;
+		if (bo->write & KGEM_BUFFER_WRITE_INPLACE) {
+			DBG(("%s: retaining partial upload buffer (%d/%d)\n",
+			     __FUNCTION__, bo->used, bytes(&bo->base)));
+			continue;
 		}
 
 		if (!bo->used) {
@@ -3471,8 +3466,6 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 			DBG(("%s: created handle=%d for buffer\n",
 			     __FUNCTION__, bo->base.handle));
-
-			bo->base.domain = DOMAIN_CPU;
 		}
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
commit 4fbb0baff525115037f7e5a1689541880a6cdafb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Feb 26 22:34:30 2012 +0000

    sna: Avoid reusing mmapped partial write buffers for readback
    
    An artefact of retaining the mmapped partial buffers is that it
    magnified the effect of stealing those for readback, causing extra
    writes on non-llc platforms.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4c70ad9..2097994 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1378,11 +1378,13 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 		if (bo->mmapped) {
 			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
+			assert(!bo->need_io);
 			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
 				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
 				     __FUNCTION__, bo->used, bytes(&bo->base)));
 				continue;
 			}
+			goto decouple;
 		}
 
 		if (!bo->used) {
@@ -1395,55 +1397,52 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		assert(bo->base.rq == kgem->next_request);
-		if (bo->used && bo->need_io) {
-			assert(bo->base.domain != DOMAIN_GPU);
-
-			if (bo->base.refcnt == 1 &&
-			    bo->used < bytes(&bo->base) / 2) {
-				struct kgem_bo *shrink;
-
-				shrink = search_linear_cache(kgem,
-							     PAGE_ALIGN(bo->used),
-							     CREATE_INACTIVE);
-				if (shrink) {
-					int n;
-
-					DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
-					     __FUNCTION__,
-					     bo->used, bytes(&bo->base), bytes(shrink),
-					     bo->base.handle, shrink->handle));
-
-					assert(bo->used <= bytes(shrink));
-					gem_write(kgem->fd, shrink->handle,
-						  0, bo->used, bo->mem);
-
-					for (n = 0; n < kgem->nreloc; n++) {
-						if (kgem->reloc[n].target_handle == bo->base.handle) {
-							kgem->reloc[n].target_handle = shrink->handle;
-							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
-							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
-								kgem->reloc[n].delta + shrink->presumed_offset;
-						}
+		assert(bo->base.domain != DOMAIN_GPU);
+
+		if (bo->base.refcnt == 1 && bo->used < bytes(&bo->base) / 2) {
+			struct kgem_bo *shrink;
+
+			shrink = search_linear_cache(kgem,
+						     PAGE_ALIGN(bo->used),
+						     CREATE_INACTIVE);
+			if (shrink) {
+				int n;
+
+				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
+				     __FUNCTION__,
+				     bo->used, bytes(&bo->base), bytes(shrink),
+				     bo->base.handle, shrink->handle));
+
+				assert(bo->used <= bytes(shrink));
+				gem_write(kgem->fd, shrink->handle,
+					  0, bo->used, bo->mem);
+
+				for (n = 0; n < kgem->nreloc; n++) {
+					if (kgem->reloc[n].target_handle == bo->base.handle) {
+						kgem->reloc[n].target_handle = shrink->handle;
+						kgem->reloc[n].presumed_offset = shrink->presumed_offset;
+						kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
+							kgem->reloc[n].delta + shrink->presumed_offset;
 					}
-
-					bo->base.exec->handle = shrink->handle;
-					bo->base.exec->offset = shrink->presumed_offset;
-					shrink->exec = bo->base.exec;
-					shrink->rq = bo->base.rq;
-					list_replace(&bo->base.request,
-						     &shrink->request);
-					list_init(&bo->base.request);
-					shrink->needs_flush = bo->base.dirty;
-
-					bo->base.exec = NULL;
-					bo->base.rq = NULL;
-					bo->base.dirty = false;
-					bo->base.needs_flush = false;
-					bo->used = 0;
-
-					bubble_sort_partial(kgem, bo);
-					continue;
 				}
+
+				bo->base.exec->handle = shrink->handle;
+				bo->base.exec->offset = shrink->presumed_offset;
+				shrink->exec = bo->base.exec;
+				shrink->rq = bo->base.rq;
+				list_replace(&bo->base.request,
+					     &shrink->request);
+				list_init(&bo->base.request);
+				shrink->needs_flush = bo->base.dirty;
+
+				bo->base.exec = NULL;
+				bo->base.rq = NULL;
+				bo->base.dirty = false;
+				bo->base.needs_flush = false;
+				bo->used = 0;
+
+				bubble_sort_partial(kgem, bo);
+				continue;
 			}
 
 			DBG(("%s: handle=%d, uploading %d/%d\n",
@@ -3243,29 +3242,19 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	assert(size <= kgem->max_cpu_size);
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
-		if (flags == KGEM_BUFFER_LAST && bo->write) {
-			/* We can reuse any write buffer which we can fit */
-			if (size <= bytes(&bo->base)) {
-				if (bo->base.refcnt == 1 && bo->base.exec) {
-					DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
-					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
-					gem_write(kgem->fd, bo->base.handle,
-						  0, bo->used, bo->mem);
-					bo->need_io = 0;
-					bo->write = 0;
-					offset = 0;
-					goto done;
-				} else if (bo->used + size <= bytes(&bo->base)) {
-					DBG(("%s: reusing unfinished write buffer for read of %d bytes? used=%d, total=%d\n",
-					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
-					gem_write(kgem->fd, bo->base.handle,
-						  0, bo->used, bo->mem);
-					bo->need_io = 0;
-					bo->write = 0;
-					offset = bo->used;
-					goto done;
-				}
-			}
+		/* We can reuse any write buffer which we can fit */
+		if (flags == KGEM_BUFFER_LAST &&
+		    bo->write == KGEM_BUFFER_WRITE && bo->base.exec &&
+		    size <= bytes(&bo->base)) {
+			assert(bo->base.refcnt == 1);
+			DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
+			     __FUNCTION__, size, bo->used, bytes(&bo->base)));
+			gem_write(kgem->fd, bo->base.handle,
+				  0, bo->used, bo->mem);
+			bo->need_io = 0;
+			bo->write = 0;
+			offset = bo->used = 0;
+			goto done;
 		}
 
 		if ((bo->write & KGEM_BUFFER_WRITE) != (flags & KGEM_BUFFER_WRITE) ||
commit a3c398a6731874ba47e0a46bbd42bf9378e12ab8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 25 10:59:14 2012 +0000

    sna: Retain unfinished partial buffers between batches
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4051892..4c70ad9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1131,6 +1131,56 @@ static void kgem_bo_unref(struct kgem *kgem, struct kgem_bo *bo)
 		__kgem_bo_destroy(kgem, bo);
 }
 
+static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
+{
+	int remain = bytes(&bo->base) - bo->used;
+
+	while (bo->base.list.prev != &kgem->partial) {
+		struct kgem_partial_bo *p;
+
+		p = list_entry(bo->base.list.prev,
+			       struct kgem_partial_bo,
+			       base.list);
+		if (remain <= bytes(&p->base) - p->used)
+			break;
+
+		assert(p->base.list.next == &bo->base.list);
+		bo->base.list.prev = p->base.list.prev;
+		p->base.list.prev->next = &bo->base.list;
+		p->base.list.prev = &bo->base.list;
+
+		p->base.list.next = bo->base.list.next;
+		bo->base.list.next->prev = &p->base.list;
+		bo->base.list.next = &p->base.list;
+
+		assert(p->base.list.next->prev == &p->base.list);
+		assert(bo->base.list.prev->next == &bo->base.list);
+	}
+}
+
+static void kgem_retire_partials(struct kgem *kgem)
+{
+	struct kgem_partial_bo *bo, *next;
+
+	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
+		if (bo->used == 0 || !bo->mmapped)
+			continue;
+		if (bo->base.refcnt != 1 || bo->base.rq)
+			continue;
+
+		DBG(("%s: handle=%d, used %d/%d\n", __FUNCTION__,
+		     bo->base.handle, bo->used, bytes(&bo->base)));
+
+		assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
+		assert(kgem->has_llc || !IS_CPU_MAP(bo->base.map));
+		bo->base.dirty = false;
+		bo->base.needs_flush = false;
+		bo->used = 0;
+
+		bubble_sort_partial(kgem, bo);
+	}
+}
+
 bool kgem_retire(struct kgem *kgem)
 {
 	struct kgem_bo *bo, *next;
@@ -1233,6 +1283,8 @@ bool kgem_retire(struct kgem *kgem)
 		free(rq);
 	}
 
+	kgem_retire_partials(kgem);
+
 	kgem->need_retire = !list_is_empty(&kgem->requests);
 	DBG(("%s -- need_retire=%d\n", __FUNCTION__, kgem->need_retire));
 
@@ -1311,33 +1363,6 @@ static void kgem_close_inactive(struct kgem *kgem)
 		kgem_close_list(kgem, &kgem->inactive[i]);
 }
 
-static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
-{
-	int remain = bytes(&bo->base) - bo->used;
-
-	while (bo->base.list.prev != &kgem->partial) {
-		struct kgem_partial_bo *p;
-
-		p = list_entry(bo->base.list.prev,
-			       struct kgem_partial_bo,
-			       base.list);
-		if (remain <= bytes(&p->base) - p->used)
-			break;
-
-		assert(p->base.list.next == &bo->base.list);
-		bo->base.list.prev = p->base.list.prev;
-		p->base.list.prev->next = &bo->base.list;
-		p->base.list.prev = &bo->base.list;
-
-		p->base.list.next = bo->base.list.next;
-		bo->base.list.next->prev = &p->base.list;
-		bo->base.list.next = &p->base.list;
-
-		assert(p->base.list.next->prev == &p->base.list);
-		assert(bo->base.list.prev->next == &bo->base.list);
-	}
-}
-
 static void kgem_finish_partials(struct kgem *kgem)
 {
 	struct kgem_partial_bo *bo, *next;
@@ -1348,10 +1373,18 @@ static void kgem_finish_partials(struct kgem *kgem)
 			goto decouple;
 		}
 
-		assert(bo->base.domain != DOMAIN_GPU);
 		if (!bo->base.exec)
 			continue;
 
+		if (bo->mmapped) {
+			assert(bo->write & KGEM_BUFFER_WRITE_INPLACE);
+			if (kgem->has_llc || !IS_CPU_MAP(bo->base.map)) {
+				DBG(("%s: retaining partial upload buffer (%d/%d)\n",
+				     __FUNCTION__, bo->used, bytes(&bo->base)));
+				continue;
+			}
+		}
+
 		if (!bo->used) {
 			/* Unless we replace the handle in the execbuffer,
 			 * then this bo will become active. So decouple it
@@ -1363,6 +1396,8 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 		assert(bo->base.rq == kgem->next_request);
 		if (bo->used && bo->need_io) {
+			assert(bo->base.domain != DOMAIN_GPU);
+
 			if (bo->base.refcnt == 1 &&
 			    bo->used < bytes(&bo->base) / 2) {
 				struct kgem_bo *shrink;
@@ -1768,7 +1803,7 @@ static void kgem_expire_partial(struct kgem *kgem)
 	struct kgem_partial_bo *bo, *next;
 
 	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
-		if (bo->base.refcnt > 1 || bo->base.exec)
+		if (bo->base.refcnt > 1 || bo->base.rq)
 			continue;
 
 		DBG(("%s: discarding unused partial buffer: %d/%d, write? %d\n",
@@ -3214,11 +3249,19 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				if (bo->base.refcnt == 1 && bo->base.exec) {
 					DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
 					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
+					gem_write(kgem->fd, bo->base.handle,
+						  0, bo->used, bo->mem);
+					bo->need_io = 0;
+					bo->write = 0;
 					offset = 0;
 					goto done;
 				} else if (bo->used + size <= bytes(&bo->base)) {
 					DBG(("%s: reusing unfinished write buffer for read of %d bytes? used=%d, total=%d\n",
 					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
+					gem_write(kgem->fd, bo->base.handle,
+						  0, bo->used, bo->mem);
+					bo->need_io = 0;
+					bo->write = 0;
 					offset = bo->used;
 					goto done;
 				}
commit 8d773b88f45594f45174dc6f1a264d968690ce84
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 25 09:32:20 2012 +0000

    sna/gen3+: Keep the vertex buffer resident between batches
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b50d067..8f597cf 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1612,20 +1612,33 @@ static int gen3_vertex_finish(struct sna *sna)
 
 static void gen3_vertex_close(struct sna *sna)
 {
-	struct kgem_bo *bo;
-	int delta = 0;
+	struct kgem_bo *bo, *free_bo = NULL;
+	unsigned int delta = 0;
+
+	assert(sna->render_state.gen3.vertex_offset == 0);
 
-	if (!sna->render.vertex_used) {
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
+	if (sna->render.vertex_used == 0) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
 		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
 		return;
 	}
 
-	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
-
 	bo = sna->render.vbo;
-	if (bo == NULL) {
+	if (bo) {
+		if (IS_CPU_MAP(bo->map) ||
+		    sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding vbo (was CPU mapped)\n",
+			     __FUNCTION__));
+			sna->render.vbo = NULL;
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			free_bo = bo;
+		}
+	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -1636,36 +1649,37 @@ static void gen3_vertex_close(struct sna *sna)
 			bo = NULL;
 			sna->kgem.nbatch += sna->render.vertex_used;
 		} else {
-			bo = kgem_create_linear(&sna->kgem,
-						4*sna->render.vertex_used);
-			if (bo && !kgem_bo_write(&sna->kgem, bo,
-						 sna->render.vertex_data,
-						 4*sna->render.vertex_used)) {
-				kgem_bo_destroy(&sna->kgem, bo);
-				goto reset;
-			}
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
+			bo = kgem_create_linear(&sna->kgem,
+						4*sna->render.vertex_used);
+			if (bo)
+				kgem_bo_write(&sna->kgem, bo,
+					      sna->render.vertex_data,
+					      4*sna->render.vertex_used);
+			free_bo = bo;
 		}
 	}
 
 	DBG(("%s: reloc = %d\n", __FUNCTION__,
 	     sna->render.vertex_reloc[0]));
 
-	sna->kgem.batch[sna->render.vertex_reloc[0]] =
-		kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
-			       bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
+	if (sna->render.vertex_reloc[0]) {
+		sna->kgem.batch[sna->render.vertex_reloc[0]] =
+			kgem_add_reloc(&sna->kgem, sna->render.vertex_reloc[0],
+				       bo, I915_GEM_DOMAIN_VERTEX << 16, delta);
+		sna->render.vertex_reloc[0] = 0;
+	}
 
-reset:
-	sna->render.vertex_reloc[0] = 0;
-	sna->render.vertex_used = 0;
-	sna->render.vertex_index = 0;
+	if (sna->render.vbo == NULL) {
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	}
 
-	sna->render.vbo = NULL;
-	sna->render.vertices = sna->render.vertex_data;
-	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	if (free_bo)
+		kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static bool gen3_rectangle_begin(struct sna *sna,
@@ -1885,10 +1899,23 @@ gen3_render_reset(struct sna *sna)
 	state->last_floats_per_vertex = 0;
 	state->last_vertex_offset = 0;
 	state->vertex_offset = 0;
+}
 
-	assert(sna->render.vertex_used == 0);
-	assert(sna->render.vertex_index == 0);
-	assert(sna->render.vertex_reloc[0] == 0);
+static void
+gen3_render_retire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		kgem_bo_destroy(kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 }
 
 static Bool gen3_composite_channel_set_format(struct sna_composite_channel *channel,
@@ -4466,5 +4493,7 @@ Bool gen3_render_init(struct sna *sna)
 
 	render->max_3d_size = MAX_3D_SIZE;
 	render->max_3d_pitch = MAX_3D_PITCH;
+
+	sna->kgem.retire = gen3_render_retire;
 	return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index a80ce0a..bcba0d8 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -410,11 +410,14 @@ static int gen5_vertex_finish(struct sna *sna)
 
 static void gen5_vertex_close(struct sna *sna)
 {
-	struct kgem_bo *bo;
+	struct kgem_bo *bo, *free_bo = NULL;
 	unsigned int i, delta = 0;
 
 	assert(sna->render_state.gen5.vertex_offset == 0);
 
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -422,10 +425,18 @@ static void gen5_vertex_close(struct sna *sna)
 		return;
 	}
 
-	DBG(("%s: used=%d\n", __FUNCTION__, sna->render.vertex_used));
-
 	bo = sna->render.vbo;
-	if (bo == NULL) {
+	if (bo) {
+		if (IS_CPU_MAP(bo->map) ||
+		    sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding vbo (was CPU mapped)\n",
+			     __FUNCTION__));
+			sna->render.vbo = NULL;
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			free_bo = bo;
+		}
+	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -441,10 +452,11 @@ static void gen5_vertex_close(struct sna *sna)
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
 				kgem_bo_destroy(&sna->kgem, bo);
-				goto reset;
+				bo = NULL;
 			}
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
+			free_bo = bo;
 		}
 	}
 
@@ -469,17 +481,13 @@ static void gen5_vertex_close(struct sna *sna)
 		}
 	}
 
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-	sna->render.vertex_used = 0;
-	sna->render.vertex_index = 0;
-	sna->render_state.gen5.vb_id = 0;
+	if (sna->render.vbo == NULL) {
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 
-	sna->render.vbo = NULL;
-	sna->render.vertices = sna->render.vertex_data;
-	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	if (free_bo)
+		kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static uint32_t gen5_get_blend(int op,
@@ -3470,6 +3478,23 @@ gen5_render_context_switch(struct kgem *kgem,
 	}
 }
 
+static void
+gen5_render_retire(struct kgem *kgem)
+{
+	struct sna *sna;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		kgem_bo_destroy(kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
+}
+
 static void gen5_render_reset(struct sna *sna)
 {
 	sna->render_state.gen5.needs_invariant = TRUE;
@@ -3730,6 +3755,7 @@ Bool gen5_render_init(struct sna *sna)
 		return FALSE;
 
 	sna->kgem.context_switch = gen5_render_context_switch;
+	sna->kgem.retire = gen5_render_retire;
 
 	sna->render.composite = gen5_render_composite;
 #if !NO_COMPOSITE_SPANS
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index b69b3a2..439fb52 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -989,9 +989,14 @@ static int gen6_vertex_finish(struct sna *sna)
 
 static void gen6_vertex_close(struct sna *sna)
 {
-	struct kgem_bo *bo;
+	struct kgem_bo *bo, *free_bo = NULL;
 	unsigned int i, delta = 0;
 
+	assert(sna->render_state.gen6.vertex_offset == 0);
+
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -999,13 +1004,16 @@ static void gen6_vertex_close(struct sna *sna)
 		return;
 	}
 
-	DBG(("%s: used=%d / %d\n", __FUNCTION__,
-	     sna->render.vertex_used, sna->render.vertex_size));
-
 	bo = sna->render.vbo;
-	if (bo == NULL) {
-		assert(sna->render.vertices == sna->render.vertex_data);
-		assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
+	if (bo) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+			sna->render.vbo = NULL;
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			free_bo = bo;
+		}
+	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -1021,10 +1029,11 @@ static void gen6_vertex_close(struct sna *sna)
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
 				kgem_bo_destroy(&sna->kgem, bo);
-				goto reset;
+				bo = NULL;
 			}
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
+			free_bo = bo;
 		}
 	}
 
@@ -1049,17 +1058,15 @@ static void gen6_vertex_close(struct sna *sna)
 		}
 	}
 
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-	sna->render.vertex_used = 0;
-	sna->render.vertex_index = 0;
-	sna->render_state.gen6.vb_id = 0;
+	if (sna->render.vbo == NULL) {
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data));
+	}
 
-	sna->render.vbo = NULL;
-	sna->render.vertices = sna->render.vertex_data;
-	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	if (free_bo)
+		kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 typedef struct gen6_surface_state_padded {
@@ -4095,8 +4102,21 @@ gen6_render_context_switch(struct kgem *kgem,
 static void
 gen6_render_retire(struct kgem *kgem)
 {
+	struct sna *sna;
+
 	if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
 		kgem->ring = kgem->mode;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		kgem_bo_destroy(kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 }
 
 static void gen6_render_reset(struct sna *sna)
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 0d913f6..e3d9757 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1086,9 +1086,14 @@ static int gen7_vertex_finish(struct sna *sna)
 
 static void gen7_vertex_close(struct sna *sna)
 {
-	struct kgem_bo *bo;
+	struct kgem_bo *bo, *free_bo = NULL;
 	unsigned int i, delta = 0;
 
+	assert(sna->render_state.gen7.vertex_offset == 0);
+
+	DBG(("%s: used=%d, vbo active? %d\n",
+	     __FUNCTION__, sna->render.vertex_used, sna->render.vbo != NULL));
+
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -1096,11 +1101,16 @@ static void gen7_vertex_close(struct sna *sna)
 		return;
 	}
 
-	DBG(("%s: used=%d / %d\n", __FUNCTION__,
-	     sna->render.vertex_used, sna->render.vertex_size));
-
 	bo = sna->render.vbo;
-	if (bo == NULL) {
+	if (bo) {
+		if (sna->render.vertex_size - sna->render.vertex_used < 64) {
+			DBG(("%s: discarding vbo (full)\n", __FUNCTION__));
+			sna->render.vbo = NULL;
+			sna->render.vertices = sna->render.vertex_data;
+			sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+			free_bo = bo;
+		}
+	} else {
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -1116,10 +1126,11 @@ static void gen7_vertex_close(struct sna *sna)
 						 sna->render.vertex_data,
 						 4*sna->render.vertex_used)) {
 				kgem_bo_destroy(&sna->kgem, bo);
-				goto reset;
+				bo = NULL;
 			}
 			DBG(("%s: new vbo: %d\n", __FUNCTION__,
 			     sna->render.vertex_used));
+			free_bo = bo;
 		}
 	}
 
@@ -1144,17 +1155,13 @@ static void gen7_vertex_close(struct sna *sna)
 		}
 	}
 
-	if (bo)
-		kgem_bo_destroy(&sna->kgem, bo);
-
-reset:
-	sna->render.vertex_used = 0;
-	sna->render.vertex_index = 0;
-	sna->render_state.gen7.vb_id = 0;
+	if (sna->render.vbo == NULL) {
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 
-	sna->render.vbo = NULL;
-	sna->render.vertices = sna->render.vertex_data;
-	sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+	if (free_bo)
+		kgem_bo_destroy(&sna->kgem, free_bo);
 }
 
 static void null_create(struct sna_static_stream *stream)
@@ -4080,8 +4087,21 @@ gen7_render_context_switch(struct kgem *kgem,
 static void
 gen7_render_retire(struct kgem *kgem)
 {
+	struct sna *sna;
+
 	if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire))
 		kgem->ring = kgem->mode;
+
+	sna = container_of(kgem, struct sna, kgem);
+	if (!kgem->need_retire && kgem->nbatch == 0 && sna->render.vbo) {
+		DBG(("%s: discarding vbo\n", __FUNCTION__));
+		kgem_bo_destroy(kgem, sna->render.vbo);
+		sna->render.vbo = NULL;
+		sna->render.vertices = sna->render.vertex_data;
+		sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data);
+		sna->render.vertex_used = 0;
+		sna->render.vertex_index = 0;
+	}
 }
 
 static void gen7_render_reset(struct sna *sna)
commit 8cb773e7c809e1de23cd64d3db862d1f8e7e955a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 25 11:07:16 2012 +0000

    sna: Ensure we trigger a retire for search_linear_cache
    
    Bo used for batch buffers are handled differently and not tracked
    through the active cache, so we failed to notice when we might be able
    to run retire and recover a suitable buffer for reuse. So simply always
    run retire when we might need to create a new linear buffer.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d73fc30..4051892 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1922,22 +1922,32 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 	bool use_active = (flags & CREATE_INACTIVE) == 0;
 	struct list *cache;
 
+	DBG(("%s: num_pages=%d, flags=%x, use_active? %d\n",
+	     __FUNCTION__, num_pages, flags, use_active));
+
 	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
 		return NULL;
 
 	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
-		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE)))
-			return NULL;
+		DBG(("%s: inactive and cache bucket empty\n",
+		     __FUNCTION__));
 
-		if (!kgem_retire(kgem))
+		if (!kgem->need_retire || !kgem_retire(kgem)) {
+			DBG(("%s: nothing retired\n", __FUNCTION__));
 			return NULL;
+		}
 
-		if (list_is_empty(inactive(kgem, num_pages)))
+		if (list_is_empty(inactive(kgem, num_pages))) {
+			DBG(("%s: active cache bucket still empty after retire\n",
+			     __FUNCTION__));
 			return NULL;
+		}
 	}
 
 	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
+		DBG(("%s: searching for inactive %s map\n",
+		     __FUNCTION__, for_cpu ? "cpu" : "gtt"));
 		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
 		list_for_each_entry(bo, cache, vma) {
 			assert(IS_CPU_MAP(bo->map) == for_cpu);
@@ -2111,7 +2121,7 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
 	if (handle == 0)
 		return NULL;
 
-	DBG(("%s: new handle=%d\n", __FUNCTION__, handle));
+	DBG(("%s: new handle=%d, num_pages=%d\n", __FUNCTION__, handle, size));
 	bo = __kgem_bo_alloc(handle, size);
 	if (bo == NULL) {
 		gem_close(kgem->fd, handle);
commit b1b4db8942e69d47aabfad3751165dc2252fa448
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 25 00:43:30 2012 +0000

    sna: Skip a tiled bo when searching the cache for a linear mmap
    
    If we change tiling on a bo, we are effectively discarding the cached
    mmap so it is preferable to look for another.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 007dc04..d73fc30 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1982,7 +1982,9 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		if (num_pages > num_pages(bo))
 			continue;
 
-		if (use_active && bo->tiling != I915_TILING_NONE)
+		if (use_active &&
+		    kgem->gen <= 40 &&
+		    bo->tiling != I915_TILING_NONE)
 			continue;
 
 		if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) {
@@ -1991,7 +1993,10 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 		}
 
 		if (I915_TILING_NONE != bo->tiling) {
-			if (use_active)
+			if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP))
+				continue;
+
+			if (first)
 				continue;
 
 			if (gem_set_tiling(kgem->fd, bo->handle,
commit 85e48d2e5eb029d8c17714cb5e7db39ea06a2455
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 24 21:40:44 2012 +0000

    legacy: Rename XF86DRI to HAVE_DRI1 to avoid conflicts with xorg-server.h
    
    We use the XF86DRI as a user configurable option to control whether to
    build DRI support for i810, but it is also used internally within xorg
    and there exists a public define in xorg-server.h which overrides our
    configure option. So rename our define to HAVE_DRI1 to avoid the
    conflict.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=46590
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index b76ab90..03a35bd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -217,7 +217,7 @@ AC_MSG_RESULT([${DRI-yes}])
 
 AM_CONDITIONAL(DRI, test x$DRI != xno)
 if test "x$DRI" != "xno"; then
-        AC_DEFINE(XF86DRI,1,[Enable DRI driver support])
+        AC_DEFINE(HAVE_DRI1,1,[Enable DRI driver support])
 else
         DRI_CFLAGS=""
         DRI_LIBS=""
diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h
index 183c701..2c0b53e 100644
--- a/src/legacy/i810/i810.h
+++ b/src/legacy/i810/i810.h
@@ -51,7 +51,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "xorg-server.h"
 #include <pciaccess.h>
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
 #include "xf86drm.h"
 #include "sarea.h"
 #define _XF86DRI_SERVER_
@@ -209,7 +209,7 @@ typedef struct _I810Rec {
    Bool directRenderingDisabled;        /* DRI disabled in PreInit */
    Bool directRenderingEnabled;		/* false if XF86DRI not defined. */
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    int LockHeld;
    DRIInfoPtr pDRIInfo;
    int drmSubFD;
@@ -248,7 +248,7 @@ typedef struct _I810Rec {
 #define I810_SELECT_BACK	1
 #define I810_SELECT_DEPTH	2
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
 extern Bool I810DRIScreenInit(ScreenPtr pScreen);
 extern void I810DRICloseScreen(ScreenPtr pScreen);
 extern Bool I810DRIFinishScreenInit(ScreenPtr pScreen);
diff --git a/src/legacy/i810/i810_accel.c b/src/legacy/i810/i810_accel.c
index 9aa3e42..6b57dbb 100644
--- a/src/legacy/i810/i810_accel.c
+++ b/src/legacy/i810/i810_accel.c
@@ -213,7 +213,7 @@ I810WaitLpRing(ScrnInfoPtr pScrn, int n, int timeout_millis)
 		start);
 	 I810PrintErrorState(pScrn);
 	 ErrorF("space: %d wanted %d\n", ring->space, n);
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
 	 if (pI810->directRenderingEnabled) {
 	    DRIUnlock(screenInfo.screens[pScrn->scrnIndex]);
 	    DRICloseScreen(screenInfo.screens[pScrn->scrnIndex]);
@@ -245,7 +245,7 @@ I810Sync(ScrnInfoPtr pScrn)
    if (I810_DEBUG & (DEBUG_VERBOSE_ACCEL | DEBUG_VERBOSE_SYNC))
       ErrorF("I810Sync\n");
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    /* VT switching tries to do this.  
     */
    if (!pI810->LockHeld && pI810->directRenderingEnabled) {
diff --git a/src/legacy/i810/i810_driver.c b/src/legacy/i810/i810_driver.c
index d8f7c45..02da574 100644
--- a/src/legacy/i810/i810_driver.c
+++ b/src/legacy/i810/i810_driver.c
@@ -71,7 +71,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #include "i810.h"
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
 #include "dri.h"
 #endif
 
@@ -132,7 +132,7 @@ int I810_DEBUG = (0
       );
 #endif
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
 static int i810_pitches[] = {
    512,
    1024,
@@ -352,7 +352,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
       }
    }
    
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    pI810->directRenderingDisabled =
      !xf86ReturnOptValBool(pI810->Options, OPTION_DRI, TRUE);
 
@@ -551,7 +551,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
 
    i = xf86ValidateModes(pScrn, pScrn->monitor->Modes,
 			 pScrn->display->modes, clockRanges,
-#ifndef XF86DRI
+#ifndef HAVE_DRI1
 			 0, 320, 1600, 64 * pScrn->bitsPerPixel,
 #else
 			 i810_pitches, 0, 0, 64 * pScrn->bitsPerPixel,
@@ -607,7 +607,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
    pI810->allowPageFlip=FALSE;
    enable = xf86ReturnOptValBool(pI810->Options, OPTION_PAGEFLIP, FALSE);   
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (!pI810->directRenderingDisabled) {
      pI810->allowPageFlip = enable;
      if (pI810->allowPageFlip == TRUE)
@@ -645,7 +645,7 @@ I810PreInit(ScrnInfoPtr pScrn, int flags)
       pI810->numSurfaces = 0;
    }
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    /* Load the dri module if requested. */
    if (xf86ReturnOptValBool(pI810->Options, OPTION_DRI, FALSE)) {
       xf86LoadSubModule(pScrn, "dri");
@@ -1351,7 +1351,7 @@ I810ModeInit(ScrnInfoPtr pScrn, DisplayModePtr mode)
    if (!I810SetMode(pScrn, mode))
       return FALSE;
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       DRILock(screenInfo.screens[pScrn->scrnIndex], 0);
       pI810->LockHeld = 1;
@@ -1360,7 +1360,7 @@ I810ModeInit(ScrnInfoPtr pScrn, DisplayModePtr mode)
 
    DoRestore(pScrn, &hwp->ModeReg, &pI810->ModeReg, FALSE);
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       DRIUnlock(screenInfo.screens[pScrn->scrnIndex]);
       pI810->LockHeld = 0;
@@ -1610,7 +1610,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
     * memory.  Wonder if this is going to be a problem...
     */
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    /*
     * Setup DRI after visuals have been established, but before fbScreenInit
     * is called.   fbScreenInit will eventually call into the drivers
@@ -1676,7 +1676,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
    xf86SetBlackWhitePixels(pScreen);
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->LpRing->mem.Start == 0 && pI810->directRenderingEnabled) {
       pI810->directRenderingEnabled = FALSE;
       I810DRICloseScreen(pScreen);
@@ -1753,7 +1753,7 @@ I810ScreenInit(int scrnIndex, ScreenPtr pScreen, int argc, char **argv)
 
    I810InitVideo(pScreen);
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       /* Now that mi, fb, drm and others have done their thing,
        * complete the DRI setup.
@@ -1803,7 +1803,7 @@ I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
  * If lockups on mode switch are still seen revisit this code. (EE)
  */
 
-# ifdef XF86DRI
+# ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       if (I810_DEBUG & DEBUG_VERBOSE_DRI)
 	 ErrorF("calling dri lock\n");
@@ -1818,7 +1818,7 @@ I810SwitchMode(int scrnIndex, DisplayModePtr mode, int flags)
    }
    I810Restore(pScrn);
 
-# ifdef XF86DRI
+# ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
        if (!I810DRILeave(pScrn))
 	   return FALSE;
@@ -1892,7 +1892,7 @@ I810EnterVT(int scrnIndex, int flags)
 {
    ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    I810Ptr pI810 = I810PTR(pScrn);
 #endif
 
@@ -1902,7 +1902,7 @@ I810EnterVT(int scrnIndex, int flags)
    if (!I810BindGARTMemory(pScrn)) {
       return FALSE;
    }
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (!I810DRIEnter(pScrn)) {
       return FALSE;
    }
@@ -1930,7 +1930,7 @@ I810LeaveVT(int scrnIndex, int flags)
    if (I810_DEBUG & DEBUG_VERBOSE_DRI)
       ErrorF("\n\n\nLeave VT\n");
 
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       if (I810_DEBUG & DEBUG_VERBOSE_DRI)
 	 ErrorF("calling dri lock\n");
@@ -1948,7 +1948,7 @@ I810LeaveVT(int scrnIndex, int flags)
 
    if (!I810UnbindGARTMemory(pScrn))
       return;
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (!I810DRILeave(pScrn))
       return;
 #endif
@@ -1973,7 +1973,7 @@ I810CloseScreen(int scrnIndex, ScreenPtr pScreen)
       I810Restore(pScrn);
       vgaHWLock(hwp);
    }
-#ifdef XF86DRI
+#ifdef HAVE_DRI1
    if (pI810->directRenderingEnabled) {
       I810DRICloseScreen(pScreen);
       pI810->directRenderingEnabled = FALSE;
commit 96db90e819a1990c2d139725e522055e92def959
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 24 21:36:30 2012 +0000

    legacy: Delete unused XF86DRI_DEVEL #define
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46590
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 6eb3289..b76ab90 100644
--- a/configure.ac
+++ b/configure.ac
@@ -218,7 +218,6 @@ AC_MSG_RESULT([${DRI-yes}])
 AM_CONDITIONAL(DRI, test x$DRI != xno)
 if test "x$DRI" != "xno"; then
         AC_DEFINE(XF86DRI,1,[Enable DRI driver support])
-        AC_DEFINE(XF86DRI_DEVEL,1,[Enable developmental DRI driver support])
 else
         DRI_CFLAGS=""
         DRI_LIBS=""
commit b870a3e5cd922ce37050ece73e2469802dd21da2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 24 11:21:49 2012 +0000

    configure, NEWS: Bump version to 2.18.0 for release
    
    Another quarter, a bit late as I was debugging a few regressions,
    another release.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/NEWS b/NEWS
index 2bd476b..9d2b15e 100644
--- a/NEWS
+++ b/NEWS
@@ -1,3 +1,29 @@
+Release 2.18.0 (2012-02-24)
+===========================
+Time passes, a few more bugs have crept out of the woodwork that are a
+compelling reason to update.
+
+Bugs fixed in this release (compared to 2.17.0)
+-----------------------------------------------
+
+* Limit maximum object size so that all of the source, mask and
+  destination can be mapped into the aperture simultaneously by basing the
+  limit on the mappable aperture size rather than the size of the total
+  GATT.
+
+* Incorrect clipping of polygons
+  https://bugs.freedesktop.org/show_bug.cgi?id=43649
+  Regression from 2.15.901
+
+* Limit number of VMA cached to avoid hitting the per-process VMA limit
+  There still is a residual bug in that we seem to have so many objects
+  floating around in the first place and that still leads to exhaustion
+  of system limits.
+  https://bugs.freedesktop.org/show_bug.cgi?id=43075
+  https://bugs.freedesktop.org/show_bug.cgi?id=40066
+
+* Latency in processing user-input during continuous rendering
+
 Release 2.17.0 (2011-11-16)
 ==============================
 A few months have passed, and we have accumulated a surprising number of
diff --git a/configure.ac b/configure.ac
index 2c8bee9..6eb3289 100644
--- a/configure.ac
+++ b/configure.ac
@@ -23,7 +23,7 @@
 # Initialize Autoconf
 AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
-        [2.17.0],
+        [2.18.0],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
commit 5b5cd6780ef7cae8f49d71d7c8532597291402d8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 24 11:14:26 2012 +0000

    uxa: Add a option to disable the bo cache
    
    If you are suffering from regular X crashes and rendering corruption
    with a flood of ENOSPC or even EFILE reported in the Xorg.log, try
    adding this snippet to your xorg.conf:
    
    Section "Driver"
      Option "BufferCache" "False"
    EndSection
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=39552
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_driver.c b/src/intel_driver.c
index 1837509..e2e43fa 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -105,6 +105,7 @@ typedef enum {
    OPTION_DEBUG_WAIT,
    OPTION_HOTPLUG,
    OPTION_RELAXED_FENCING,
+   OPTION_BUFFER_CACHE,
 } I830Opts;
 
 static OptionInfoRec I830Options[] = {
@@ -126,6 +127,7 @@ static OptionInfoRec I830Options[] = {
    {OPTION_DEBUG_WAIT, "DebugWait", OPTV_BOOLEAN, {0}, FALSE},
    {OPTION_HOTPLUG,	"HotPlug",	OPTV_BOOLEAN,	{0},	TRUE},
    {OPTION_RELAXED_FENCING,	"RelaxedFencing",	OPTV_BOOLEAN,	{0},	TRUE},
+   {OPTION_BUFFER_CACHE,	"BufferCache",	OPTV_BOOLEAN,	{0},	TRUE},
    {-1,			NULL,		OPTV_NONE,	{0},	FALSE}
 };
 /* *INDENT-ON* */
@@ -394,7 +396,8 @@ static int intel_init_bufmgr(intel_screen_private *intel)
 	if (!intel->bufmgr)
 		return FALSE;
 
-	drm_intel_bufmgr_gem_enable_reuse(intel->bufmgr);
+	if (xf86ReturnOptValBool(intel->Options, OPTION_BUFFER_CACHE, TRUE))
+		drm_intel_bufmgr_gem_enable_reuse(intel->bufmgr);
 	drm_intel_bufmgr_gem_set_vma_cache_size(intel->bufmgr, 512);
 	drm_intel_bufmgr_gem_enable_fenced_relocs(intel->bufmgr);
 
commit f8ca50818cd4f82e0ad8eeb245e8ee838b083056
Author: Gaetan Nadon <memsize at videotron.ca>
Date:   Sat Feb 18 13:49:02 2012 -0500

    Revert "Update autotools configuration"
    
    This reverts commit 9184af921bc2f332fcb6c9b47001414378eab8e2.
    
    All X.Org modules must be able to be configured with autoconf 2.60.
    In addition, version 2.63 has GPL licensing issues which prevent
    some vendors from releasing software based on it.
    
    The AM_SILENT_RULES are already handled by XORG_DEFAULT_OPTIONS.
    
    All X.Org modules must be able to be configured with libtool 1.5.
    
    AM_MAINTAINER_MODE default value is "enabled" already.
    
    We use the same autogen script for all x.org modules.
    There are proposals for changes which should be reviewed and eventually
    applied to all modules together.
    
    The lt*.m4 patterns are already included in the root .gitignore file.
    This can be proposed as a change to all modules, but it involves
    changing the toplevel .gitignore, the m4/.gitignore, the ACLOCAL_AMFLAGS
    and the AC_CONFIG_MACRO_DIR together.
    
    For more information on project wide configuration guidelines,
    consult http://www.x.org/wiki/ModularDevelopersGuide
    and http://www.x.org/wiki/NewModuleGuidelines.
    
    Acked-by: Matthieu Herrb <matthieu.herrb at laas.fr>
    Signed-off-by: Gaetan Nadon <memsize at videotron.ca>

diff --git a/Makefile.am b/Makefile.am
index 48c3477..b3d37b2 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -18,7 +18,6 @@
 #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-ACLOCAL_AMFLAGS = -I m4 ${ACLOCAL_FLAGS}
 
 SUBDIRS = man
 
diff --git a/autogen.sh b/autogen.sh
index 30d679f..904cd67 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -1,6 +1,12 @@
 #! /bin/sh
 
-test -n "$srcdir" || srcdir=`dirname "$0"`
-test -n "$srcdir" || srcdir=.
-autoreconf --force --install --verbose "$srcdir"
-test -n "$NOCONFIGURE" || "$srcdir/configure" "$@"
+srcdir=`dirname $0`
+test -z "$srcdir" && srcdir=.
+
+ORIGDIR=`pwd`
+cd $srcdir
+
+autoreconf -v --install || exit 1
+cd $ORIGDIR || exit $?
+
+$srcdir/configure --enable-maintainer-mode "$@"
diff --git a/configure.ac b/configure.ac
index 1e77faf..2c8bee9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,24 +21,18 @@
 # Process this file with autoconf to produce a configure script
 
 # Initialize Autoconf
-AC_PREREQ([2.63])
+AC_PREREQ([2.60])
 AC_INIT([xf86-video-intel],
         [2.17.0],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=xorg],
         [xf86-video-intel])
 AC_CONFIG_SRCDIR([Makefile.am])
 AC_CONFIG_HEADERS([config.h])
-AC_CONFIG_AUX_DIR([build-aux])
-AC_CONFIG_MACRO_DIR([m4])
+AC_CONFIG_AUX_DIR(.)
 
 # Initialize Automake
-AM_INIT_AUTOMAKE([1.10 foreign dist-bzip2])
-AM_MAINTAINER_MODE([enable])
-
-# Support silent build rules, requires at least automake-1.11. Disable
-# by either passing --disable-silent-rules to configure or passing V=1
-# to make
-m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
+AM_INIT_AUTOMAKE([foreign dist-bzip2])
+AM_MAINTAINER_MODE
 
 # Require X.Org macros 1.8 or later for MAN_SUBSTS set by XORG_MANPAGE_SECTIONS
 m4_ifndef([XORG_MACROS_VERSION],
@@ -56,8 +50,8 @@ m4_ifndef([XORG_DRIVER_CHECK_EXT],
   depending on your distribution, try package 'xserver-xorg-dev' or 'xorg-x11-server-devel'])])
 
 # Initialize libtool
-LT_PREREQ([2.2])
-LT_INIT([disable-static])
+AC_DISABLE_STATIC
+AC_PROG_LIBTOOL
 
 # Are we in a git checkout?
 dot_git=no
diff --git a/m4/.gitignore b/m4/.gitignore
deleted file mode 100644
index 464ba5c..0000000
--- a/m4/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-libtool.m4
-lt~obsolete.m4
-ltoptions.m4
-ltsugar.m4
-ltversion.m4
commit a647aff5124674b638ef52152a5c16c27466eed7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 12:04:09 2012 +0000

    sna/gen3: Silence the compiler complaining with DBG enabled
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 6828a16..b50d067 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1732,7 +1732,7 @@ inline static int gen3_get_rectangles(struct sna *sna,
 	int rem;
 
 	DBG(("%s: want=%d, rem=%d\n",
-	     __FUNCTION__, want*op->floats_per_rect, rem));
+	     __FUNCTION__, want*op->floats_per_rect, vertex_space(sna)));
 
 	assert(sna->render.vertex_index * op->floats_per_vertex == sna->render.vertex_used);
 
@@ -1742,12 +1742,12 @@ start:
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen3_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (unlikely(rem == 0))
 			goto flush;
 	}
 
-	if (sna->render_state.gen3.vertex_offset == 0 &&
-	    !gen3_rectangle_begin(sna, op))
+	if (unlikely(sna->render_state.gen3.vertex_offset == 0 &&
+		     !gen3_rectangle_begin(sna, op)))
 		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
commit cd3a618f583a546bafbe0c171a267774adc4d72b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:40:57 2012 +0000

    sna/gen4: Refactor get_rectangles() to re-emit state after a flush
    
    Condense the work performed by each caller into the callee.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index cccdf4c..6ba59ee 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1116,19 +1116,12 @@ static bool gen4_rectangle_begin(struct sna *sna,
 	int id = op->u.gen4.ve_id;
 	int ndwords;
 
-	ndwords = 0;
+	/* 7xpipelined pointers + 6xprimitive + 1xflush */
+	ndwords = op->need_magic_ca_pass? 20 : 6;
 	if (FLUSH_EVERY_VERTEX)
 		ndwords += 1;
 	if ((sna->render_state.gen4.vb_id & (1 << id)) == 0)
 		ndwords += 5;
-	if (sna->render_state.gen4.vertex_offset == 0)
-		ndwords += 6;
-	if (ndwords == 0)
-		return true;
-
-	if (op->need_magic_ca_pass)
-		/* 7xpipelined pointers + 6xprimitive + 1xflush */
-		ndwords += 14;
 
 	if (!kgem_check_batch(&sna->kgem, ndwords))
 		return false;
@@ -1159,19 +1152,23 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 
 inline static int gen4_get_rectangles(struct sna *sna,
 				      const struct sna_composite_op *op,
-				      int want)
+				      int want,
+				      void (*emit_state)(struct sna *sna, const struct sna_composite_op *op))
 {
-	int rem = vertex_space(sna);
+	int rem;
 
+start:
+	rem = vertex_space(sna);
 	if (rem < 3*op->floats_per_vertex) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, 3*op->floats_per_vertex));
 		rem = gen4_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (unlikely(rem == 0))
 			goto flush;
 	}
 
-	if (!gen4_rectangle_begin(sna, op))
+	if (unlikely(sna->render_state.gen4.vertex_offset == 0 &&
+		     !gen4_rectangle_begin(sna, op)))
 		goto flush;
 
 	if (want > 1 && want * op->floats_per_vertex*3 > rem)
@@ -1186,7 +1183,8 @@ flush:
 		gen4_magic_ca_pass(sna, op);
 	}
 	_kgem_submit(&sna->kgem);
-	return 0;
+	emit_state(sna, op);
+	goto start;
 }
 
 static uint32_t *gen4_composite_get_binding_table(struct sna *sna,
@@ -1541,11 +1539,7 @@ gen4_render_composite_blt(struct sna *sna,
 			gen4_bind_surfaces(sna, op);
 	}
 
-	if (!gen4_get_rectangles(sna, op, 1)) {
-		gen4_bind_surfaces(sna, op);
-		gen4_get_rectangles(sna, op, 1);
-	}
-
+	gen4_get_rectangles(sna, op, 1, gen4_bind_surfaces);
 	op->prim_emit(sna, op, r);
 
 	/* XXX are the shaders fubar? */
@@ -1629,9 +1623,9 @@ static uint32_t gen4_bind_video_source(struct sna *sna,
 }
 
 static void gen4_video_bind_surfaces(struct sna *sna,
-				     const struct sna_composite_op *op,
-				     struct sna_video_frame *frame)
+				     const struct sna_composite_op *op)
 {
+	struct sna_video_frame *frame = op->priv;
 	uint32_t src_surf_format;
 	uint32_t src_surf_base[6];
 	int src_width[6];
@@ -1732,13 +1726,14 @@ gen4_render_video(struct sna *sna,
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.ve_id = 1;
+	tmp.priv = frame;
 
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
 	}
 
-	gen4_video_bind_surfaces(sna, &tmp, frame);
+	gen4_video_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
 
 	/* Set up the offset for translating from the given region (in screen
@@ -1769,10 +1764,7 @@ gen4_render_video(struct sna *sna,
 		r.y1 = box->y1 + pix_yoff;
 		r.y2 = box->y2 + pix_yoff;
 
-		if (!gen4_get_rectangles(sna, &tmp, 1)) {
-			gen4_video_bind_surfaces(sna, &tmp, frame);
-			gen4_get_rectangles(sna, &tmp, 1);
-		}
+		gen4_get_rectangles(sna, &tmp, 1, gen4_video_bind_surfaces);
 
 		OUT_VERTEX(r.x2, r.y2);
 		OUT_VERTEX_F((box->x2 - dxo) * src_scale_x);
@@ -2393,10 +2385,7 @@ gen4_render_copy_one(struct sna *sna,
 		     int w, int h,
 		     int dx, int dy)
 {
-	if (!gen4_get_rectangles(sna, op, 1)) {
-		gen4_copy_bind_surfaces(sna, op);
-		gen4_get_rectangles(sna, op, 1);
-	}
+	gen4_get_rectangles(sna, op, 1, gen4_copy_bind_surfaces);
 
 	OUT_VERTEX(dx+w, dy+h);
 	OUT_VERTEX_F((sx+w)*op->src.scale[0]);
@@ -2725,10 +2714,7 @@ gen4_render_fill_rectangle(struct sna *sna,
 			   const struct sna_composite_op *op,
 			   int x, int y, int w, int h)
 {
-	if (!gen4_get_rectangles(sna, op, 1)) {
-		gen4_fill_bind_surfaces(sna, op);
-		gen4_get_rectangles(sna, op, 1);
-	}
+	gen4_get_rectangles(sna, op, 1, gen4_fill_bind_surfaces);
 
 	OUT_VERTEX(x+w, y+h);
 	OUT_VERTEX_F(1);
commit 6a3fa4d1b6849b666a9232b017ce82329494621b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:40:57 2012 +0000

    sna/gen7: Refactor get_rectangles() to re-emit state after a flush
    
    Condense the work performed by each caller into the callee.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index d039a48..0d913f6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1664,20 +1664,23 @@ static int gen7_get_rectangles__flush(struct sna *sna,
 
 inline static int gen7_get_rectangles(struct sna *sna,
 				      const struct sna_composite_op *op,
-				      int want)
+				      int want,
+				      void (*emit_state)(struct sna *sna, const struct sna_composite_op *op))
 {
 	int rem = vertex_space(sna);
 
+start:
+	rem = vertex_space(sna);
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen7_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (unlikely(rem == 0))
 			goto flush;
 	}
 
-	if (sna->render_state.gen7.vertex_offset == 0 &&
-	    !gen7_rectangle_begin(sna, op))
+	if (unlikely(sna->render_state.gen7.vertex_offset == 0 &&
+		     !gen7_rectangle_begin(sna, op)))
 		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
@@ -1692,7 +1695,8 @@ flush:
 		gen7_magic_ca_pass(sna, op);
 	}
 	_kgem_submit(&sna->kgem);
-	return 0;
+	emit_state(sna, op);
+	goto start;
 }
 
 inline static uint32_t *gen7_composite_get_binding_table(struct sna *sna,
@@ -1801,11 +1805,7 @@ gen7_render_composite_blt(struct sna *sna,
 			  const struct sna_composite_op *op,
 			  const struct sna_composite_rectangles *r)
 {
-	if (unlikely(!gen7_get_rectangles(sna, op, 1))) {
-		gen7_emit_composite_state(sna, op);
-		gen7_get_rectangles(sna, op, 1);
-	}
-
+	gen7_get_rectangles(sna, op, 1, gen7_emit_composite_state);
 	op->prim_emit(sna, op, r);
 }
 
@@ -1816,10 +1816,7 @@ gen7_render_composite_box(struct sna *sna,
 {
 	struct sna_composite_rectangles r;
 
-	if (unlikely(!gen7_get_rectangles(sna, op, 1))) {
-		gen7_emit_composite_state(sna, op);
-		gen7_get_rectangles(sna, op, 1);
-	}
+	gen7_get_rectangles(sna, op, 1, gen7_emit_composite_state);
 
 	DBG(("  %s: (%d, %d), (%d, %d)\n",
 	     __FUNCTION__,
@@ -1842,12 +1839,12 @@ gen7_render_composite_boxes(struct sna *sna,
 	DBG(("composite_boxes(%d)\n", nbox));
 
 	do {
-		int nbox_this_time = gen7_get_rectangles(sna, op, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen7_emit_composite_state(sna, op);
-			nbox_this_time = gen7_get_rectangles(sna, op, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen7_get_rectangles(sna, op, nbox,
+						     gen7_emit_composite_state);
 		nbox -= nbox_this_time;
+
 		do {
 			struct sna_composite_rectangles r;
 
@@ -1934,9 +1931,9 @@ static uint32_t gen7_bind_video_source(struct sna *sna,
 }
 
 static void gen7_emit_video_state(struct sna *sna,
-				  struct sna_composite_op *op,
-				  struct sna_video_frame *frame)
+				  const struct sna_composite_op *op)
 {
+	struct sna_video_frame *frame = op->priv;
 	uint32_t src_surf_format;
 	uint32_t src_surf_base[6];
 	int src_width[6];
@@ -2055,6 +2052,7 @@ gen7_render_video(struct sna *sna,
 	}
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
+	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
@@ -2063,7 +2061,7 @@ gen7_render_video(struct sna *sna,
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	gen7_emit_video_state(sna, &tmp, frame);
+	gen7_emit_video_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
 	/* Set up the offset for translating from the given region (in screen
@@ -2094,10 +2092,7 @@ gen7_render_video(struct sna *sna,
 		r.y1 = box->y1 + pix_yoff;
 		r.y2 = box->y2 + pix_yoff;
 
-		if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-			gen7_emit_video_state(sna, &tmp, frame);
-			gen7_get_rectangles(sna, &tmp, 1);
-		}
+		gen7_get_rectangles(sna, &tmp, 1, gen7_emit_video_state);
 
 		OUT_VERTEX(r.x2, r.y2);
 		OUT_VERTEX_F((box->x2 - dxo) * src_scale_x);
@@ -2941,11 +2936,7 @@ gen7_render_composite_spans_box(struct sna *sna,
 	     box->x2 - box->x1,
 	     box->y2 - box->y1));
 
-	if (unlikely(gen7_get_rectangles(sna, &op->base, 1) == 0)) {
-		gen7_emit_composite_state(sna, &op->base);
-		gen7_get_rectangles(sna, &op->base, 1);
-	}
-
+	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_composite_state);
 	op->prim_emit(sna, op, box, opacity);
 }
 
@@ -2964,11 +2955,8 @@ gen7_render_composite_spans_boxes(struct sna *sna,
 	do {
 		int nbox_this_time;
 
-		nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen7_emit_composite_state(sna, &op->base);
-			nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
-		}
+		nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
+						     gen7_emit_composite_state);
 		nbox -= nbox_this_time;
 
 		do {
@@ -3356,11 +3344,10 @@ fallback_blt:
 
 	do {
 		float *v;
-		int n_this_time = gen7_get_rectangles(sna, &tmp, n);
-		if (unlikely(n_this_time == 0)) {
-			gen7_emit_copy_state(sna, &tmp);
-			n_this_time = gen7_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time;
+
+		n_this_time = gen7_get_rectangles(sna, &tmp, n,
+						  gen7_emit_copy_state);
 		n -= n_this_time;
 
 		v = sna->render.vertices + sna->render.vertex_used;
@@ -3410,10 +3397,7 @@ gen7_render_copy_blt(struct sna *sna,
 		     int16_t w,  int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		gen7_emit_copy_state(sna, &op->base);
-		gen7_get_rectangles(sna, &op->base, 1);
-	}
+	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_copy_state);
 
 	OUT_VERTEX(dx+w, dy+h);
 	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
@@ -3680,12 +3664,12 @@ gen7_render_fill_boxes(struct sna *sna,
 	gen7_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen7_get_rectangles(sna, &tmp, n);
-		if (unlikely(n_this_time == 0)) {
-			gen7_emit_fill_state(sna, &tmp);
-			n_this_time = gen7_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time;
+
+		n_this_time = gen7_get_rectangles(sna, &tmp, n,
+						  gen7_emit_fill_state);
 		n -= n_this_time;
+
 		do {
 			DBG(("	(%d, %d), (%d, %d)\n",
 			     box->x1, box->y1, box->x2, box->y2));
@@ -3717,10 +3701,7 @@ gen7_render_fill_op_blt(struct sna *sna,
 {
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
-	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		gen7_emit_fill_state(sna, &op->base);
-		gen7_get_rectangles(sna, &op->base, 1);
-	}
+	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
 
 	OUT_VERTEX(x+w, y+h);
 	OUT_VERTEX_F(1);
@@ -3743,10 +3724,7 @@ gen7_render_fill_op_box(struct sna *sna,
 	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
-	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		gen7_emit_fill_state(sna, &op->base);
-		gen7_get_rectangles(sna, &op->base, 1);
-	}
+	gen7_get_rectangles(sna, &op->base, 1, gen7_emit_fill_state);
 
 	OUT_VERTEX(box->x2, box->y2);
 	OUT_VERTEX_F(1);
@@ -3771,11 +3749,10 @@ gen7_render_fill_op_boxes(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2, nbox));
 
 	do {
-		int nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen7_emit_fill_state(sna, &op->base);
-			nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox,
+						     gen7_emit_fill_state);
 		nbox -= nbox_this_time;
 
 		do {
@@ -3967,10 +3944,7 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen7_emit_fill_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
-	if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-		gen7_emit_fill_state(sna, &tmp);
-		gen7_get_rectangles(sna, &tmp, 1);
-	}
+	gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
 	OUT_VERTEX(x2, y2);
@@ -4065,10 +4039,7 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen7_emit_fill_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
-	if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-		gen7_emit_fill_state(sna, &tmp);
-		gen7_get_rectangles(sna, &tmp, 1);
-	}
+	gen7_get_rectangles(sna, &tmp, 1, gen7_emit_fill_state);
 
 	OUT_VERTEX(dst->drawable.width, dst->drawable.height);
 	OUT_VERTEX_F(1);
commit fe914eaca4b9178ab3d14053a703772f93295895
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:40:57 2012 +0000

    sna/gen5: Refactor get_rectangles() to re-emit state after a flush
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 8d6a3e9..a80ce0a 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1130,13 +1130,9 @@ static bool gen5_rectangle_begin(struct sna *sna,
 
 	assert((unsigned)id <= 3);
 
-	ndwords = 0;
+	ndwords = op->need_magic_ca_pass ? 20 : 6;
 	if ((sna->render_state.gen5.vb_id & (1 << id)) == 0)
 		ndwords += 5;
-	if (sna->render_state.gen5.vertex_offset == 0)
-		ndwords += op->need_magic_ca_pass ? 20 : 6;
-	if (ndwords == 0)
-		return true;
 
 	if (!kgem_check_batch(&sna->kgem, ndwords))
 		return false;
@@ -1167,19 +1163,24 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 
 inline static int gen5_get_rectangles(struct sna *sna,
 				      const struct sna_composite_op *op,
-				      int want)
+				      int want,
+				      void (*emit_state)(struct sna *sna,
+							 const struct sna_composite_op *op))
 {
-	int rem = vertex_space(sna);
+	int rem;
 
+start:
+	rem = vertex_space(sna);
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen5_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (unlikely (rem == 0))
 			goto flush;
 	}
 
-	if (!gen5_rectangle_begin(sna, op))
+	if (unlikely(sna->render_state.gen5.vertex_offset == 0 &&
+		     !gen5_rectangle_begin(sna, op)))
 		goto flush;
 
 	if (want * op->floats_per_rect > rem)
@@ -1194,7 +1195,8 @@ flush:
 		gen5_magic_ca_pass(sna, op);
 	}
 	_kgem_submit(&sna->kgem);
-	return 0;
+	emit_state(sna, op);
+	goto start;
 }
 
 static uint32_t *
@@ -1562,11 +1564,7 @@ gen5_render_composite_blt(struct sna *sna,
 	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
 	     r->width, r->height));
 
-	if (!gen5_get_rectangles(sna, op, 1)) {
-		gen5_bind_surfaces(sna, op);
-		gen5_get_rectangles(sna, op, 1);
-	}
-
+	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
 	op->prim_emit(sna, op, r);
 }
 
@@ -1581,10 +1579,7 @@ gen5_render_composite_box(struct sna *sna,
 	     __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
-	if (!gen5_get_rectangles(sna, op, 1)) {
-		gen5_bind_surfaces(sna, op);
-		gen5_get_rectangles(sna, op, 1);
-	}
+	gen5_get_rectangles(sna, op, 1, gen5_bind_surfaces);
 
 	r.dst.x = box->x1;
 	r.dst.y = box->y1;
@@ -1608,11 +1603,10 @@ gen5_render_composite_boxes(struct sna *sna,
 	     op->mask.width, op->mask.height));
 
 	do {
-		int nbox_this_time = gen5_get_rectangles(sna, op, nbox);
-		if (nbox_this_time == 0) {
-			gen5_bind_surfaces(sna, op);
-			nbox_this_time = gen5_get_rectangles(sna, op, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen5_get_rectangles(sna, op, nbox,
+						     gen5_bind_surfaces);
 		nbox -= nbox_this_time;
 
 		do {
@@ -1669,9 +1663,9 @@ static uint32_t gen5_bind_video_source(struct sna *sna,
 }
 
 static void gen5_video_bind_surfaces(struct sna *sna,
-				     struct sna_composite_op *op,
-				     struct sna_video_frame *frame)
+				     const struct sna_composite_op *op)
 {
+	struct sna_video_frame *frame = op->priv;
 	uint32_t src_surf_format;
 	uint32_t src_surf_base[6];
 	int src_width[6];
@@ -1775,13 +1769,14 @@ gen5_render_video(struct sna *sna,
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
+	tmp.priv = frame;
 
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
 	}
 
-	gen5_video_bind_surfaces(sna, &tmp, frame);
+	gen5_video_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
 
 	/* Set up the offset for translating from the given region (in screen
@@ -1812,10 +1807,7 @@ gen5_render_video(struct sna *sna,
 		r.y1 = box->y1 + pix_yoff;
 		r.y2 = box->y2 + pix_yoff;
 
-		if (!gen5_get_rectangles(sna, &tmp, 1)) {
-			gen5_video_bind_surfaces(sna, &tmp, frame);
-			gen5_get_rectangles(sna, &tmp, 1);
-		}
+		gen5_get_rectangles(sna, &tmp, 1, gen5_video_bind_surfaces);
 
 		OUT_VERTEX(r.x2, r.y2);
 		OUT_VERTEX_F((box->x2 - dxo) * src_scale_x);
@@ -2549,11 +2541,7 @@ gen5_render_composite_spans_box(struct sna *sna,
 	     box->x2 - box->x1,
 	     box->y2 - box->y1));
 
-	if (gen5_get_rectangles(sna, &op->base, 1) == 0) {
-		gen5_bind_surfaces(sna, &op->base);
-		gen5_get_rectangles(sna, &op->base, 1);
-	}
-
+	gen5_get_rectangles(sna, &op->base, 1, gen5_bind_surfaces);
 	op->prim_emit(sna, op, box, opacity);
 }
 
@@ -2572,11 +2560,8 @@ gen5_render_composite_spans_boxes(struct sna *sna,
 	do {
 		int nbox_this_time;
 
-		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox);
-		if (nbox_this_time == 0) {
-			gen5_bind_surfaces(sna, &op->base);
-			nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox);
-		}
+		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
+						     gen5_bind_surfaces);
 		nbox -= nbox_this_time;
 
 		do {
@@ -2871,11 +2856,10 @@ fallback_blt:
 	gen5_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen5_get_rectangles(sna, &tmp, n);
-		if (n_this_time == 0) {
-			gen5_copy_bind_surfaces(sna, &tmp);
-			n_this_time = gen5_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time;
+
+		n_this_time = gen5_get_rectangles(sna, &tmp, n,
+						  gen5_copy_bind_surfaces);
 		n -= n_this_time;
 
 		do {
@@ -2926,10 +2910,7 @@ gen5_render_copy_blt(struct sna *sna,
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=(%d, %d)\n", __FUNCTION__,
 	     sx, sy, dx, dy, w, h));
 
-	if (!gen5_get_rectangles(sna, &op->base, 1)) {
-		gen5_copy_bind_surfaces(sna, &op->base);
-		gen5_get_rectangles(sna, &op->base, 1);
-	}
+	gen5_get_rectangles(sna, &op->base, 1, gen5_copy_bind_surfaces);
 
 	OUT_VERTEX(dx+w, dy+h);
 	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
@@ -3173,12 +3154,12 @@ gen5_render_fill_boxes(struct sna *sna,
 	gen5_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen5_get_rectangles(sna, &tmp, n);
-		if (n_this_time == 0) {
-			gen5_fill_bind_surfaces(sna, &tmp);
-			n_this_time = gen5_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time;
+
+		n_this_time = gen5_get_rectangles(sna, &tmp, n,
+						  gen5_fill_bind_surfaces);
 		n -= n_this_time;
+
 		do {
 			DBG(("	(%d, %d), (%d, %d)\n",
 			     box->x1, box->y1, box->x2, box->y2));
@@ -3210,10 +3191,7 @@ gen5_render_fill_op_blt(struct sna *sna,
 {
 	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x,y,w,h));
 
-	if (!gen5_get_rectangles(sna, &op->base, 1)) {
-		gen5_fill_bind_surfaces(sna, &op->base);
-		gen5_get_rectangles(sna, &op->base, 1);
-	}
+	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);
 
 	OUT_VERTEX(x+w, y+h);
 	OUT_VERTEX_F(1);
@@ -3236,10 +3214,7 @@ gen5_render_fill_op_box(struct sna *sna,
 	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
-	if (!gen5_get_rectangles(sna, &op->base, 1)) {
-		gen5_fill_bind_surfaces(sna, &op->base);
-		gen5_get_rectangles(sna, &op->base, 1);
-	}
+	gen5_get_rectangles(sna, &op->base, 1, gen5_fill_bind_surfaces);
 
 	OUT_VERTEX(box->x2, box->y2);
 	OUT_VERTEX_F(1);
@@ -3264,11 +3239,10 @@ gen5_render_fill_op_boxes(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2, nbox));
 
 	do {
-		int nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox);
-		if (nbox_this_time == 0) {
-			gen5_fill_bind_surfaces(sna, &op->base);
-			nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen5_get_rectangles(sna, &op->base, nbox,
+						     gen5_fill_bind_surfaces);
 		nbox -= nbox_this_time;
 
 		do {
@@ -3452,10 +3426,7 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen5_fill_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
 
-	if (!gen5_get_rectangles(sna, &tmp, 1)) {
-		gen5_fill_bind_surfaces(sna, &tmp);
-		gen5_get_rectangles(sna, &tmp, 1);
-	}
+	gen5_get_rectangles(sna, &tmp, 1, gen5_fill_bind_surfaces);
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
 	OUT_VERTEX(x2, y2);
commit 4ecf882c838c1f044b4ed3add486b6579ea2c431
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:40:57 2012 +0000

    sna/gen6: Refactor get_rectangles() to re-emit state after a flush
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index e975328..b69b3a2 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1630,20 +1630,23 @@ static int gen6_get_rectangles__flush(struct sna *sna,
 
 inline static int gen6_get_rectangles(struct sna *sna,
 				      const struct sna_composite_op *op,
-				      int want)
+				      int want,
+				      void (*emit_state)(struct sna *, const struct sna_composite_op *op))
 {
-	int rem = vertex_space(sna);
+	int rem;
 
+start:
+	rem = vertex_space(sna);
 	if (rem < op->floats_per_rect) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen6_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (unlikely(rem == 0))
 			goto flush;
 	}
 
-	if (sna->render_state.gen6.vertex_offset == 0 &&
-	    !gen6_rectangle_begin(sna, op))
+	if (unlikely(sna->render_state.gen6.vertex_offset == 0 &&
+		     !gen6_rectangle_begin(sna, op)))
 		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
@@ -1659,7 +1662,8 @@ flush:
 		gen6_magic_ca_pass(sna, op);
 	}
 	_kgem_submit(&sna->kgem);
-	return 0;
+	emit_state(sna, op);
+	goto start;
 }
 
 inline static uint32_t *gen6_composite_get_binding_table(struct sna *sna,
@@ -1772,11 +1776,7 @@ gen6_render_composite_blt(struct sna *sna,
 			  const struct sna_composite_op *op,
 			  const struct sna_composite_rectangles *r)
 {
-	if (unlikely(!gen6_get_rectangles(sna, op, 1))) {
-		gen6_emit_composite_state(sna, op);
-		gen6_get_rectangles(sna, op, 1);
-	}
-
+	gen6_get_rectangles(sna, op, 1, gen6_emit_composite_state);
 	op->prim_emit(sna, op, r);
 }
 
@@ -1787,10 +1787,7 @@ gen6_render_composite_box(struct sna *sna,
 {
 	struct sna_composite_rectangles r;
 
-	if (unlikely(!gen6_get_rectangles(sna, op, 1))) {
-		gen6_emit_composite_state(sna, op);
-		gen6_get_rectangles(sna, op, 1);
-	}
+	gen6_get_rectangles(sna, op, 1, gen6_emit_composite_state);
 
 	DBG(("  %s: (%d, %d), (%d, %d)\n",
 	     __FUNCTION__,
@@ -1813,13 +1810,12 @@ gen6_render_composite_boxes(struct sna *sna,
 	DBG(("composite_boxes(%d)\n", nbox));
 
 	do {
-		int nbox_this_time = gen6_get_rectangles(sna, op, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen6_emit_composite_state(sna, op);
-			nbox_this_time = gen6_get_rectangles(sna, op, nbox);
-			assert(nbox_this_time);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen6_get_rectangles(sna, op, nbox,
+						     gen6_emit_composite_state);
 		nbox -= nbox_this_time;
+
 		do {
 			struct sna_composite_rectangles r;
 
@@ -1906,9 +1902,9 @@ static uint32_t gen6_bind_video_source(struct sna *sna,
 }
 
 static void gen6_emit_video_state(struct sna *sna,
-				  struct sna_composite_op *op,
-				  struct sna_video_frame *frame)
+				  const struct sna_composite_op *op)
 {
+	struct sna_video_frame *frame = op->priv;
 	uint32_t src_surf_format;
 	uint32_t src_surf_base[6];
 	int src_width[6];
@@ -2029,6 +2025,7 @@ gen6_render_video(struct sna *sna,
 	}
 	tmp.u.gen6.nr_inputs = 1;
 	tmp.u.gen6.ve_id = 1;
+	tmp.priv = frame;
 
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
@@ -2037,7 +2034,7 @@ gen6_render_video(struct sna *sna,
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
-	gen6_emit_video_state(sna, &tmp, frame);
+	gen6_emit_video_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
 	/* Set up the offset for translating from the given region (in screen
@@ -2068,10 +2065,7 @@ gen6_render_video(struct sna *sna,
 		r.y1 = box->y1 + pix_yoff;
 		r.y2 = box->y2 + pix_yoff;
 
-		if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-			gen6_emit_video_state(sna, &tmp, frame);
-			gen6_get_rectangles(sna, &tmp, 1);
-		}
+		gen6_get_rectangles(sna, &tmp, 1, gen6_emit_video_state);
 
 		OUT_VERTEX(r.x2, r.y2);
 		OUT_VERTEX_F((box->x2 - dxo) * src_scale_x);
@@ -2949,11 +2943,7 @@ gen6_render_composite_spans_box(struct sna *sna,
 	     box->x2 - box->x1,
 	     box->y2 - box->y1));
 
-	if (unlikely(gen6_get_rectangles(sna, &op->base, 1) == 0)) {
-		gen6_emit_composite_state(sna, &op->base);
-		gen6_get_rectangles(sna, &op->base, 1);
-	}
-
+	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_composite_state);
 	op->prim_emit(sna, op, box, opacity);
 }
 
@@ -2972,12 +2962,8 @@ gen6_render_composite_spans_boxes(struct sna *sna,
 	do {
 		int nbox_this_time;
 
-		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen6_emit_composite_state(sna, &op->base);
-			nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
-			assert(nbox_this_time);
-		}
+		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox,
+						     gen6_emit_composite_state);
 		nbox -= nbox_this_time;
 
 		do {
@@ -3371,11 +3357,8 @@ fallback_blt:
 
 	do {
 		float *v;
-		int n_this_time = gen6_get_rectangles(sna, &tmp, n);
-		if (unlikely(n_this_time == 0)) {
-			gen6_emit_copy_state(sna, &tmp);
-			n_this_time = gen6_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time = gen6_get_rectangles(sna, &tmp, n,
+						      gen6_emit_copy_state);
 		n -= n_this_time;
 
 		v = sna->render.vertices + sna->render.vertex_used;
@@ -3425,10 +3408,7 @@ gen6_render_copy_blt(struct sna *sna,
 		     int16_t w,  int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		gen6_emit_copy_state(sna, &op->base);
-		gen6_get_rectangles(sna, &op->base, 1);
-	}
+	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_copy_state);
 
 	OUT_VERTEX(dx+w, dy+h);
 	OUT_VERTEX_F((sx+w)*op->base.src.scale[0]);
@@ -3698,11 +3678,8 @@ gen6_render_fill_boxes(struct sna *sna,
 	gen6_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen6_get_rectangles(sna, &tmp, n);
-		if (unlikely(n_this_time == 0)) {
-			gen6_emit_fill_state(sna, &tmp);
-			n_this_time = gen6_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time = gen6_get_rectangles(sna, &tmp, n,
+						      gen6_emit_fill_state);
 		n -= n_this_time;
 		do {
 			DBG(("	(%d, %d), (%d, %d)\n",
@@ -3735,10 +3712,7 @@ gen6_render_op_fill_blt(struct sna *sna,
 {
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
-	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		gen6_emit_fill_state(sna, &op->base);
-		gen6_get_rectangles(sna, &op->base, 1);
-	}
+	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_fill_state);
 
 	OUT_VERTEX(x+w, y+h);
 	OUT_VERTEX_F(1);
@@ -3761,10 +3735,7 @@ gen6_render_op_fill_box(struct sna *sna,
 	DBG(("%s: (%d, %d),(%d, %d)\n", __FUNCTION__,
 	     box->x1, box->y1, box->x2, box->y2));
 
-	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		gen6_emit_fill_state(sna, &op->base);
-		gen6_get_rectangles(sna, &op->base, 1);
-	}
+	gen6_get_rectangles(sna, &op->base, 1, gen6_emit_fill_state);
 
 	OUT_VERTEX(box->x2, box->y2);
 	OUT_VERTEX_F(1);
@@ -3789,11 +3760,10 @@ gen6_render_op_fill_boxes(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2, nbox));
 
 	do {
-		int nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
-		if (unlikely(nbox_this_time == 0)) {
-			gen6_emit_fill_state(sna, &op->base);
-			nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox,
+						     gen6_emit_fill_state);
 		nbox -= nbox_this_time;
 
 		do {
@@ -3987,10 +3957,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen6_emit_fill_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
-	if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-		gen6_emit_fill_state(sna, &tmp);
-		gen6_get_rectangles(sna, &tmp, 1);
-	}
+	gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
 
 	DBG(("	(%d, %d), (%d, %d)\n", x1, y1, x2, y2));
 	OUT_VERTEX(x2, y2);
@@ -4085,10 +4052,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen6_emit_fill_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
-	if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-		gen6_emit_fill_state(sna, &tmp);
-		gen6_get_rectangles(sna, &tmp, 1);
-	}
+	gen6_get_rectangles(sna, &tmp, 1, gen6_emit_fill_state);
 
 	OUT_VERTEX(dst->drawable.width, dst->drawable.height);
 	OUT_VERTEX_F(1);
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 7243042..71a6fc5 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -134,9 +134,9 @@ struct sna_composite_op {
 			int nr_inputs;
 			int ve_id;
 		} gen7;
-
-		void *priv;
 	} u;
+
+	void *priv;
 };
 
 struct sna_composite_spans_op {
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 17ecaea..493e313 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -89,7 +89,7 @@ sna_tiling_composite_blt(struct sna *sna,
 			 const struct sna_composite_op *op,
 			 const struct sna_composite_rectangles *r)
 {
-	sna_tiling_composite_add_rect(op->u.priv, r);
+	sna_tiling_composite_add_rect(op->priv, r);
 	(void)sna;
 }
 
@@ -107,7 +107,7 @@ sna_tiling_composite_box(struct sna *sna,
 	r.width  = box->x2 - box->x1;
 	r.height = box->y2 - box->y1;
 
-	sna_tiling_composite_add_rect(op->u.priv, &r);
+	sna_tiling_composite_add_rect(op->priv, &r);
 	(void)sna;
 }
 
@@ -126,7 +126,7 @@ sna_tiling_composite_boxes(struct sna *sna,
 		r.width  = box->x2 - box->x1;
 		r.height = box->y2 - box->y1;
 
-		sna_tiling_composite_add_rect(op->u.priv, &r);
+		sna_tiling_composite_add_rect(op->priv, &r);
 		box++;
 	}
 	(void)sna;
@@ -136,7 +136,7 @@ static void
 sna_tiling_composite_done(struct sna *sna,
 			  const struct sna_composite_op *op)
 {
-	struct sna_tile_state *tile = op->u.priv;
+	struct sna_tile_state *tile = op->priv;
 	struct sna_composite_op tmp;
 	int x, y, n, step;
 
@@ -312,7 +312,7 @@ sna_tiling_composite(uint32_t op,
 	tmp->boxes = sna_tiling_composite_boxes;
 	tmp->done  = sna_tiling_composite_done;
 
-	tmp->u.priv = tile;
+	tmp->priv = tile;
 	return TRUE;
 }
 
commit dfa21713c27eb9135cb5e1745807324e49422fde
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:29:38 2012 +0000

    sna/gen3: Refactor get_rectangles() to emit composite state and retry
    
    As gen3 only uses the single state emission block, and uniformly calls
    get_rectangles(), we can move that caller protocol into the callee.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index cb85cb9..6828a16 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1729,12 +1729,15 @@ inline static int gen3_get_rectangles(struct sna *sna,
 				      const struct sna_composite_op *op,
 				      int want)
 {
-	int rem = vertex_space(sna);
+	int rem;
 
 	DBG(("%s: want=%d, rem=%d\n",
 	     __FUNCTION__, want*op->floats_per_rect, rem));
 
 	assert(sna->render.vertex_index * op->floats_per_vertex == sna->render.vertex_used);
+
+start:
+	rem = vertex_space(sna);
 	if (op->floats_per_rect > rem) {
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
@@ -1762,7 +1765,8 @@ flush:
 		gen3_magic_ca_pass(sna, op);
 	}
 	_kgem_submit(&sna->kgem);
-	return 0;
+	gen3_emit_composite_state(sna, op);
+	goto start;
 }
 
 fastcall static void
@@ -1776,10 +1780,7 @@ gen3_render_composite_blt(struct sna *sna,
 	     r->dst.x, r->dst.y, op->dst.x, op->dst.y,
 	     r->width, r->height));
 
-	if (!gen3_get_rectangles(sna, op, 1)) {
-		gen3_emit_composite_state(sna, op);
-		gen3_get_rectangles(sna, op, 1);
-	}
+	gen3_get_rectangles(sna, op, 1);
 
 	op->prim_emit(sna, op, r);
 }
@@ -1797,10 +1798,7 @@ gen3_render_composite_box(struct sna *sna,
 	     op->mask.offset[0], op->mask.offset[1],
 	     op->dst.x, op->dst.y));
 
-	if (!gen3_get_rectangles(sna, op, 1)) {
-		gen3_emit_composite_state(sna, op);
-		gen3_get_rectangles(sna, op, 1);
-	}
+	gen3_get_rectangles(sna, op, 1);
 
 	r.dst.x  = box->x1;
 	r.dst.y  = box->y1;
@@ -1826,10 +1824,6 @@ gen3_render_composite_boxes(struct sna *sna,
 		int nbox_this_time;
 
 		nbox_this_time = gen3_get_rectangles(sna, op, nbox);
-		if (nbox_this_time == 0) {
-			gen3_emit_composite_state(sna, op);
-			nbox_this_time = gen3_get_rectangles(sna, op, nbox);
-		}
 		nbox -= nbox_this_time;
 
 		do {
@@ -3110,11 +3104,7 @@ gen3_render_composite_spans_box(struct sna *sna,
 	     box->x2 - box->x1,
 	     box->y2 - box->y1));
 
-	if (!gen3_get_rectangles(sna, &op->base, 1)) {
-		gen3_emit_composite_state(sna, &op->base);
-		gen3_get_rectangles(sna, &op->base, 1);
-	}
-
+	gen3_get_rectangles(sna, &op->base, 1);
 	op->prim_emit(sna, op, box, opacity);
 }
 
@@ -3134,10 +3124,6 @@ gen3_render_composite_spans_boxes(struct sna *sna,
 		int nbox_this_time;
 
 		nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
-		if (nbox_this_time == 0) {
-			gen3_emit_composite_state(sna, &op->base);
-			nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
-		}
 		nbox -= nbox_this_time;
 
 		do {
@@ -3918,10 +3904,6 @@ fallback_blt:
 		int n_this_time;
 
 		n_this_time = gen3_get_rectangles(sna, &tmp, n);
-		if (n_this_time == 0) {
-			gen3_emit_composite_state(sna, &tmp);
-			n_this_time = gen3_get_rectangles(sna, &tmp, n);
-		}
 		n -= n_this_time;
 
 		do {
@@ -3966,10 +3948,7 @@ gen3_render_copy_blt(struct sna *sna,
 		     int16_t w, int16_t h,
 		     int16_t dx, int16_t dy)
 {
-	if (!gen3_get_rectangles(sna, &op->base, 1)) {
-		gen3_emit_composite_state(sna, &op->base);
-		gen3_get_rectangles(sna, &op->base, 1);
-	}
+	gen3_get_rectangles(sna, &op->base, 1);
 
 	OUT_VERTEX(dx+w);
 	OUT_VERTEX(dy+h);
@@ -4215,11 +4194,9 @@ gen3_render_fill_boxes(struct sna *sna,
 	gen3_align_vertex(sna, &tmp);
 
 	do {
-		int n_this_time = gen3_get_rectangles(sna, &tmp, n);
-		if (n_this_time == 0) {
-			gen3_emit_composite_state(sna, &tmp);
-			n_this_time = gen3_get_rectangles(sna, &tmp, n);
-		}
+		int n_this_time;
+
+		n_this_time = gen3_get_rectangles(sna, &tmp, n);
 		n -= n_this_time;
 
 		do {
@@ -4244,10 +4221,7 @@ gen3_render_fill_op_blt(struct sna *sna,
 			const struct sna_fill_op *op,
 			int16_t x, int16_t y, int16_t w, int16_t h)
 {
-	if (!gen3_get_rectangles(sna, &op->base, 1)) {
-		gen3_emit_composite_state(sna, &op->base);
-		gen3_get_rectangles(sna, &op->base, 1);
-	}
+	gen3_get_rectangles(sna, &op->base, 1);
 
 	OUT_VERTEX(x+w);
 	OUT_VERTEX(y+h);
@@ -4262,10 +4236,7 @@ gen3_render_fill_op_box(struct sna *sna,
 			const struct sna_fill_op *op,
 			const BoxRec *box)
 {
-	if (!gen3_get_rectangles(sna, &op->base, 1)) {
-		gen3_emit_composite_state(sna, &op->base);
-		gen3_get_rectangles(sna, &op->base, 1);
-	}
+	gen3_get_rectangles(sna, &op->base, 1);
 
 	OUT_VERTEX(box->x2);
 	OUT_VERTEX(box->y2);
@@ -4285,11 +4256,9 @@ gen3_render_fill_op_boxes(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2, nbox));
 
 	do {
-		int nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
-		if (nbox_this_time == 0) {
-			gen3_emit_composite_state(sna, &op->base);
-			nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
-		}
+		int nbox_this_time;
+
+		nbox_this_time = gen3_get_rectangles(sna, &op->base, nbox);
 		nbox -= nbox_this_time;
 
 		do {
commit a48e6e0db970ad07cd7452ba24c362d0c2fcf1bf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 10:17:34 2012 +0000

    sna/gen3+: Force a batch flush when run out of CA vbo
    
    As we prematurely end the batch if we bail on extending the vbo for CA
    glyphs, we need to force the flush.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 5253a8c..cb85cb9 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1761,6 +1761,7 @@ flush:
 		gen3_vertex_flush(sna);
 		gen3_magic_ca_pass(sna, op);
 	}
+	_kgem_submit(&sna->kgem);
 	return 0;
 }
 
@@ -3109,7 +3110,7 @@ gen3_render_composite_spans_box(struct sna *sna,
 	     box->x2 - box->x1,
 	     box->y2 - box->y1));
 
-	if (gen3_get_rectangles(sna, &op->base, 1) == 0) {
+	if (!gen3_get_rectangles(sna, &op->base, 1)) {
 		gen3_emit_composite_state(sna, &op->base);
 		gen3_get_rectangles(sna, &op->base, 1);
 	}
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 4bc2938..cccdf4c 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -1185,6 +1185,7 @@ flush:
 		gen4_vertex_flush(sna);
 		gen4_magic_ca_pass(sna, op);
 	}
+	_kgem_submit(&sna->kgem);
 	return 0;
 }
 
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index ac55b09..8d6a3e9 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1193,6 +1193,7 @@ flush:
 		gen5_vertex_flush(sna);
 		gen5_magic_ca_pass(sna, op);
 	}
+	_kgem_submit(&sna->kgem);
 	return 0;
 }
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 035da78..e975328 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1658,6 +1658,7 @@ flush:
 		gen6_vertex_flush(sna);
 		gen6_magic_ca_pass(sna, op);
 	}
+	_kgem_submit(&sna->kgem);
 	return 0;
 }
 
@@ -1772,7 +1773,6 @@ gen6_render_composite_blt(struct sna *sna,
 			  const struct sna_composite_rectangles *r)
 {
 	if (unlikely(!gen6_get_rectangles(sna, op, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_composite_state(sna, op);
 		gen6_get_rectangles(sna, op, 1);
 	}
@@ -1788,7 +1788,6 @@ gen6_render_composite_box(struct sna *sna,
 	struct sna_composite_rectangles r;
 
 	if (unlikely(!gen6_get_rectangles(sna, op, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_composite_state(sna, op);
 		gen6_get_rectangles(sna, op, 1);
 	}
@@ -1816,7 +1815,6 @@ gen6_render_composite_boxes(struct sna *sna,
 	do {
 		int nbox_this_time = gen6_get_rectangles(sna, op, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_composite_state(sna, op);
 			nbox_this_time = gen6_get_rectangles(sna, op, nbox);
 			assert(nbox_this_time);
@@ -2071,7 +2069,6 @@ gen6_render_video(struct sna *sna,
 		r.y2 = box->y2 + pix_yoff;
 
 		if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_video_state(sna, &tmp, frame);
 			gen6_get_rectangles(sna, &tmp, 1);
 		}
@@ -2953,7 +2950,6 @@ gen6_render_composite_spans_box(struct sna *sna,
 	     box->y2 - box->y1));
 
 	if (unlikely(gen6_get_rectangles(sna, &op->base, 1) == 0)) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_composite_state(sna, &op->base);
 		gen6_get_rectangles(sna, &op->base, 1);
 	}
@@ -2978,7 +2974,6 @@ gen6_render_composite_spans_boxes(struct sna *sna,
 
 		nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_composite_state(sna, &op->base);
 			nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
 			assert(nbox_this_time);
@@ -3378,7 +3373,6 @@ fallback_blt:
 		float *v;
 		int n_this_time = gen6_get_rectangles(sna, &tmp, n);
 		if (unlikely(n_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_copy_state(sna, &tmp);
 			n_this_time = gen6_get_rectangles(sna, &tmp, n);
 		}
@@ -3432,7 +3426,6 @@ gen6_render_copy_blt(struct sna *sna,
 		     int16_t dx, int16_t dy)
 {
 	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_copy_state(sna, &op->base);
 		gen6_get_rectangles(sna, &op->base, 1);
 	}
@@ -3707,7 +3700,6 @@ gen6_render_fill_boxes(struct sna *sna,
 	do {
 		int n_this_time = gen6_get_rectangles(sna, &tmp, n);
 		if (unlikely(n_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_fill_state(sna, &tmp);
 			n_this_time = gen6_get_rectangles(sna, &tmp, n);
 		}
@@ -3744,7 +3736,6 @@ gen6_render_op_fill_blt(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_fill_state(sna, &op->base);
 		gen6_get_rectangles(sna, &op->base, 1);
 	}
@@ -3771,7 +3762,6 @@ gen6_render_op_fill_box(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	if (unlikely(!gen6_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_fill_state(sna, &op->base);
 		gen6_get_rectangles(sna, &op->base, 1);
 	}
@@ -3801,7 +3791,6 @@ gen6_render_op_fill_boxes(struct sna *sna,
 	do {
 		int nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen6_emit_fill_state(sna, &op->base);
 			nbox_this_time = gen6_get_rectangles(sna, &op->base, nbox);
 		}
@@ -3999,7 +3988,6 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen6_align_vertex(sna, &tmp);
 
 	if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_fill_state(sna, &tmp);
 		gen6_get_rectangles(sna, &tmp, 1);
 	}
@@ -4098,7 +4086,6 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen6_align_vertex(sna, &tmp);
 
 	if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen6_emit_fill_state(sna, &tmp);
 		gen6_get_rectangles(sna, &tmp, 1);
 	}
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index c872c63..d039a48 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1691,6 +1691,7 @@ flush:
 		gen7_vertex_flush(sna);
 		gen7_magic_ca_pass(sna, op);
 	}
+	_kgem_submit(&sna->kgem);
 	return 0;
 }
 
@@ -1801,7 +1802,6 @@ gen7_render_composite_blt(struct sna *sna,
 			  const struct sna_composite_rectangles *r)
 {
 	if (unlikely(!gen7_get_rectangles(sna, op, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_composite_state(sna, op);
 		gen7_get_rectangles(sna, op, 1);
 	}
@@ -1817,7 +1817,6 @@ gen7_render_composite_box(struct sna *sna,
 	struct sna_composite_rectangles r;
 
 	if (unlikely(!gen7_get_rectangles(sna, op, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_composite_state(sna, op);
 		gen7_get_rectangles(sna, op, 1);
 	}
@@ -1845,7 +1844,6 @@ gen7_render_composite_boxes(struct sna *sna,
 	do {
 		int nbox_this_time = gen7_get_rectangles(sna, op, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_composite_state(sna, op);
 			nbox_this_time = gen7_get_rectangles(sna, op, nbox);
 		}
@@ -2097,7 +2095,6 @@ gen7_render_video(struct sna *sna,
 		r.y2 = box->y2 + pix_yoff;
 
 		if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_video_state(sna, &tmp, frame);
 			gen7_get_rectangles(sna, &tmp, 1);
 		}
@@ -2945,7 +2942,6 @@ gen7_render_composite_spans_box(struct sna *sna,
 	     box->y2 - box->y1));
 
 	if (unlikely(gen7_get_rectangles(sna, &op->base, 1) == 0)) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_composite_state(sna, &op->base);
 		gen7_get_rectangles(sna, &op->base, 1);
 	}
@@ -2970,7 +2966,6 @@ gen7_render_composite_spans_boxes(struct sna *sna,
 
 		nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_composite_state(sna, &op->base);
 			nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
 		}
@@ -3363,7 +3358,6 @@ fallback_blt:
 		float *v;
 		int n_this_time = gen7_get_rectangles(sna, &tmp, n);
 		if (unlikely(n_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_copy_state(sna, &tmp);
 			n_this_time = gen7_get_rectangles(sna, &tmp, n);
 		}
@@ -3417,7 +3411,6 @@ gen7_render_copy_blt(struct sna *sna,
 		     int16_t dx, int16_t dy)
 {
 	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_copy_state(sna, &op->base);
 		gen7_get_rectangles(sna, &op->base, 1);
 	}
@@ -3689,7 +3682,6 @@ gen7_render_fill_boxes(struct sna *sna,
 	do {
 		int n_this_time = gen7_get_rectangles(sna, &tmp, n);
 		if (unlikely(n_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_fill_state(sna, &tmp);
 			n_this_time = gen7_get_rectangles(sna, &tmp, n);
 		}
@@ -3726,7 +3718,6 @@ gen7_render_fill_op_blt(struct sna *sna,
 	DBG(("%s: (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
 
 	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_fill_state(sna, &op->base);
 		gen7_get_rectangles(sna, &op->base, 1);
 	}
@@ -3753,7 +3744,6 @@ gen7_render_fill_op_box(struct sna *sna,
 	     box->x1, box->y1, box->x2, box->y2));
 
 	if (unlikely(!gen7_get_rectangles(sna, &op->base, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_fill_state(sna, &op->base);
 		gen7_get_rectangles(sna, &op->base, 1);
 	}
@@ -3783,7 +3773,6 @@ gen7_render_fill_op_boxes(struct sna *sna,
 	do {
 		int nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
 		if (unlikely(nbox_this_time == 0)) {
-			_kgem_submit(&sna->kgem);
 			gen7_emit_fill_state(sna, &op->base);
 			nbox_this_time = gen7_get_rectangles(sna, &op->base, nbox);
 		}
@@ -3979,7 +3968,6 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	gen7_align_vertex(sna, &tmp);
 
 	if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_fill_state(sna, &tmp);
 		gen7_get_rectangles(sna, &tmp, 1);
 	}
@@ -4078,7 +4066,6 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	gen7_align_vertex(sna, &tmp);
 
 	if (unlikely(!gen7_get_rectangles(sna, &tmp, 1))) {
-		_kgem_submit(&sna->kgem);
 		gen7_emit_fill_state(sna, &tmp);
 		gen7_get_rectangles(sna, &tmp, 1);
 	}
commit 57c19b10db2b512c52a593fad98b5ac5db4f1497
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 23 00:33:16 2012 +0000

    sna: Use a CPU mapping if the bo is already in the CPU domain
    
    The heuristic of using the mapping only before the first use in an
    execbuffer was suboptimal and broken by the change in bo initialisation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5ded904..007dc04 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2893,7 +2893,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	assert(list_is_empty(&bo->list));
 
 	if (bo->tiling == I915_TILING_NONE &&
-	    (kgem->has_llc || bo->domain == bo->presumed_offset)) {
+	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {
 		DBG(("%s: converting request for GTT map into CPU map\n",
 		     __FUNCTION__));
 		ptr = kgem_bo_map__cpu(kgem, bo);
commit 510767e213c2f44563f5c438ad1234113567be90
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 18:33:09 2012 +0000

    sna/gen4: Fix vertex flushing across batch flushing
    
    Due to the w/a for its buggy shaders, gen4 is significantly different
    that backporting the simple patch from gen5 was prone to failure. We
    need to check that the vertices have not already been flushed prior to
    flushing again.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index f6a47a0..4bc2938 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -351,8 +351,7 @@ static void gen4_magic_ca_pass(struct sna *sna,
 
 static void gen4_vertex_flush(struct sna *sna)
 {
-	if (sna->render_state.gen4.vertex_offset == 0)
-		return;
+	assert(sna->render_state.gen4.vertex_offset);
 
 	DBG(("%s[%x] = %d\n", __FUNCTION__,
 	     4*sna->render_state.gen4.vertex_offset,
@@ -1182,8 +1181,10 @@ inline static int gen4_get_rectangles(struct sna *sna,
 	return want;
 
 flush:
-	gen4_vertex_flush(sna);
-	gen4_magic_ca_pass(sna, op);
+	if (sna->render_state.gen4.vertex_offset) {
+		gen4_vertex_flush(sna);
+		gen4_magic_ca_pass(sna, op);
+	}
 	return 0;
 }
 
@@ -1202,14 +1203,6 @@ static uint32_t *gen4_composite_get_binding_table(struct sna *sna,
 }
 
 static void
-gen4_emit_sip(struct sna *sna)
-{
-	/* Set system instruction pointer */
-	OUT_BATCH(GEN4_STATE_SIP | 0);
-	OUT_BATCH(0);
-}
-
-static void
 gen4_emit_urb(struct sna *sna)
 {
 	int urb_vs_start, urb_vs_size;
@@ -1282,7 +1275,6 @@ gen4_emit_invariant(struct sna *sna)
 	else
 		OUT_BATCH(GEN4_PIPELINE_SELECT | PIPELINE_SELECT_3D);
 
-	gen4_emit_sip(sna);
 	gen4_emit_state_base_address(sna);
 
 	sna->render_state.gen4.needs_invariant = FALSE;
@@ -1803,7 +1795,8 @@ gen4_render_video(struct sna *sna,
 	}
 	priv->clear = false;
 
-	gen4_vertex_flush(sna);
+	if (sna->render_state.gen4.vertex_offset)
+		gen4_vertex_flush(sna);
 	return TRUE;
 }
 
@@ -1920,8 +1913,10 @@ gen4_render_composite_done(struct sna *sna,
 {
 	DBG(("%s()\n", __FUNCTION__));
 
-	gen4_vertex_flush(sna);
-	gen4_magic_ca_pass(sna, op);
+	if (sna->render_state.gen4.vertex_offset) {
+		gen4_vertex_flush(sna);
+		gen4_magic_ca_pass(sna, op);
+	}
 
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
@@ -2240,7 +2235,7 @@ gen4_render_composite(struct sna *sna,
 	if (too_large(tmp->dst.width, tmp->dst.height) &&
 	    !sna_render_composite_redirect(sna, tmp,
 					   dst_x, dst_y, width, height))
-			return FALSE;
+		return FALSE;
 
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
@@ -2600,7 +2595,8 @@ gen4_render_copy_blt(struct sna *sna,
 static void
 gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen4_vertex_flush(sna);
+	if (sna->render_state.gen4.vertex_offset)
+		gen4_vertex_flush(sna);
 }
 
 static Bool
@@ -2765,7 +2761,9 @@ gen4_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt(sna) || too_large(dst->drawable.width, dst->drawable.height)) {
+	if (prefer_blt(sna) ||
+	    too_large(dst->drawable.width, dst->drawable.height) ||
+	    !gen4_check_dst_format(format)) {
 		uint8_t alu = -1;
 
 		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
@@ -2806,11 +2804,11 @@ gen4_render_fill_boxes(struct sna *sna,
 	if (op == PictOpClear)
 		pixel = 0;
 	else if (!sna_get_pixel_from_rgba(&pixel,
-				     color->red,
-				     color->green,
-				     color->blue,
-				     color->alpha,
-				     PICT_a8r8g8b8))
+					  color->red,
+					  color->green,
+					  color->blue,
+					  color->alpha,
+					  PICT_a8r8g8b8))
 		return FALSE;
 
 	DBG(("%s(%08x x %d)\n", __FUNCTION__, pixel, n));
@@ -2888,7 +2886,8 @@ gen4_render_fill_op_boxes(struct sna *sna,
 static void
 gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen4_vertex_flush(sna);
+	if (sna->render_state.gen4.vertex_offset)
+		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
@@ -2949,7 +2948,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
 	op->base.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))  {
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
 	}
@@ -3048,7 +3047,8 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen4_render_fill_rectangle(sna, &tmp, x1, y1, x2 - x1, y2 - y1);
 
-	gen4_vertex_flush(sna);
+	if (sna->render_state.gen4.vertex_offset)
+		gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
 	return TRUE;
@@ -3113,7 +3113,6 @@ static uint32_t gen4_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->thread4.stats_enable = 1;
 	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN4_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 64be24b..ac55b09 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1549,7 +1549,6 @@ static void gen5_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-
 fastcall static void
 gen5_render_composite_blt(struct sna *sna,
 			  const struct sna_composite_op *op,
@@ -2273,11 +2272,10 @@ gen5_render_composite(struct sna *sna,
 
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
-	if (too_large(tmp->dst.width, tmp->dst.height)) {
-		if (!sna_render_composite_redirect(sna, tmp,
-						   dst_x, dst_y, width, height))
-			return FALSE;
-	}
+	if (too_large(tmp->dst.width, tmp->dst.height) &&
+	    !sna_render_composite_redirect(sna, tmp,
+					   dst_x, dst_y, width, height))
+		return FALSE;
 
 	DBG(("%s: preparing source\n", __FUNCTION__));
 	switch (gen5_composite_picture(sna, src, &tmp->src,
commit f6392048e3e761b35644268ef161045524cfa369
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 17:00:38 2012 +0000

    sna/blt: Avoid clobbering the composite state if we fail to setup the BLT
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 03b80c5..5253a8c 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2282,7 +2282,7 @@ source_use_blt(struct sna *sna, PicturePtr picture)
 	if (sna->kgem.has_vmap)
 		return false;
 
-	return is_cpu(picture->pDrawable);
+	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
 static Bool
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index ff9e462..64be24b 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2012,7 +2012,7 @@ picture_is_cpu(PicturePtr picture)
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
 		return TRUE;
 
-	return is_cpu(picture->pDrawable);
+	return is_cpu(picture->pDrawable) || is_dirty(picture->pDrawable);
 }
 
 static Bool
@@ -2377,19 +2377,6 @@ gen5_render_composite(struct sna *sna,
 			goto cleanup_mask;
 	}
 
-	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
-		if (mask == NULL &&
-		    tmp->redirect.real_bo == NULL &&
-		    sna_blt_composite(sna, op,
-				      src, dst,
-				      src_x, src_y,
-				      dst_x, dst_y,
-				      width, height, tmp)) {
-			kgem_bo_destroy(&sna->kgem, tmp->src.bo);
-			return TRUE;
-		}
-	}
-
 	gen5_bind_surfaces(sna, tmp);
 	gen5_align_vertex(sna, tmp);
 	return TRUE;
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index a7ea95c..a672c46 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -1552,8 +1552,8 @@ sna_blt_composite(struct sna *sna,
 		  int16_t width, int16_t height,
 		  struct sna_composite_op *tmp)
 {
-	struct sna_blt_state *blt = &tmp->u.blt;
 	PictFormat src_format = src->format;
+	PixmapPtr src_pixmap;
 	struct sna_pixmap *priv;
 	int16_t tx, ty;
 	uint32_t alpha_fixup;
@@ -1688,30 +1688,31 @@ sna_blt_composite(struct sna *sna,
 		return FALSE;
 	}
 
-	blt->src_pixmap = get_drawable_pixmap(src->pDrawable);
-	get_drawable_deltas(src->pDrawable, blt->src_pixmap, &tx, &ty);
+	src_pixmap = get_drawable_pixmap(src->pDrawable);
+	get_drawable_deltas(src->pDrawable, src_pixmap, &tx, &ty);
 	x += tx + src->pDrawable->x;
 	y += ty + src->pDrawable->y;
 	if (x < 0 || y < 0 ||
-	    x + width > blt->src_pixmap->drawable.width ||
-	    y + height > blt->src_pixmap->drawable.height) {
+	    x + width  > src_pixmap->drawable.width ||
+	    y + height > src_pixmap->drawable.height) {
 		DBG(("%s: source extends outside (%d, %d), (%d, %d) of valid pixmap %dx%d\n",
 		     __FUNCTION__,
-		     x, y, x+width, y+width, blt->src_pixmap->drawable.width, blt->src_pixmap->drawable.height));
+		     x, y, x+width, y+width, src_pixmap->drawable.width, src_pixmap->drawable.height));
 		return FALSE;
 	}
 
+	tmp->u.blt.src_pixmap = src_pixmap;
 	tmp->u.blt.sx = x - dst_x;
 	tmp->u.blt.sy = y - dst_y;
 	DBG(("%s: blt dst offset (%d, %d), source offset (%d, %d), with alpha fixup? %x\n",
 	     __FUNCTION__,
 	     tmp->dst.x, tmp->dst.y, tmp->u.blt.sx, tmp->u.blt.sy, alpha_fixup));
 
-	if (has_gpu_area(blt->src_pixmap, x, y, width, height))
+	if (has_gpu_area(src_pixmap, x, y, width, height))
 		ret = prepare_blt_copy(sna, tmp, alpha_fixup);
-	else if (has_cpu_area(blt->src_pixmap, x, y, width, height))
+	else if (has_cpu_area(src_pixmap, x, y, width, height))
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
-	else if (sna_pixmap_move_to_gpu(blt->src_pixmap, MOVE_READ))
+	else if (sna_pixmap_move_to_gpu(src_pixmap, MOVE_READ))
 		ret = prepare_blt_copy(sna, tmp, alpha_fixup);
 	else
 		ret = prepare_blt_put(sna, tmp, alpha_fixup);
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 2805a01..6c8f66a 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -89,6 +89,13 @@ is_cpu(DrawablePtr drawable)
 }
 
 static inline Bool
+is_dirty(DrawablePtr drawable)
+{
+	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+	return priv == NULL || kgem_bo_is_dirty(priv->gpu_bo);
+}
+
+static inline Bool
 too_small(DrawablePtr drawable)
 {
 	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
commit 4e86da64be947fe4b8b5f285d26d200827847a8c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 14:36:35 2012 +0000

    sna/gen3+: Flush the vertices during vertex-finish
    
    But only when finishing the vbo, which is the tricky part of the recent
    CA glyph bugs.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 3c3de12..03b80c5 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1574,6 +1574,9 @@ static int gen3_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen3.vertex_offset)
+			gen3_vertex_flush(sna);
+
 		DBG(("%s: reloc = %d\n", __FUNCTION__,
 		     sna->render.vertex_reloc[0]));
 
@@ -1736,20 +1739,13 @@ inline static int gen3_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen3_get_rectangles__flush(sna, op);
-		if (rem == 0) {
-			if (sna->render_state.gen3.vertex_offset) {
-				gen3_vertex_flush(sna);
-				gen3_magic_ca_pass(sna, op);
-			}
-			return 0;
-		}
+		if (rem == 0)
+			goto flush;
 	}
 
 	if (sna->render_state.gen3.vertex_offset == 0 &&
-	    !gen3_rectangle_begin(sna, op)) {
-		DBG(("%s: flushing batch\n", __FUNCTION__));
-		return 0;
-	}
+	    !gen3_rectangle_begin(sna, op))
+		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
@@ -1758,6 +1754,14 @@ inline static int gen3_get_rectangles(struct sna *sna,
 	assert(want);
 	assert(sna->render.vertex_index * op->floats_per_vertex <= sna->render.vertex_size);
 	return want;
+
+flush:
+	DBG(("%s: flushing batch\n", __FUNCTION__));
+	if (sna->render_state.gen3.vertex_offset) {
+		gen3_vertex_flush(sna);
+		gen3_magic_ca_pass(sna, op);
+	}
+	return 0;
 }
 
 fastcall static void
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c9f10a8..f6a47a0 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -64,7 +64,8 @@
 
 #if FLUSH_EVERY_VERTEX
 #define FLUSH(OP) do { \
-	gen4_vertex_flush(sna, OP); \
+	gen4_vertex_flush(sna); \
+	gen4_magic_ca_pass(sna, OP); \
 	OUT_BATCH(MI_FLUSH | MI_INHIBIT_RENDER_CACHE_FLUSH); \
 } while (0)
 #else
@@ -348,8 +349,7 @@ static void gen4_magic_ca_pass(struct sna *sna,
 	state->last_primitive = sna->kgem.nbatch;
 }
 
-static void gen4_vertex_flush(struct sna *sna,
-			      const struct sna_composite_op *op)
+static void gen4_vertex_flush(struct sna *sna)
 {
 	if (sna->render_state.gen4.vertex_offset == 0)
 		return;
@@ -360,8 +360,6 @@ static void gen4_vertex_flush(struct sna *sna,
 	sna->kgem.batch[sna->render_state.gen4.vertex_offset] =
 		sna->render.vertex_index - sna->render.vertex_start;
 	sna->render_state.gen4.vertex_offset = 0;
-
-	gen4_magic_ca_pass(sna, op);
 }
 
 static int gen4_vertex_finish(struct sna *sna)
@@ -375,6 +373,9 @@ static int gen4_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen4.vertex_offset)
+			gen4_vertex_flush(sna);
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1167,20 +1168,23 @@ inline static int gen4_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, 3*op->floats_per_vertex));
 		rem = gen4_get_rectangles__flush(sna, op);
-		if (rem == 0) {
-			gen4_vertex_flush(sna, op);
-			return 0;
-		}
+		if (rem == 0)
+			goto flush;
 	}
 
 	if (!gen4_rectangle_begin(sna, op))
-		return 0;
+		goto flush;
 
 	if (want > 1 && want * op->floats_per_vertex*3 > rem)
 		want = rem / (3*op->floats_per_vertex);
 
 	sna->render.vertex_index += 3*want;
 	return want;
+
+flush:
+	gen4_vertex_flush(sna);
+	gen4_magic_ca_pass(sna, op);
+	return 0;
 }
 
 static uint32_t *gen4_composite_get_binding_table(struct sna *sna,
@@ -1799,7 +1803,7 @@ gen4_render_video(struct sna *sna,
 	}
 	priv->clear = false;
 
-	gen4_vertex_flush(sna, &tmp);
+	gen4_vertex_flush(sna);
 	return TRUE;
 }
 
@@ -1916,7 +1920,8 @@ gen4_render_composite_done(struct sna *sna,
 {
 	DBG(("%s()\n", __FUNCTION__));
 
-	gen4_vertex_flush(sna, op);
+	gen4_vertex_flush(sna);
+	gen4_magic_ca_pass(sna, op);
 
 	if (op->mask.bo)
 		kgem_bo_destroy(&sna->kgem, op->mask.bo);
@@ -2595,7 +2600,7 @@ gen4_render_copy_blt(struct sna *sna,
 static void
 gen4_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
-	gen4_vertex_flush(sna, &op->base);
+	gen4_vertex_flush(sna);
 }
 
 static Bool
@@ -2883,7 +2888,7 @@ gen4_render_fill_op_boxes(struct sna *sna,
 static void
 gen4_render_fill_op_done(struct sna *sna, const struct sna_fill_op *op)
 {
-	gen4_vertex_flush(sna, &op->base);
+	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 }
 
@@ -3043,7 +3048,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	gen4_render_fill_rectangle(sna, &tmp, x1, y1, x2 - x1, y2 - y1);
 
-	gen4_vertex_flush(sna, &tmp);
+	gen4_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 
 	return TRUE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 251eb39..ff9e462 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -66,8 +66,8 @@
 #define URB_CS_ENTRY_SIZE     1
 #define URB_CS_ENTRIES	      0
 
-#define URB_VS_ENTRY_SIZE     1	// each 512-bit row
-#define URB_VS_ENTRIES	      8	// we needs at least 8 entries
+#define URB_VS_ENTRY_SIZE     1
+#define URB_VS_ENTRIES	      128 /* minimum of 8 */
 
 #define URB_GS_ENTRY_SIZE     0
 #define URB_GS_ENTRIES	      0
@@ -76,7 +76,7 @@
 #define URB_CLIP_ENTRIES      0
 
 #define URB_SF_ENTRY_SIZE     2
-#define URB_SF_ENTRIES	      1
+#define URB_SF_ENTRIES	      32
 
 /*
  * this program computes dA/dx and dA/dy for the texture coordinates along
@@ -358,6 +358,9 @@ static int gen5_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen5.vertex_offset)
+			gen5_vertex_flush(sna);
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -410,6 +413,8 @@ static void gen5_vertex_close(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i, delta = 0;
 
+	assert(sna->render_state.gen5.vertex_offset == 0);
+
 	if (!sna->render.vertex_used) {
 		assert(sna->render.vbo == NULL);
 		assert(sna->render.vertices == sna->render.vertex_data);
@@ -421,7 +426,6 @@ static void gen5_vertex_close(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo == NULL) {
-
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -1082,6 +1086,8 @@ static void gen5_emit_vertex_buffer(struct sna *sna,
 {
 	int id = op->u.gen5.ve_id;
 
+	assert((unsigned)id <= 3);
+
 	OUT_BATCH(GEN5_3DSTATE_VERTEX_BUFFERS | 3);
 	OUT_BATCH((id << VB0_BUFFER_INDEX_SHIFT) | VB0_VERTEXDATA |
 		  (4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT));
@@ -1122,6 +1128,8 @@ static bool gen5_rectangle_begin(struct sna *sna,
 	int id = op->u.gen5.ve_id;
 	int ndwords;
 
+	assert((unsigned)id <= 3);
+
 	ndwords = 0;
 	if ((sna->render_state.gen5.vb_id & (1 << id)) == 0)
 		ndwords += 5;
@@ -1167,23 +1175,25 @@ inline static int gen5_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen5_get_rectangles__flush(sna, op);
-		if (rem == 0) {
-			if (sna->render_state.gen5.vertex_offset) {
-				gen5_vertex_flush(sna);
-				gen5_magic_ca_pass(sna, op);
-			}
-			return 0;
-		}
+		if (rem == 0)
+			goto flush;
 	}
 
 	if (!gen5_rectangle_begin(sna, op))
-		return 0;
+		goto flush;
 
 	if (want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
 
 	sna->render.vertex_index += 3*want;
 	return want;
+
+flush:
+	if (sna->render_state.gen5.vertex_offset) {
+		gen5_vertex_flush(sna);
+		gen5_magic_ca_pass(sna, op);
+	}
+	return 0;
 }
 
 static uint32_t *
@@ -1414,8 +1424,9 @@ gen5_emit_vertex_elements(struct sna *sna,
 	int selem = is_affine ? 2 : 3;
 	uint32_t w_component;
 	uint32_t src_format;
-	int id = op->u.gen5.ve_id;;
+	int id = op->u.gen5.ve_id;
 
+	assert((unsigned)id <= 3);
 	if (!DBG_NO_STATE_CACHE && render->ve_id == id)
 		return;
 
@@ -3554,7 +3565,6 @@ static uint32_t gen5_create_sf_state(struct sna_static_stream *stream,
 	sf_state->thread4.max_threads = SF_MAX_THREADS - 1;
 	sf_state->thread4.urb_entry_allocation_size = URB_SF_ENTRY_SIZE - 1;
 	sf_state->thread4.nr_urb_entries = URB_SF_ENTRIES;
-	sf_state->thread4.stats_enable = 1;
 	sf_state->sf5.viewport_transform = FALSE;	/* skip viewport */
 	sf_state->sf6.cull_mode = GEN5_CULLMODE_NONE;
 	sf_state->sf6.scissor = 0;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index e2c78e1..035da78 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -932,6 +932,9 @@ static int gen6_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen6.vertex_offset)
+			gen6_vertex_flush(sna);
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1635,18 +1638,13 @@ inline static int gen6_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen6_get_rectangles__flush(sna, op);
-		if (rem == 0) {
-			if (sna->render_state.gen6.vertex_offset) {
-				gen6_vertex_flush(sna);
-				gen6_magic_ca_pass(sna, op);
-			}
-			return 0;
-		}
+		if (rem == 0)
+			goto flush;
 	}
 
 	if (sna->render_state.gen6.vertex_offset == 0 &&
 	    !gen6_rectangle_begin(sna, op))
-		return 0;
+		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
@@ -1654,6 +1652,13 @@ inline static int gen6_get_rectangles(struct sna *sna,
 	assert(want > 0);
 	sna->render.vertex_index += 3*want;
 	return want;
+
+flush:
+	if (sna->render_state.gen6.vertex_offset) {
+		gen6_vertex_flush(sna);
+		gen6_magic_ca_pass(sna, op);
+	}
+	return 0;
 }
 
 inline static uint32_t *gen6_composite_get_binding_table(struct sna *sna,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index faeedf0..c872c63 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1033,6 +1033,9 @@ static int gen7_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen7.vertex_offset)
+			gen7_vertex_flush(sna);
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1669,24 +1672,26 @@ inline static int gen7_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen7_get_rectangles__flush(sna, op);
-		if (rem == 0) {
-			if (sna->render_state.gen7.vertex_offset) {
-				gen7_vertex_flush(sna);
-				gen7_magic_ca_pass(sna, op);
-			}
-			return 0;
-		}
+		if (rem == 0)
+			goto flush;
 	}
 
 	if (sna->render_state.gen7.vertex_offset == 0 &&
 	    !gen7_rectangle_begin(sna, op))
-		return 0;
+		goto flush;
 
 	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
 
 	sna->render.vertex_index += 3*want;
 	return want;
+
+flush:
+	if (sna->render_state.gen7.vertex_offset) {
+		gen7_vertex_flush(sna);
+		gen7_magic_ca_pass(sna, op);
+	}
+	return 0;
 }
 
 inline static uint32_t *gen7_composite_get_binding_table(struct sna *sna,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index f8df2a4..2d8b3b9 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2216,18 +2216,23 @@ composite_unaligned_box(struct sna *sna,
 			float opacity,
 			pixman_region16_t *clip)
 {
-	pixman_region16_t region;
+	if (clip) {
+		pixman_region16_t region;
 
-	pixman_region_init_rects(&region, box, 1);
-	RegionIntersect(&region, &region, clip);
-	if (REGION_NUM_RECTS(&region)) {
-		tmp->boxes(sna, tmp,
-			  REGION_RECTS(&region),
-			  REGION_NUM_RECTS(&region),
-			  opacity);
-		apply_damage(&tmp->base, &region);
+		pixman_region_init_rects(&region, box, 1);
+		RegionIntersect(&region, &region, clip);
+		if (REGION_NUM_RECTS(&region)) {
+			tmp->boxes(sna, tmp,
+				   REGION_RECTS(&region),
+				   REGION_NUM_RECTS(&region),
+				   opacity);
+			apply_damage(&tmp->base, &region);
+		}
+		pixman_region_fini(&region);
+	} else {
+		tmp->box(sna, tmp, box, opacity);
+		apply_damage_box(&tmp->base, box);
 	}
-	pixman_region_fini(&region);
 }
 
 static void
@@ -2244,17 +2249,19 @@ composite_unaligned_trap_row(struct sna *sna,
 	if (covered == 0)
 		return;
 
-	if (y2 > clip->extents.y2)
-		y2 = clip->extents.y2;
-	if (y1 < clip->extents.y1)
-		y1 = clip->extents.y1;
-	if (y1 >= y2)
-		return;
-
 	x1 = dx + pixman_fixed_to_int(trap->left.p1.x);
 	x2 = dx + pixman_fixed_to_int(trap->right.p1.x);
-	if (x2 < clip->extents.x1 || x1 > clip->extents.x2)
-		return;
+	if (clip) {
+		if (y2 > clip->extents.y2)
+			y2 = clip->extents.y2;
+		if (y1 < clip->extents.y1)
+			y1 = clip->extents.y1;
+		if (y1 >= y2)
+			return;
+
+		if (x2 < clip->extents.x1 || x1 > clip->extents.x2)
+			return;
+	}
 
 	box.y1 = y1;
 	box.y2 = y2;
@@ -2528,7 +2535,7 @@ composite_unaligned_boxes(struct sna *sna,
 {
 	BoxRec extents;
 	struct sna_composite_spans_op tmp;
-	pixman_region16_t clip;
+	pixman_region16_t clip, *c;
 	int dst_x, dst_y;
 	int dx, dy, n;
 
@@ -2584,6 +2591,11 @@ composite_unaligned_boxes(struct sna *sna,
 		return true;
 	}
 
+	c = NULL;
+	if (extents.x2 - extents.x1 > clip.extents.x2 - clip.extents.x1 ||
+	    extents.y2 - extents.y1 > clip.extents.y2 - clip.extents.y1)
+		c = &clip;
+
 	extents = *RegionExtents(&clip);
 	dx = dst->pDrawable->x;
 	dy = dst->pDrawable->y;
@@ -2611,7 +2623,7 @@ composite_unaligned_boxes(struct sna *sna,
 	}
 
 	for (n = 0; n < ntrap; n++)
-		composite_unaligned_trap(sna, &tmp, &traps[n], dx, dy, &clip);
+		composite_unaligned_trap(sna, &tmp, &traps[n], dx, dy, c);
 	tmp.done(sna, &tmp);
 
 	REGION_UNINIT(NULL, &clip);
commit 71512b2438e06b2bd256aff3657797152fbef38e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 13:44:24 2012 +0000

    sna: Clear the flush flag on release of scanout/dri
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=46445
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f79deff..5ded904 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3665,6 +3665,7 @@ void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
 {
 	bo->needs_flush = true;
 	bo->reusable = true;
+	bo->flush = false;
 
 	if (!bo->scanout)
 		return;
commit 025d3d46dc28e967b4f42e5748d13c7fb3ad283b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 13:17:01 2012 +0000

    sna/trapezoids: Only the inplace PictOpIn is unbounded
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index fafb16f..f8df2a4 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3494,7 +3494,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	inplace.opacity = color >> 24;
 
 	tor_render(NULL, &tor, (void*)&inplace,
-		   dst->pCompositeClip, span, true);
+		   dst->pCompositeClip, span, op == PictOpIn);
 
 	tor_fini(&tor);
 
commit b02f866d67d60538368619849f2acda4c1706476
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 22 09:15:25 2012 +0000

    sna: Ensure we restore the shadow pixels before uploading CPU data
    
    Reported-by: Joe Nahmias <joe at nahmias.net>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=46425
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9447cf7..8fa59a6 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1618,8 +1618,12 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 	if (sna_damage_is_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
-			      pixmap->drawable.height))
+			      pixmap->drawable.height)) {
+		sna_damage_destroy(&priv->cpu_damage);
+		priv->undamaged = false;
+		list_del(&priv->list);
 		goto done;
+	}
 
 	if (priv->gpu_bo == NULL) {
 		unsigned flags;
@@ -1659,10 +1663,13 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 	if (priv->mapped) {
+		pixmap->devPrivate.ptr = NULL;
+		priv->mapped = false;
+	}
+	if (pixmap->devPrivate.ptr == NULL) {
 		assert(priv->stride);
 		pixmap->devPrivate.ptr = priv->ptr;
 		pixmap->devKind = priv->stride;
-		priv->mapped = false;
 	}
 	assert(pixmap->devPrivate.ptr != NULL);
 
commit be5df7b5ab487a1282c96dda92f3799e35cd19af
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 23:27:33 2012 +0000

    sna/gen5: Remove CA glyph workaround
    
    The root cause has been found and destroyed, so the w/a is now
    redundant.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 91273c9..2733a1a 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1248,10 +1248,6 @@ sna_glyphs(CARD8 op,
 	_mask = mask;
 	/* XXX discard the mask for non-overlapping glyphs? */
 
-	/* XXX more shader breakage?: CA to dst is fubar on ilk */
-	if (sna->kgem.gen == 50 && !_mask)
-		_mask = list[0].format;
-
 	if (!_mask ||
 	    (((nlist == 1 && list->len == 1) || op == PictOpAdd) &&
 	     dst->format == (_mask->depth << 24 | _mask->format))) {
commit 6038cede83e7f360428b4625d288411794f9d052
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 21:26:29 2012 +0000

    sna/gen3+: Re-emit composite state after flushing CA vertices
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=42891
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 189653d..3c3de12 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1566,8 +1566,7 @@ static void gen3_vertex_flush(struct sna *sna)
 	sna->render_state.gen3.vertex_offset = 0;
 }
 
-static int gen3_vertex_finish(struct sna *sna,
-			      const struct sna_composite_op *op)
+static int gen3_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 
@@ -1575,11 +1574,6 @@ static int gen3_vertex_finish(struct sna *sna,
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (sna->render_state.gen3.vertex_offset) {
-			gen3_vertex_flush(sna);
-			gen3_magic_ca_pass(sna, op);
-		}
-
 		DBG(("%s: reloc = %d\n", __FUNCTION__,
 		     sna->render.vertex_reloc[0]));
 
@@ -1722,7 +1716,10 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	return gen3_vertex_finish(sna, op);
+	if (op->need_magic_ca_pass && sna->render.vbo)
+		return 0;
+
+	return gen3_vertex_finish(sna);
 }
 
 inline static int gen3_get_rectangles(struct sna *sna,
@@ -2318,7 +2315,7 @@ gen3_align_vertex(struct sna *sna,
 {
 	if (op->floats_per_vertex != sna->render_state.gen3.last_floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen3_vertex_finish(sna, op);
+			gen3_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen3.last_floats_per_vertex,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 24de833..c9f10a8 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -364,8 +364,7 @@ static void gen4_vertex_flush(struct sna *sna,
 	gen4_magic_ca_pass(sna, op);
 }
 
-static int gen4_vertex_finish(struct sna *sna,
-			      const struct sna_composite_op *op)
+static int gen4_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -376,8 +375,6 @@ static int gen4_vertex_finish(struct sna *sna,
 
 	bo = sna->render.vbo;
 	if (bo) {
-		gen4_vertex_flush(sna, op);
-
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1154,7 +1151,10 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	return gen4_vertex_finish(sna, op);
+	if (op->need_magic_ca_pass && sna->render.vbo)
+		return 0;
+
+	return gen4_vertex_finish(sna);
 }
 
 inline static int gen4_get_rectangles(struct sna *sna,
@@ -1306,7 +1306,7 @@ gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 6*op->floats_per_vertex)
-			gen4_vertex_finish(sna, op);
+			gen4_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen4.floats_per_vertex,
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index d4b6313..251eb39 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -347,8 +347,7 @@ static void gen5_vertex_flush(struct sna *sna)
 	sna->render_state.gen5.vertex_offset = 0;
 }
 
-static int gen5_vertex_finish(struct sna *sna,
-			      const struct sna_composite_op *op)
+static int gen5_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -359,11 +358,6 @@ static int gen5_vertex_finish(struct sna *sna,
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (sna->render_state.gen5.vertex_offset) {
-			gen5_vertex_flush(sna);
-			gen5_magic_ca_pass(sna, op);
-		}
-
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1157,7 +1151,10 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen5_vertex_finish(sna, op);
+	if (op->need_magic_ca_pass && sna->render.vbo)
+		return 0;
+
+	return gen5_vertex_finish(sna);
 }
 
 inline static int gen5_get_rectangles(struct sna *sna,
@@ -1312,7 +1309,7 @@ gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen5_vertex_finish(sna, op);
+			gen5_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen5.floats_per_vertex,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 56a0f4a..e2c78e1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -919,8 +919,7 @@ static void gen6_vertex_flush(struct sna *sna)
 	sna->render_state.gen6.vertex_offset = 0;
 }
 
-static int gen6_vertex_finish(struct sna *sna,
-			      const struct sna_composite_op *op)
+static int gen6_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -933,11 +932,6 @@ static int gen6_vertex_finish(struct sna *sna,
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (sna->render_state.gen6.vertex_offset) {
-			gen6_vertex_flush(sna);
-			gen6_magic_ca_pass(sna, op);
-		}
-
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1625,7 +1619,10 @@ static int gen6_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen6_vertex_finish(sna, op);
+	if (op->need_magic_ca_pass && sna->render.vbo)
+		return 0;
+
+	return gen6_vertex_finish(sna);
 }
 
 inline static int gen6_get_rectangles(struct sna *sna,
@@ -1751,7 +1748,7 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 	if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
 			/* XXX propagate failure */
-			gen6_vertex_finish(sna, op);
+			gen6_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen6.floats_per_vertex,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index fce19fe..faeedf0 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1022,8 +1022,7 @@ static void gen7_vertex_flush(struct sna *sna)
 	sna->render_state.gen7.vertex_offset = 0;
 }
 
-static int gen7_vertex_finish(struct sna *sna,
-			      const struct sna_composite_op *op)
+static int gen7_vertex_finish(struct sna *sna)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -1034,11 +1033,6 @@ static int gen7_vertex_finish(struct sna *sna,
 
 	bo = sna->render.vbo;
 	if (bo) {
-		if (sna->render_state.gen7.vertex_offset) {
-			gen7_vertex_flush(sna);
-			gen7_magic_ca_pass(sna, op);
-		}
-
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1659,7 +1653,10 @@ static int gen7_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen7_vertex_finish(sna, op);
+	if (op->need_magic_ca_pass && sna->render.vbo)
+		return 0;
+
+	return gen7_vertex_finish(sna);
 }
 
 inline static int gen7_get_rectangles(struct sna *sna,
@@ -1780,7 +1777,7 @@ gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen7_vertex_finish(sna, op);
+			gen7_vertex_finish(sna);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen7.floats_per_vertex,
commit fe6602cbbc4eed1b88ac731a30b46cc970ea444f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 21:26:29 2012 +0000

    sna/gen3+: Only flush the vertices after checking for end-of-batch
    
    Or upon actually closing the vertex buffer.
    
    However, the underlying issue remains. That is we are failing to re-emit
    the first-pass for CA text after flushing the vertex buffer (and so
    emitting the second-pass for the flushed vertices).
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=42891
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 97e5839..189653d 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1566,7 +1566,8 @@ static void gen3_vertex_flush(struct sna *sna)
 	sna->render_state.gen3.vertex_offset = 0;
 }
 
-static int gen3_vertex_finish(struct sna *sna)
+static int gen3_vertex_finish(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo;
 
@@ -1574,6 +1575,11 @@ static int gen3_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen3.vertex_offset) {
+			gen3_vertex_flush(sna);
+			gen3_magic_ca_pass(sna, op);
+		}
+
 		DBG(("%s: reloc = %d\n", __FUNCTION__,
 		     sna->render.vertex_reloc[0]));
 
@@ -1709,11 +1715,6 @@ static bool gen3_rectangle_begin(struct sna *sna,
 static int gen3_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	if (sna->render_state.gen3.vertex_offset) {
-		gen3_vertex_flush(sna);
-		gen3_magic_ca_pass(sna, op);
-	}
-
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 105: 5))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 2)
@@ -1721,7 +1722,7 @@ static int gen3_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	return gen3_vertex_finish(sna);
+	return gen3_vertex_finish(sna, op);
 }
 
 inline static int gen3_get_rectangles(struct sna *sna,
@@ -1738,8 +1739,13 @@ inline static int gen3_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen3_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (rem == 0) {
+			if (sna->render_state.gen3.vertex_offset) {
+				gen3_vertex_flush(sna);
+				gen3_magic_ca_pass(sna, op);
+			}
 			return 0;
+		}
 	}
 
 	if (sna->render_state.gen3.vertex_offset == 0 &&
@@ -2312,7 +2318,7 @@ gen3_align_vertex(struct sna *sna,
 {
 	if (op->floats_per_vertex != sna->render_state.gen3.last_floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen3_vertex_finish(sna);
+			gen3_vertex_finish(sna, op);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen3.last_floats_per_vertex,
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6e7d4be..24de833 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -364,7 +364,8 @@ static void gen4_vertex_flush(struct sna *sna,
 	gen4_magic_ca_pass(sna, op);
 }
 
-static int gen4_vertex_finish(struct sna *sna)
+static int gen4_vertex_finish(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -375,6 +376,8 @@ static int gen4_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		gen4_vertex_flush(sna, op);
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1144,8 +1147,6 @@ static bool gen4_rectangle_begin(struct sna *sna,
 static int gen4_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	gen4_vertex_flush(sna, op);
-
 	if (!kgem_check_batch(&sna->kgem, 25))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
@@ -1153,7 +1154,7 @@ static int gen4_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 1)
 		return 0;
 
-	return gen4_vertex_finish(sna);
+	return gen4_vertex_finish(sna, op);
 }
 
 inline static int gen4_get_rectangles(struct sna *sna,
@@ -1166,8 +1167,10 @@ inline static int gen4_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, 3*op->floats_per_vertex));
 		rem = gen4_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (rem == 0) {
+			gen4_vertex_flush(sna, op);
 			return 0;
+		}
 	}
 
 	if (!gen4_rectangle_begin(sna, op))
@@ -1303,7 +1306,7 @@ gen4_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen4.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 6*op->floats_per_vertex)
-			gen4_vertex_finish(sna);
+			gen4_vertex_finish(sna, op);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen4.floats_per_vertex,
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 7ac993c..d4b6313 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -347,7 +347,8 @@ static void gen5_vertex_flush(struct sna *sna)
 	sna->render_state.gen5.vertex_offset = 0;
 }
 
-static int gen5_vertex_finish(struct sna *sna)
+static int gen5_vertex_finish(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -358,6 +359,11 @@ static int gen5_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen5.vertex_offset) {
+			gen5_vertex_flush(sna);
+			gen5_magic_ca_pass(sna, op);
+		}
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1144,11 +1150,6 @@ static bool gen5_rectangle_begin(struct sna *sna,
 static int gen5_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	if (sna->render_state.gen5.vertex_offset) {
-		gen5_vertex_flush(sna);
-		gen5_magic_ca_pass(sna, op);
-	}
-
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 20 : 6))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
@@ -1156,7 +1157,7 @@ static int gen5_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen5_vertex_finish(sna);
+	return gen5_vertex_finish(sna, op);
 }
 
 inline static int gen5_get_rectangles(struct sna *sna,
@@ -1169,8 +1170,13 @@ inline static int gen5_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen5_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (rem == 0) {
+			if (sna->render_state.gen5.vertex_offset) {
+				gen5_vertex_flush(sna);
+				gen5_magic_ca_pass(sna, op);
+			}
 			return 0;
+		}
 	}
 
 	if (!gen5_rectangle_begin(sna, op))
@@ -1306,7 +1312,7 @@ gen5_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen5.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen5_vertex_finish(sna);
+			gen5_vertex_finish(sna, op);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen5.floats_per_vertex,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1e99709..56a0f4a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -919,7 +919,8 @@ static void gen6_vertex_flush(struct sna *sna)
 	sna->render_state.gen6.vertex_offset = 0;
 }
 
-static int gen6_vertex_finish(struct sna *sna)
+static int gen6_vertex_finish(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -932,6 +933,11 @@ static int gen6_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen6.vertex_offset) {
+			gen6_vertex_flush(sna);
+			gen6_magic_ca_pass(sna, op);
+		}
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1612,11 +1618,6 @@ static bool gen6_rectangle_begin(struct sna *sna,
 static int gen6_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	if (sna->render_state.gen6.vertex_offset) {
-		gen6_vertex_flush(sna);
-		gen6_magic_ca_pass(sna, op);
-	}
-
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 5))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
@@ -1624,7 +1625,7 @@ static int gen6_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen6_vertex_finish(sna);
+	return gen6_vertex_finish(sna, op);
 }
 
 inline static int gen6_get_rectangles(struct sna *sna,
@@ -1637,8 +1638,13 @@ inline static int gen6_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen6_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (rem == 0) {
+			if (sna->render_state.gen6.vertex_offset) {
+				gen6_vertex_flush(sna);
+				gen6_magic_ca_pass(sna, op);
+			}
 			return 0;
+		}
 	}
 
 	if (sna->render_state.gen6.vertex_offset == 0 &&
@@ -1745,7 +1751,7 @@ gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 	if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
 			/* XXX propagate failure */
-			gen6_vertex_finish(sna);
+			gen6_vertex_finish(sna, op);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen6.floats_per_vertex,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5294547..fce19fe 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -1022,7 +1022,8 @@ static void gen7_vertex_flush(struct sna *sna)
 	sna->render_state.gen7.vertex_offset = 0;
 }
 
-static int gen7_vertex_finish(struct sna *sna)
+static int gen7_vertex_finish(struct sna *sna,
+			      const struct sna_composite_op *op)
 {
 	struct kgem_bo *bo;
 	unsigned int i;
@@ -1033,6 +1034,11 @@ static int gen7_vertex_finish(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo) {
+		if (sna->render_state.gen7.vertex_offset) {
+			gen7_vertex_flush(sna);
+			gen7_magic_ca_pass(sna, op);
+		}
+
 		for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) {
 			if (sna->render.vertex_reloc[i]) {
 				DBG(("%s: reloc[%d] = %d\n", __FUNCTION__,
@@ -1646,11 +1652,6 @@ static bool gen7_rectangle_begin(struct sna *sna,
 static int gen7_get_rectangles__flush(struct sna *sna,
 				      const struct sna_composite_op *op)
 {
-	if (sna->render_state.gen7.vertex_offset) {
-		gen7_vertex_flush(sna);
-		gen7_magic_ca_pass(sna, op);
-	}
-
 	if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 6))
 		return 0;
 	if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1)
@@ -1658,7 +1659,7 @@ static int gen7_get_rectangles__flush(struct sna *sna,
 	if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2)
 		return 0;
 
-	return gen7_vertex_finish(sna);
+	return gen7_vertex_finish(sna, op);
 }
 
 inline static int gen7_get_rectangles(struct sna *sna,
@@ -1671,8 +1672,13 @@ inline static int gen7_get_rectangles(struct sna *sna,
 		DBG(("flushing vbo for %s: %d < %d\n",
 		     __FUNCTION__, rem, op->floats_per_rect));
 		rem = gen7_get_rectangles__flush(sna, op);
-		if (rem == 0)
+		if (rem == 0) {
+			if (sna->render_state.gen7.vertex_offset) {
+				gen7_vertex_flush(sna);
+				gen7_magic_ca_pass(sna, op);
+			}
 			return 0;
+		}
 	}
 
 	if (sna->render_state.gen7.vertex_offset == 0 &&
@@ -1774,7 +1780,7 @@ gen7_align_vertex(struct sna *sna, const struct sna_composite_op *op)
 {
 	if (op->floats_per_vertex != sna->render_state.gen7.floats_per_vertex) {
 		if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect)
-			gen7_vertex_finish(sna);
+			gen7_vertex_finish(sna, op);
 
 		DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n",
 		     sna->render_state.gen7.floats_per_vertex,
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index ae5bec6..676125d 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -1927,6 +1927,7 @@ sna_mode_fini(struct sna *sna)
 #endif
 
 	sna_mode_remove_fb(sna);
+	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(sna->front));
 
 	/* mode->shadow_fb_id should have been destroyed already */
 }
commit 168c87a340119e65b1d7ccbbf59da820044ca936
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 13:16:43 2012 +0000

    sna: Clear the scanout flag after releasing the scanout pixmap
    
    In the future, this will be a good place to restore the cache level of
    the bo as well.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e7c4987..f79deff 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1067,8 +1067,9 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	assert(list_is_empty(&bo->list));
 	assert(bo->vmap == false && bo->sync == false);
 	assert(bo->io == false);
+	assert(bo->scanout == false);
+	assert(bo->flush == false);
 
-	bo->scanout = bo->flush = false;
 	if (bo->rq) {
 		struct list *cache;
 
@@ -3660,6 +3661,17 @@ void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset)
 	}
 }
 
+void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo)
+{
+	bo->needs_flush = true;
+	bo->reusable = true;
+
+	if (!bo->scanout)
+		return;
+
+	bo->scanout = false;
+}
+
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
 		struct kgem_bo *src,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f3a7b94..30303ce 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -499,6 +499,8 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 				      void **ret);
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);
 
+void kgem_bo_clear_scanout(struct kgem *kgem, struct kgem_bo *bo);
+
 void kgem_throttle(struct kgem *kgem);
 #define MAX_INACTIVE_TIME 10
 bool kgem_expire_cache(struct kgem *kgem);
diff --git a/src/sna/sna_display.c b/src/sna/sna_display.c
index b2779aa..ae5bec6 100644
--- a/src/sna/sna_display.c
+++ b/src/sna/sna_display.c
@@ -648,7 +648,6 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 		     scrn->depth, scrn->bitsPerPixel));
 
 		assert(bo->tiling != I915_TILING_Y);
-		bo->scanout = true;
 		ret = drmModeAddFB(sna->kgem.fd,
 				   scrn->virtualX, scrn->virtualY,
 				   scrn->depth, scrn->bitsPerPixel,
@@ -664,6 +663,7 @@ sna_crtc_set_mode_major(xf86CrtcPtr crtc, DisplayModePtr mode,
 
 		DBG(("%s: handle %d attached to fb %d\n",
 		     __FUNCTION__, bo->handle, sna_mode->fb_id));
+		bo->scanout = true;
 		sna_mode->fb_pixmap = sna->front->drawable.serialNumber;
 	}
 
@@ -765,7 +765,6 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 	}
 
 	assert(bo->tiling != I915_TILING_Y);
-	bo->scanout = true;
 	if (drmModeAddFB(sna->kgem.fd,
 			 width, height, scrn->depth, scrn->bitsPerPixel,
 			 bo->pitch, bo->handle,
@@ -780,6 +779,7 @@ sna_crtc_shadow_allocate(xf86CrtcPtr crtc, int width, int height)
 
 	DBG(("%s: attached handle %d to fb %d\n",
 	     __FUNCTION__, bo->handle, sna_crtc->shadow_fb_id));
+	bo->scanout = true;
 	return sna_crtc->shadow = shadow;
 }
 
@@ -806,6 +806,7 @@ sna_crtc_shadow_destroy(xf86CrtcPtr crtc, PixmapPtr pixmap, void *data)
 	drmModeRmFB(sna->kgem.fd, sna_crtc->shadow_fb_id);
 	sna_crtc->shadow_fb_id = 0;
 
+	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(pixmap));
 	pixmap->drawable.pScreen->DestroyPixmap(pixmap);
 	sna_crtc->shadow = NULL;
 }
@@ -1674,7 +1675,6 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 		goto fail;
 
 	assert(bo->tiling != I915_TILING_Y);
-	bo->scanout = true;
 	if (drmModeAddFB(sna->kgem.fd, width, height,
 			 scrn->depth, scrn->bitsPerPixel,
 			 bo->pitch, bo->handle,
@@ -1690,6 +1690,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	     __FUNCTION__, bo->handle,
 	     sna->front->drawable.serialNumber, mode->fb_id));
 
+	bo->scanout = true;
 	for (i = 0; i < xf86_config->num_crtc; i++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[i];
 
@@ -1711,7 +1712,7 @@ sna_crtc_resize(ScrnInfoPtr scrn, int width, int height)
 
 	if (old_fb_id)
 		drmModeRmFB(sna->kgem.fd, old_fb_id);
-	sna_pixmap_get_bo(old_front)->needs_flush = true;
+	kgem_bo_clear_scanout(&sna->kgem, sna_pixmap_get_bo(old_front));
 	scrn->pScreen->DestroyPixmap(old_front);
 
 	return TRUE;
@@ -1842,13 +1843,7 @@ sna_page_flip(struct sna *sna,
 	count = do_page_flip(sna, data, ref_crtc_hw_id);
 	DBG(("%s: page flipped %d crtcs\n", __FUNCTION__, count));
 	if (count) {
-		/* Although the kernel performs an implicit flush upon
-		 * page-flipping, marking the bo as requiring a flush
-		 * here ensures that the buffer goes into the active cache
-		 * upon release.
-		 */
-		bo->needs_flush = true;
-		bo->reusable = true;
+		bo->scanout = true;
 	} else {
 		drmModeRmFB(sna->kgem.fd, mode->fb_id);
 		mode->fb_id = *old_fb;
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 1def833..fe3d1cf 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -717,12 +717,18 @@ sna_dri_add_frame_event(struct sna_dri_frame_event *info)
 static void
 sna_dri_frame_event_release_bo(struct kgem *kgem, struct kgem_bo *bo)
 {
-	bo->needs_flush = true; /* has been used externally, reset domains */
-	bo->reusable = true; /* No longer in use by an external client */
+	kgem_bo_clear_scanout(kgem, bo);
 	kgem_bo_destroy(kgem, bo);
 }
 
 static void
+sna_dri_frame_event_finish(struct sna_dri_frame_event *info)
+{
+	sna_mode_delete_fb(info->sna, info->old_fb);
+	kgem_bo_clear_scanout(&info->sna->kgem, info->old_front.bo);
+}
+
+static void
 sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 {
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
@@ -1028,12 +1034,12 @@ static void sna_dri_flip_event(struct sna *sna,
 					 flip->event_data);
 		}
 
-		sna_mode_delete_fb(flip->sna, flip->old_fb);
+		sna_dri_frame_event_finish(flip);
 		sna_dri_frame_event_info_free(flip);
 		break;
 
 	case DRI2_FLIP_THROTTLE:
-		sna_mode_delete_fb(sna, flip->old_fb);
+		sna_dri_frame_event_finish(flip);
 
 		assert(sna->dri.flip_pending[flip->pipe] == flip);
 		sna->dri.flip_pending[flip->pipe] = NULL;
@@ -1071,7 +1077,7 @@ static void sna_dri_flip_event(struct sna *sna,
 		     flip->front->name != flip->old_front.name));
 		assert(sna->dri.flip_pending[flip->pipe] == flip);
 
-		sna_mode_delete_fb(flip->sna, flip->old_fb);
+		sna_dri_frame_event_finish(flip);
 
 		if (flip->front->name != flip->next_front.name) {
 			DBG(("%s: async flip continuing\n", __FUNCTION__));
commit d7415742a5f78958489216f450411603b1eff9a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 16:33:26 2012 +0000

    sna/dri: Queue a flush on the back DRI2 when enqueing a flip
    
    As we may wait upon the bo having finished rendering before we can
    execute the flip, flushing the render cache as early as possible is
    beneficial
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 78d9a6e..1def833 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1636,6 +1636,7 @@ blit:
 		}
 		info->front->name = info->back->name;
 		get_private(info->front)->bo = get_private(info->back)->bo;
+		__kgem_flush(&sna->kgem, get_private(info->back)->bo);
 	}
 
 	if (bo == NULL) {
commit 2715a455f7bfbecd7a6977184dc6180a09d06e1f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 14:49:30 2012 +0000

    uxa: Prevent laggy applications by throttling after rendering
    
    Before blocking and waiting for further input, we need to make sure that
    we have not developed too large a queue of outstanding rendering. As we
    rendering to the front-buffer with no natural throttling and allow X
    clients to render as fast as they wish, it is entirely possible for a
    large queue of outstanding rendering to develop. For such an example,
    watch firefox rendering the fishietank demo and notice the delay that
    can build up before the tooltips appear.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index 0cb8df3..ed4f375 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -32,6 +32,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #endif
 
 #include <xf86.h>
+#include <xf86drm.h>
 #include <xaarop.h>
 #include <string.h>
 #include <errno.h>
@@ -1001,6 +1002,11 @@ static void intel_flush_rendering(intel_screen_private *intel)
 	intel->needs_flush = 0;
 }
 
+static void intel_throttle(intel_screen_private *intel)
+{
+	drmCommandNone(intel->drmSubFD, DRM_I915_GEM_THROTTLE);
+}
+
 void intel_uxa_block_handler(intel_screen_private *intel)
 {
 	if (intel->shadow_damage &&
@@ -1015,6 +1021,7 @@ void intel_uxa_block_handler(intel_screen_private *intel)
 	 */
 	intel_glamor_flush(intel);
 	intel_flush_rendering(intel);
+	intel_throttle(intel);
 }
 
 static PixmapPtr
commit 26721893cb41cef66db7ef626881d1eba8a5bdea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 13:31:16 2012 +0000

    sna: Mark the pixmap as active for the force-to-gpu short-circuit
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3b417cc..9447cf7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2002,6 +2002,15 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	return pixmap;
 }
 
+static inline struct sna_pixmap *
+sna_pixmap_mark_active(struct sna *sna, struct sna_pixmap *priv)
+{
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
+		list_move(&priv->inactive, &sna->active_pixmaps);
+	priv->clear = false;
+	return priv;
+}
+
 struct sna_pixmap *
 sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 {
@@ -2015,7 +2024,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		DBG(("%s: GPU all-damaged\n", __FUNCTION__));
-		return priv;
+		return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
 	}
 
 	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
@@ -2197,10 +2206,7 @@ done:
 			sna_pixmap_free_cpu(sna, priv);
 	}
 active:
-	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
-		list_move(&priv->inactive, &sna->active_pixmaps);
-	priv->clear = false;
-	return priv;
+	return sna_pixmap_mark_active(to_sna_from_pixmap(pixmap), priv);
 }
 
 static bool must_check sna_validate_pixmap(DrawablePtr draw, PixmapPtr pixmap)
commit 1541f1afca028453379083e564bc82debf0ba39a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 13:17:35 2012 +0000

    sna: Skip the CPU synchronization when marking a pixmap as inactive
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 575c213..00fc80a 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -442,6 +442,7 @@ struct kgem_bo *sna_pixmap_change_tiling(PixmapPtr pixmap, uint32_t tiling);
 #define MOVE_WRITE 0x1
 #define MOVE_READ 0x2
 #define MOVE_INPLACE_HINT 0x4
+#define MOVE_ASYNC_HINT 0x8
 bool must_check _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags);
 static inline bool must_check sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned flags)
 {
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 72c3907..3b417cc 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1058,7 +1058,7 @@ skip_inplace_map:
 	}
 
 done:
-	if (priv->cpu_bo) {
+	if ((flags & MOVE_ASYNC_HINT) == 0 && priv->cpu_bo) {
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
@@ -11763,7 +11763,7 @@ static void sna_accel_inactive(struct sna *sna)
 			DBG(("%s: discarding inactive GPU bo handle=%d\n",
 			     __FUNCTION__, priv->gpu_bo->handle));
 			if (!sna_pixmap_move_to_cpu(priv->pixmap,
-						    MOVE_READ | MOVE_WRITE))
+						    MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT))
 				list_add(&priv->inactive, &preserve);
 		}
 	}
commit 74fd55a96bd6c63e9c49a4b7fee7ef91f6b00a2c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 13:11:32 2012 +0000

    sna/dri: Ensure that we reattach to the DRI2 front buffer after modeswitch
    
    If we change the Screen pixmap due to a change of mode, we lose the
    flag that we've attached a DRI2 buffer to it. So the next time we try to
    copy from/to it, reassert its DRI2 status.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 58a3e3c..78d9a6e 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -164,7 +164,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 		return NULL;
 
 	if (priv->flush)
-		return ref(priv->gpu_bo);
+		return priv->gpu_bo;
 
 	tiling = color_tiling(sna, &pixmap->drawable);
 	if (tiling < 0)
@@ -182,7 +182,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	/* Don't allow this named buffer to be replaced */
 	priv->pinned = 1;
 
-	return ref(priv->gpu_bo);
+	return priv->gpu_bo;
 }
 
 static DRI2Buffer2Ptr
@@ -201,16 +201,15 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	     __FUNCTION__, attachment, format,
 	     drawable->width, drawable->height));
 
-	buffer = calloc(1, sizeof *buffer + sizeof *private);
-	if (buffer == NULL)
-		return NULL;
-
 	pixmap = NULL;
-	bo = NULL;
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
 		pixmap = get_drawable_pixmap(drawable);
 		bo = sna_pixmap_set_dri(sna, pixmap);
+		if (bo == NULL)
+			return NULL;
+
+		bo = ref(bo);
 		bpp = pixmap->drawable.bitsPerPixel;
 		DBG(("%s: attaching to front buffer %dx%d [%p:%d]\n",
 		     __FUNCTION__,
@@ -276,9 +275,13 @@ sna_dri_create_buffer(DrawablePtr drawable,
 		break;
 
 	default:
-		break;
+		return NULL;
 	}
 	if (bo == NULL)
+		return NULL;
+
+	buffer = calloc(1, sizeof *buffer + sizeof *private);
+	if (buffer == NULL)
 		goto err;
 
 	private = get_private(buffer);
@@ -293,10 +296,8 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	private->pixmap = pixmap;
 	private->bo = bo;
 
-	if (buffer->name == 0) {
-		kgem_bo_destroy(&sna->kgem, bo);
+	if (buffer->name == 0)
 		goto err;
-	}
 
 	if (pixmap)
 		pixmap->refcnt++;
@@ -304,6 +305,7 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	return buffer;
 
 err:
+	kgem_bo_destroy(&sna->kgem, bo);
 	free(buffer);
 	return NULL;
 }
@@ -505,22 +507,27 @@ sna_dri_copy_region(DrawablePtr draw,
 		    DRI2BufferPtr dst_buffer,
 		    DRI2BufferPtr src_buffer)
 {
+	PixmapPtr pixmap = get_drawable_pixmap(draw);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct kgem_bo *src, *dst;
 
 	if (dst_buffer->attachment == DRI2BufferFrontLeft)
-		dst = sna_pixmap_get_bo(get_drawable_pixmap(draw));
+		dst = sna_pixmap_set_dri(sna, pixmap);
 	else
 		dst = get_private(dst_buffer)->bo;
 
 	if (src_buffer->attachment == DRI2BufferFrontLeft)
-		src = sna_pixmap_get_bo(get_drawable_pixmap(draw));
+		src = sna_pixmap_set_dri(sna, pixmap);
 	else
 		src = get_private(src_buffer)->bo;
 
+	assert(dst != NULL);
+	assert(src != NULL);
+
 	DBG(("%s: dst -- attachment=%d, name=%d, handle=%d [screen=%d]\n",
 	     __FUNCTION__,
 	     dst_buffer->attachment, dst_buffer->name, dst->handle,
-	     sna_pixmap_get_bo(to_sna_from_drawable(draw)->front)->handle));
+	     sna_pixmap_get_bo(sna->front)->handle));
 	DBG(("%s: src -- attachment=%d, name=%d, handle=%d\n",
 	     __FUNCTION__,
 	     src_buffer->attachment, src_buffer->name, src->handle));
@@ -529,10 +536,8 @@ sna_dri_copy_region(DrawablePtr draw,
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2,
 	     REGION_NUM_RECTS(region)));
-	assert(dst != NULL);
-	assert(src != NULL);
 
-	sna_dri_copy(to_sna_from_drawable(draw), draw, region, dst, src, false);
+	sna_dri_copy(sna, draw, region, dst, src, false);
 }
 
 #if DRI2INFOREC_VERSION >= 4
commit d051793b9194060f5408503b1fac56958c6e58e4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 11:42:31 2012 +0000

    sna/dri: Improve error handling of failing to create a DRI2 pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 96324e6..58a3e3c 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -210,7 +210,6 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	switch (attachment) {
 	case DRI2BufferFrontLeft:
 		pixmap = get_drawable_pixmap(drawable);
-		pixmap->refcnt++;
 		bo = sna_pixmap_set_dri(sna, pixmap);
 		bpp = pixmap->drawable.bitsPerPixel;
 		DBG(("%s: attaching to front buffer %dx%d [%p:%d]\n",
@@ -295,14 +294,13 @@ sna_dri_create_buffer(DrawablePtr drawable,
 	private->bo = bo;
 
 	if (buffer->name == 0) {
-		/* failed to name buffer */
-		if (pixmap)
-			pixmap->drawable.pScreen->DestroyPixmap(pixmap);
-		else
-			kgem_bo_destroy(&sna->kgem, bo);
+		kgem_bo_destroy(&sna->kgem, bo);
 		goto err;
 	}
 
+	if (pixmap)
+		pixmap->refcnt++;
+
 	return buffer;
 
 err:
@@ -531,6 +529,8 @@ sna_dri_copy_region(DrawablePtr draw,
 	     region->extents.x1, region->extents.y1,
 	     region->extents.x2, region->extents.y2,
 	     REGION_NUM_RECTS(region)));
+	assert(dst != NULL);
+	assert(src != NULL);
 
 	sna_dri_copy(to_sna_from_drawable(draw), draw, region, dst, src, false);
 }
commit 580ae520cad749fb86a1bddd0fa2bcadfd60abb6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 10:55:46 2012 +0000

    sna: Short-circuit repeated calls to force-to-gpu
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 215dbca..72c3907 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2013,6 +2013,11 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 	if (priv == NULL)
 		return NULL;
 
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+		DBG(("%s: GPU all-damaged\n", __FUNCTION__));
+		return priv;
+	}
+
 	/* Unlike move-to-gpu, we ignore wedged and always create the GPU bo */
 	if (priv->gpu_bo == NULL) {
 		struct sna *sna = to_sna_from_pixmap(pixmap);
commit f2aafb98026b5c476b7f84aa2dc4c1f9ba2e573d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 10:43:11 2012 +0000

    uxa: Silence compiler warning for const arguments
    
    i965_video.c: In function 'gen6_create_cc_state':
    i965_video.c:1374:12: warning: passing argument 4 of
    'intel_bo_alloc_for_data' discards 'const' qualifier from pointer target
    type [enabled by default]
    
    Repeated ad nauseam.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel.h b/src/intel.h
index e5f8bc8..69f7c72 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -605,7 +605,7 @@ intel_emit_reloc(drm_intel_bo * bo, uint32_t offset,
 static inline drm_intel_bo *intel_bo_alloc_for_data(intel_screen_private *intel,
 						    const void *data,
 						    unsigned int size,
-						    char *name)
+						    const char *name)
 {
 	drm_intel_bo *bo;
 
commit 507f72d6d44963cae5d8d4b9da68165f73c6fd36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 10:39:48 2012 +0000

    uxa: Remove DPRINTF stubs
    
    It wasn't being used for anything non-trivial and was throwing compiler
    warnings, so remove it.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/common.h b/src/common.h
index f9f2300..06b2192 100644
--- a/src/common.h
+++ b/src/common.h
@@ -51,20 +51,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define PFX __FILE__,__LINE__,__FUNCTION__
 #define FUNCTION_NAME __FUNCTION__
 
-#ifdef I830DEBUG
-#define MARKER() ErrorF("\n### %s:%d: >>> %s <<< ###\n\n", \
-			 __FILE__, __LINE__,__FUNCTION__)
-#define DPRINTF I830DPRINTF
-#else /* #ifdef I830DEBUG */
-#define MARKER()
-#define DPRINTF I830DPRINTF_stub
-static inline void
-I830DPRINTF_stub(const char *filename, int line, const char *function,
-		 const char *fmt, ...)
-{
-}
-#endif /* #ifdef I830DEBUG */
-
 #define KB(x) ((x) * 1024)
 #define MB(x) ((x) * KB(1024))
 
@@ -82,9 +68,6 @@ extern void intel_init_scrn(ScrnInfoPtr scrn);
 /* Symbol lists shared by the i810 and i830 parts. */
 extern int I830EntityIndex;
 
-extern void I830DPRINTF_stub(const char *filename, int line,
-			     const char *function, const char *fmt, ...);
-
 #ifdef _I830_H_
 #define PrintErrorState i830_dump_error_state
 #define WaitRingFunc I830WaitLpRing
diff --git a/src/intel_driver.c b/src/intel_driver.c
index b3871f4..1837509 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -137,23 +137,6 @@ static Bool I830EnterVT(int scrnIndex, int flags);
 /* temporary */
 extern void xf86SetCursor(ScreenPtr screen, CursorPtr pCurs, int x, int y);
 
-#ifdef I830DEBUG
-void
-I830DPRINTF(const char *filename, int line, const char *function,
-	    const char *fmt, ...)
-{
-	va_list ap;
-
-	ErrorF("\n##############################################\n"
-	       "*** In function %s, on line %d, in file %s ***\n",
-	       function, line, filename);
-	va_start(ap, fmt);
-	VErrorF(fmt, ap);
-	va_end(ap);
-	ErrorF("##############################################\n\n");
-}
-#endif /* #ifdef I830DEBUG */
-
 /* Export I830 options to i830 driver where necessary */
 const OptionInfoRec *intel_uxa_available_options(int chipid, int busid)
 {
@@ -169,8 +152,6 @@ I830LoadPalette(ScrnInfoPtr scrn, int numColors, int *indices,
 	int p;
 	uint16_t lut_r[256], lut_g[256], lut_b[256];
 
-	DPRINTF(PFX, "I830LoadPalette: numColors: %d\n", numColors);
-
 	for (p = 0; p < xf86_config->num_crtc; p++) {
 		xf86CrtcPtr crtc = xf86_config->crtc[p];
 
@@ -979,9 +960,6 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	if (!miSetPixmapDepths())
 		return FALSE;
 
-	DPRINTF(PFX, "assert( if(!I830EnterVT(scrnIndex, 0)) )\n");
-
-	DPRINTF(PFX, "assert( if(!fbScreenInit(screen, ...) )\n");
 	if (!fbScreenInit(screen, NULL,
 			  scrn->virtualX, scrn->virtualY,
 			  scrn->xDpi, scrn->yDpi,
@@ -1055,11 +1033,9 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	if (!xf86CrtcScreenInit(screen))
 		return FALSE;
 
-	DPRINTF(PFX, "assert( if(!miCreateDefColormap(screen)) )\n");
 	if (!miCreateDefColormap(screen))
 		return FALSE;
 
-	DPRINTF(PFX, "assert( if(!xf86HandleColormaps(screen, ...)) )\n");
 	if (!xf86HandleColormaps(screen, 256, 8, I830LoadPalette, NULL,
 				 CMAP_RELOAD_ON_MODE_SWITCH |
 				 CMAP_PALETTED_TRUECOLOR)) {
@@ -1141,8 +1117,6 @@ static void I830LeaveVT(int scrnIndex, int flags)
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int ret;
 
-	DPRINTF(PFX, "Leave VT\n");
-
 	xf86RotateFreeShadow(scrn);
 
 	xf86_hide_cursors(scrn);
@@ -1162,8 +1136,6 @@ static Bool I830EnterVT(int scrnIndex, int flags)
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 	int ret;
 
-	DPRINTF(PFX, "Enter VT\n");
-
 	ret = drmSetMaster(intel->drmSubFD);
 	if (ret) {
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
@@ -1296,9 +1268,6 @@ static Bool I830PMEvent(int scrnIndex, pmEvent event, Bool undo)
 	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
-	DPRINTF(PFX, "Enter VT, event %d, undo: %s\n", event,
-		BOOLTOSTRING(undo));
-
 	switch (event) {
 	case XF86_APM_SYS_SUSPEND:
 	case XF86_APM_CRITICAL_SUSPEND:	/*do we want to delay a critical suspend? */
commit c72a67390ea243bf43e2ee4efe237ab88a4615b7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 10:21:56 2012 +0000

    sna/dri: Update for AsyncSwap interface changes
    
    We now need to return TRUE/FALSE depending on whether we need to
    invalidate the drawable.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 2a25cbd..96324e6 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -1537,7 +1537,7 @@ blit_fallback:
 }
 
 #if DRI2INFOREC_VERSION >= 7
-static void
+static Bool
 sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 		   DRI2BufferPtr front, DRI2BufferPtr back,
 		   DRI2SwapEventPtr func, void *data)
@@ -1553,6 +1553,8 @@ sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 	if (pipe == -1) {
 		PixmapPtr pixmap = get_drawable_pixmap(draw);
 
+		DBG(("%s: unattached, exchange pixmaps\n", __FUNCTION__));
+
 		set_bo(pixmap, get_private(back)->bo);
 		sna_dri_exchange_attachment(front, back);
 		get_private(back)->pixmap = pixmap;
@@ -1560,18 +1562,20 @@ sna_dri_async_swap(ClientPtr client, DrawablePtr draw,
 
 		DRI2SwapComplete(client, draw, 0, 0, 0,
 				 DRI2_EXCHANGE_COMPLETE, func, data);
-		return;
+		return TRUE;
 	}
 
 	if (!can_flip(sna, draw, front, back)) {
 blit:
+		DBG(("%s: unable to flip, so blit\n", __FUNCTION__));
+
 		sna_dri_copy(sna, draw, NULL,
 			     get_private(front)->bo,
 			     get_private(back)->bo,
 			     false);
 		DRI2SwapComplete(client, draw, 0, 0, 0,
 				 DRI2_BLIT_COMPLETE, func, data);
-		return;
+		return FALSE;
 	}
 
 	bo = NULL;
@@ -1611,13 +1615,13 @@ blit:
 
 		sna_dri_reference_buffer(front);
 		sna_dri_reference_buffer(back);
-
 	} else if (info->type != DRI2_ASYNC_FLIP) {
 		/* A normal vsync'ed client is finishing, wait for it
 		 * to unpin the old framebuffer before taking over.
 		 */
 		goto blit;
 	} else {
+		DBG(("%s: pending flip, chaining next\n", __FUNCTION__));
 		if (info->next_front.name == info->front->name) {
 			name = info->cache.name;
 			bo = info->cache.bo;
@@ -1629,16 +1633,16 @@ blit:
 		get_private(info->front)->bo = get_private(info->back)->bo;
 	}
 
-	if (bo == NULL)
+	if (bo == NULL) {
+		DBG(("%s: creating new back buffer\n", __FUNCTION__));
 		bo = kgem_create_2d(&sna->kgem,
 				    draw->width,
 				    draw->height,
 				    draw->bitsPerPixel,
 				    I915_TILING_X, CREATE_EXACT);
-	get_private(info->back)->bo = bo;
-
-	if (name == 0)
 		name = kgem_bo_flink(&sna->kgem, bo);
+	}
+	get_private(info->back)->bo = bo;
 	info->back->name = name;
 
 	set_bo(sna->front, get_private(info->front)->bo);
@@ -1646,6 +1650,7 @@ blit:
 
 	DRI2SwapComplete(client, draw, 0, 0, 0,
 			 DRI2_EXCHANGE_COMPLETE, func, data);
+	return TRUE;
 }
 #endif
 
commit 27bc2acf0e6a0e5e071e0d187bdf71577e821af8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 09:29:41 2012 +0000

    sna: Fix use of RegionInit() for singular regions
    
    For a singular region, we want to use a value for nboxes of 0, not 1;
    fortunately, if you pass in a box, it ignores the value of nboxes.
    RegionInit() is a most peculiar API!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index bef1774..91273c9 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -1026,7 +1026,7 @@ glyphs_fallback(CARD8 op,
 	DBG(("%s: (%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
-	RegionInit(&region, &box, 1);
+	RegionInit(&region, &box, 0);
 	RegionTranslate(&region, dst->pDrawable->x, dst->pDrawable->y);
 	if (dst->pCompositeClip)
 		RegionIntersect(&region, &region, dst->pCompositeClip);
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index cec0473..ebc3860 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -157,7 +157,7 @@ sna_video_clip_helper(ScrnInfoPtr scrn,
 
 	/* For textured video, we don't actually want to clip at all. */
 	if (crtc && !video->textured) {
-		RegionInit(&crtc_region_local, &crtc_box, 1);
+		RegionInit(&crtc_region_local, &crtc_box, 0);
 		crtc_region = &crtc_region_local;
 		RegionIntersect(crtc_region, crtc_region, reg);
 	}
commit d98b41ce4323b43d2359349a04a4a56559e341a2
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Fri Feb 17 19:50:52 2012 +0800

    uxa/glamor/dri: Should fixup the drawable pixmap.
    
    Two fixes in this commit: first, we only need to check the
    front left buffer; the other attachments don't need to be
    checked. Second, we should fix up the pixmap's drawable,
    not the original drawable.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 2a0102d..f6f0c86 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -280,14 +280,18 @@ I830DRI2CreateBuffers(DrawablePtr drawable, unsigned int *attachments,
 		pixmap = NULL;
 		if (attachments[i] == DRI2BufferFrontLeft) {
 			pixmap = get_front_buffer(drawable);
+
+			if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
+				is_glamor_pixmap = TRUE;
+				drawable = &pixmap->drawable;
+				pixmap = NULL;
+			}
 		} else if (attachments[i] == DRI2BufferStencil && pDepthPixmap) {
 			pixmap = pDepthPixmap;
 			pixmap->refcnt++;
 		}
 
-		is_glamor_pixmap = pixmap && (intel_get_pixmap_private(pixmap) == NULL);
-
-		if (pixmap == NULL || is_glamor_pixmap) {
+		if (pixmap == NULL) {
 			unsigned int hint = INTEL_CREATE_PIXMAP_DRI2;
 
 			if (intel->tiling & INTEL_TILING_3D) {
@@ -398,11 +402,17 @@ I830DRI2CreateBuffer(DrawablePtr drawable, unsigned int attachment,
 	}
 
 	pixmap = NULL;
-	if (attachment == DRI2BufferFrontLeft)
+	if (attachment == DRI2BufferFrontLeft) {
 		pixmap = get_front_buffer(drawable);
 
-	is_glamor_pixmap = pixmap && (intel_get_pixmap_private(pixmap) == NULL);
-	if (pixmap == NULL || is_glamor_pixmap) {
+		if (pixmap && intel_get_pixmap_private(pixmap) == NULL) {
+			is_glamor_pixmap = TRUE;
+			drawable = &pixmap->drawable;
+			pixmap = NULL;
+		}
+	}
+
+	if (pixmap == NULL) {
 		unsigned int hint = INTEL_CREATE_PIXMAP_DRI2;
 		int pixmap_width = drawable->width;
 		int pixmap_height = drawable->height;
commit 1e0d702c3a77f6db3dfd55b8cafc5fca4d778751
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Fri Feb 17 19:50:51 2012 +0800

    uxa/glamor/dri: Enable the pageflip support on glamor.
    
    To support easy buffer exchange at glamor layer, glamor
    added a new API glamor_egl_exchange_buffers() to exchange
    two pixmaps' EGL image and fbos and textures without
    recreating any of them. But this simple method's requirement
    is that there are two pixmaps. A exceptional case is:
    If we are using triple buffer when do page flipping, we
    will have an extra back_buffer which doesn't have a pixmap
    attached to it. Then each time we set that buffer to a
    pixmap, we will have to call the create_egl_textured_pixmap
    to create the corresponding EGL image and fbo and texture
    for it. This is not efficient.
    
    To fix this issue, this commit introduces a new back_pixmap
    to intel structure to hold the back buffer and corresponding
    glamor resources. Then we will just need to do the light
    weight buffer exchanging at both DDX and glamor layer.
    
    The new back pixmap is similar to the screen pixmap
    and needs to be handled carefully at close-screen time.
    As the glamor data structure is per-screen data, it will
    be released by glamor's close screen method, which must
    therefore clean up the glamor resources of both the screen
    pixmap and the back pixmap. The screen pixmap is easy to
    get, but there is no good way to store the back pixmap.
    
    So glamor adds a new API, glamor_egl_create_textured_screen_ext,
    to pass the back pixmap's pointer to the glamor layer.
    
    This commit makes us depend on glamor commit 4e58c4f,
    and increases the required glamor version from 0.3.0 to 0.3.1.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 785392a..1e77faf 100644
--- a/configure.ac
+++ b/configure.ac
@@ -158,7 +158,7 @@ AC_ARG_ENABLE(glamor,
 AC_MSG_RESULT([$GLAMOR])
 AM_CONDITIONAL(GLAMOR, test x$GLAMOR != xno)
 if test "x$GLAMOR" != "xno"; then
-	PKG_CHECK_MODULES(LIBGLAMOR, [glamor >= 0.3.0])
+	PKG_CHECK_MODULES(LIBGLAMOR, [glamor >= 0.3.1])
 	PKG_CHECK_MODULES(LIBGLAMOR_EGL, [glamor-egl])
 	AC_DEFINE(USE_GLAMOR, 1, [Enable glamor acceleration])
 fi
diff --git a/src/intel.h b/src/intel.h
index 355aaaf..e5f8bc8 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -166,6 +166,7 @@ typedef struct intel_screen_private {
 
 	void *modes;
 	drm_intel_bo *front_buffer, *back_buffer;
+	PixmapPtr back_pixmap;
 	unsigned int back_name;
 	long front_pitch, front_tiling;
 	void *shadow_buffer;
diff --git a/src/intel_display.c b/src/intel_display.c
index d525ffa..11d0e2b 100644
--- a/src/intel_display.c
+++ b/src/intel_display.c
@@ -1369,6 +1369,7 @@ intel_xf86crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	int	    i, old_width, old_height, old_pitch;
 	unsigned long pitch;
 	uint32_t tiling;
+	ScreenPtr screen;
 
 	if (scrn->virtualX == width && scrn->virtualY == height)
 		return TRUE;
@@ -1382,6 +1383,12 @@ intel_xf86crtc_resize(ScrnInfoPtr scrn, int width, int height)
 	old_fb_id = mode->fb_id;
 	old_front = intel->front_buffer;
 
+	if (intel->back_pixmap) {
+		screen = intel->back_pixmap->drawable.pScreen;
+		screen->DestroyPixmap(intel->back_pixmap);
+		intel->back_pixmap = NULL;
+	}
+
 	if (intel->back_buffer) {
 		drm_intel_bo_unreference(intel->back_buffer);
 		intel->back_buffer = NULL;
diff --git a/src/intel_dri.c b/src/intel_dri.c
index 98ae230..2a0102d 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -66,6 +66,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "dri2.h"
 
 #include "intel_glamor.h"
+#include "uxa.h"
 
 typedef struct {
 	int refcnt;
@@ -836,7 +837,7 @@ i830_dri2_del_frame_event(DrawablePtr drawable, DRI2FrameEventPtr info)
 }
 
 static struct intel_pixmap *
-intel_exchange_pixmap_buffers(PixmapPtr front, PixmapPtr back)
+intel_exchange_pixmap_buffers(struct intel_screen_private *intel, PixmapPtr front, PixmapPtr back)
 {
 	struct intel_pixmap *new_front, *new_back;
 
@@ -847,6 +848,7 @@ intel_exchange_pixmap_buffers(PixmapPtr front, PixmapPtr back)
 	new_front->busy = 1;
 	new_back->busy = -1;
 
+	intel_glamor_exchange_buffers(intel, front, back);
 	return new_front;
 }
 
@@ -866,13 +868,46 @@ I830DRI2ExchangeBuffers(struct intel_screen_private *intel, DRI2BufferPtr front,
 	back->name = tmp;
 
 	/* Swap pixmap bos */
-	new_front = intel_exchange_pixmap_buffers(front_priv->pixmap,
+	new_front = intel_exchange_pixmap_buffers(intel,
+						  front_priv->pixmap,
 						  back_priv->pixmap);
 	dri_bo_unreference (intel->front_buffer);
 	intel->front_buffer = new_front->bo;
 	dri_bo_reference (intel->front_buffer);
 }
 
+static PixmapPtr
+intel_glamor_create_back_pixmap(ScreenPtr screen,
+				PixmapPtr front_pixmap,
+				drm_intel_bo *back_bo)
+{
+	PixmapPtr back_pixmap;
+
+	back_pixmap = screen->CreatePixmap(screen,
+					   0,
+					   0,
+				           front_pixmap->drawable.depth,
+				           0);
+	if (back_pixmap == NULL)
+		return NULL;
+
+	screen->ModifyPixmapHeader(back_pixmap,
+				   front_pixmap->drawable.width,
+				   front_pixmap->drawable.height,
+				   0, 0,
+				   front_pixmap->devKind,
+				   0);
+	intel_set_pixmap_bo(back_pixmap, back_bo);
+	if (!intel_glamor_create_textured_pixmap(back_pixmap)) {
+		ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
+			   "Failed to create textured back pixmap.\n");
+		screen->DestroyPixmap(back_pixmap);
+		return NULL;
+	}
+	return back_pixmap;
+}
+
 /*
  * Our internal swap routine takes care of actually exchanging, blitting, or
  * flipping buffers as necessary.
@@ -904,6 +939,10 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 	}
 
 	if (intel->back_buffer == NULL) {
+		I830DRI2BufferPrivatePtr priv;
+		PixmapPtr front_pixmap, back_pixmap;
+		ScreenPtr screen;
+
 		new_back = drm_intel_bo_alloc(intel->bufmgr, "front buffer",
 					      intel->front_buffer->size, 0);
 		if (new_back == NULL)
@@ -920,6 +959,21 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 
 		drm_intel_bo_disable_reuse(new_back);
 		dri_bo_flink(new_back, &intel->back_name);
+
+		if ((intel->uxa_flags & UXA_USE_GLAMOR)) {
+			screen = draw->pScreen;
+			priv = info->front->driverPrivate;
+			front_pixmap = priv->pixmap;
+
+			back_pixmap = intel_glamor_create_back_pixmap(screen,
+								      front_pixmap,
+								      new_back);
+			if (back_pixmap == NULL) {
+				drm_intel_bo_unreference(new_back);
+				return FALSE;
+			}
+			intel->back_pixmap = back_pixmap;
+		}
 	} else {
 		new_back = intel->back_buffer;
 		intel->back_buffer = NULL;
@@ -933,12 +987,17 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 	info->type = DRI2_SWAP_CHAIN;
 	intel->pending_flip[info->pipe] = info;
 
-	/* Exchange the current front-buffer with the fresh bo */
-	intel->back_buffer = intel->front_buffer;
-	drm_intel_bo_reference(intel->back_buffer);
-
 	priv = info->front->driverPrivate;
-	intel_set_pixmap_bo(priv->pixmap, new_back);
+
+	/* Exchange the current front-buffer with the fresh bo */
+	if (!(intel->uxa_flags & UXA_USE_GLAMOR)) {
+		intel->back_buffer = intel->front_buffer;
+		drm_intel_bo_reference(intel->back_buffer);
+		intel_set_pixmap_bo(priv->pixmap, new_back);
+	}
+	else
+		intel_exchange_pixmap_buffers(intel, priv->pixmap,
+					      intel->back_pixmap);
 
 	tmp_name = info->front->name;
 	info->front->name = intel->back_name;
diff --git a/src/intel_driver.c b/src/intel_driver.c
index d66a8fd..b3871f4 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1210,6 +1210,11 @@ static Bool I830CloseScreen(int scrnIndex, ScreenPtr screen)
 		intel->uxa_driver = NULL;
 	}
 
+	if (intel->back_pixmap) {
+		screen->DestroyPixmap(intel->back_pixmap);
+		intel->back_pixmap = NULL;
+	}
+
 	if (intel->back_buffer) {
 		drm_intel_bo_unreference(intel->back_buffer);
 		intel->back_buffer = NULL;
diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index c468d34..a868157 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -40,6 +40,16 @@
 #include "intel_glamor.h"
 #include "uxa.h"
 
+void
+intel_glamor_exchange_buffers(struct intel_screen_private *intel,
+			      PixmapPtr src,
+			      PixmapPtr dst)
+{
+	if (!(intel->uxa_flags & UXA_USE_GLAMOR))
+		return;
+	glamor_egl_exchange_buffers(src, dst);
+}
+
 Bool
 intel_glamor_create_screen_resources(ScreenPtr screen)
 {
@@ -52,9 +62,10 @@ intel_glamor_create_screen_resources(ScreenPtr screen)
 	if (!glamor_glyphs_init(screen))
 		return FALSE;
 
-	if (!glamor_egl_create_textured_screen(screen,
-					       intel->front_buffer->handle,
-					       intel->front_pitch))
+	if (!glamor_egl_create_textured_screen_ext(screen,
+						   intel->front_buffer->handle,
+						   intel->front_pitch,
+						   &intel->back_pixmap))
 		return FALSE;
 
 	return TRUE;
@@ -70,7 +81,7 @@ intel_glamor_pre_init(ScrnInfoPtr scrn)
 	/* Load glamor module */
 	if ((glamor_module = xf86LoadSubModule(scrn, GLAMOR_EGL_MODULE_NAME))) {
 		version = xf86GetModuleVersion(glamor_module);
-		if (version < MODULE_VERSION_NUMERIC(0,3,0)) {
+		if (version < MODULE_VERSION_NUMERIC(0,3,1)) {
 			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
 			"Incompatible glamor version, required >= 0.3.0.\n");
 		} else {
diff --git a/src/intel_glamor.h b/src/intel_glamor.h
index 3065132..46692bc 100644
--- a/src/intel_glamor.h
+++ b/src/intel_glamor.h
@@ -44,7 +44,7 @@ Bool intel_glamor_create_textured_pixmap(PixmapPtr pixmap);
 void intel_glamor_destroy_pixmap(PixmapPtr pixmap);
 PixmapPtr intel_glamor_create_pixmap(ScreenPtr screen, int w, int h,
 				     int depth, unsigned int usage);
-
+void intel_glamor_exchange_buffers(struct intel_screen_private *intel, PixmapPtr src, PixmapPtr dst);
 #else
 
 static inline Bool intel_glamor_pre_init(ScrnInfoPtr scrn) { return TRUE; }
@@ -61,6 +61,7 @@ static inline void intel_glamor_destroy_pixmap(PixmapPtr pixmap) { }
 static inline PixmapPtr intel_glamor_create_pixmap(ScreenPtr screen, int w, int h,
 						   int depth, unsigned int usage) { return NULL; }
 
+static inline void intel_glamor_exchange_buffers(struct intel_screen_private *intel, PixmapPtr src, PixmapPtr dst) {}
 #endif
 
 #endif /* INTEL_GLAMOR_H */
commit ce7a57994d662f340b9457a2750e4385e7d669cd
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Fri Feb 17 19:50:50 2012 +0800

    uxa/dri: Refine the pageflip processing.
    
    Add a new element back_name to the intel structure to track
    the back bo's name and thus avoid flinking every time.
    Also, in I830DRI2ExchangeBuffers, after finishing the BO
    exchange between info's front and back pixmaps, the code
    set the new front bo on the screen pixmap. But the screen
    pixmap should be the same as the front's pixmap, so this
    is a duplicate operation and can be removed.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>

diff --git a/src/intel.h b/src/intel.h
index 8104bfe..355aaaf 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -166,6 +166,7 @@ typedef struct intel_screen_private {
 
 	void *modes;
 	drm_intel_bo *front_buffer, *back_buffer;
+	unsigned int back_name;
 	long front_pitch, front_tiling;
 	void *shadow_buffer;
 	int shadow_stride;
diff --git a/src/intel_dri.c b/src/intel_dri.c
index 8bc6157..98ae230 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -835,14 +835,27 @@ i830_dri2_del_frame_event(DrawablePtr drawable, DRI2FrameEventPtr info)
 	free(info);
 }
 
+static struct intel_pixmap *
+intel_exchange_pixmap_buffers(PixmapPtr front, PixmapPtr back)
+{
+	struct intel_pixmap *new_front, *new_back;
+
+	new_front = intel_get_pixmap_private(back);
+	new_back = intel_get_pixmap_private(front);
+	intel_set_pixmap_private(front, new_front);
+	intel_set_pixmap_private(back, new_back);
+	new_front->busy = 1;
+	new_back->busy = -1;
+
+	return new_front;
+}
+
 static void
-I830DRI2ExchangeBuffers(DrawablePtr draw, DRI2BufferPtr front, DRI2BufferPtr back)
+I830DRI2ExchangeBuffers(struct intel_screen_private *intel, DRI2BufferPtr front, DRI2BufferPtr back)
 {
 	I830DRI2BufferPrivatePtr front_priv, back_priv;
-	struct intel_pixmap *front_intel, *back_intel;
-	ScreenPtr screen;
-	intel_screen_private *intel;
 	int tmp;
+	struct intel_pixmap *new_front;
 
 	front_priv = front->driverPrivate;
 	back_priv = back->driverPrivate;
@@ -853,21 +866,11 @@ I830DRI2ExchangeBuffers(DrawablePtr draw, DRI2BufferPtr front, DRI2BufferPtr bac
 	back->name = tmp;
 
 	/* Swap pixmap bos */
-	front_intel = intel_get_pixmap_private(front_priv->pixmap);
-	back_intel = intel_get_pixmap_private(back_priv->pixmap);
-	intel_set_pixmap_private(front_priv->pixmap, back_intel);
-	intel_set_pixmap_private(back_priv->pixmap, front_intel);
-
-	screen = draw->pScreen;
-	intel = intel_get_screen_private(xf86Screens[screen->myNum]);
-
+	new_front = intel_exchange_pixmap_buffers(front_priv->pixmap,
+						  back_priv->pixmap);
 	dri_bo_unreference (intel->front_buffer);
-	intel->front_buffer = back_intel->bo;
+	intel->front_buffer = new_front->bo;
 	dri_bo_reference (intel->front_buffer);
-
-	intel_set_pixmap_private(screen->GetScreenPixmap(screen), back_intel);
-	back_intel->busy = 1;
-	front_intel->busy = -1;
 }
 
 /*
@@ -881,6 +884,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 {
 	I830DRI2BufferPrivatePtr priv = info->back->driverPrivate;
 	drm_intel_bo *new_back, *old_back;
+	int tmp_name;
 
 	if (!intel->use_triple_buffer) {
 		if (!intel_do_pageflip(intel,
@@ -889,7 +893,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 			return FALSE;
 
 		info->type = DRI2_SWAP;
-		I830DRI2ExchangeBuffers(draw, info->front, info->back);
+		I830DRI2ExchangeBuffers(intel, info->front, info->back);
 		return TRUE;
 	}
 
@@ -915,6 +919,7 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 		}
 
 		drm_intel_bo_disable_reuse(new_back);
+		dri_bo_flink(new_back, &intel->back_name);
 	} else {
 		new_back = intel->back_buffer;
 		intel->back_buffer = NULL;
@@ -934,10 +939,13 @@ I830DRI2ScheduleFlip(struct intel_screen_private *intel,
 
 	priv = info->front->driverPrivate;
 	intel_set_pixmap_bo(priv->pixmap, new_back);
-	dri_bo_flink(new_back, &info->front->name);
+
+	tmp_name = info->front->name;
+	info->front->name = intel->back_name;
+	intel->back_name = tmp_name;
 
 	/* Then flip DRI2 pointers and update the screen pixmap */
-	I830DRI2ExchangeBuffers(draw, info->front, info->back);
+	I830DRI2ExchangeBuffers(intel, info->front, info->back);
 	DRI2SwapComplete(info->client, draw, 0, 0, 0,
 			 DRI2_EXCHANGE_COMPLETE,
 			 info->event_complete,
commit 1a65e2b8a2ebfb4d736efb7631515babad75faf2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 21 08:09:52 2012 +0000

    sna: Split up/down edge walking in order to handle endpoint clipping
    
    In order to prevent walking upwards off the top of the pixmap when
    rendering a clipped vertical edge, we need to tweak the boundary
    conditions for the vertical edge walker.
    
    Reported-by: Clemens Eisserer <linuxhippy at gmail.com>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46261
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c8fcc75..215dbca 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5669,35 +5669,48 @@ X_continue2:
 				}
 
 				b->x2 = b->x1 = x;
-				b->y1 = y;
-				while (length--) {
-					e += e1;
-					y += sdy;
-					if (e >= 0) {
-						b->y2 = y;
-						if (b->y2 < b->y1) {
-							int16_t t = b->y1;
-							b->y1 = b->y2;
-							b->y2 = t;
+				if (sdy < 0) {
+					b->y2 = y + 1;
+					while (length--) {
+						e += e1;
+						y--;
+						if (e >= 0) {
+							b->y1 = y;
+							b->x2++;
+							if (++b == last_box) {
+								ret = &&Y_up_continue;
+								goto *jump;
+Y_up_continue:
+								b = box;
+							}
+							e += e3;
+							b->x2 = b->x1 = ++x;
+							b->y2 = y;
 						}
-						b->x2++;
-						if (++b == last_box) {
-							ret = &&Y_continue;
-							goto *jump;
-Y_continue:
-							b = box;
+					}
+
+					b->y1 = y;
+				} else {
+					b->y1 = y;
+					while (length--) {
+						e += e1;
+						y++;
+						if (e >= 0) {
+							b->y2 = y;
+							b->x2++;
+							if (++b == last_box) {
+								ret = &&Y_down_continue;
+								goto *jump;
+Y_down_continue:
+								b = box;
+							}
+							e += e3;
+							b->x2 = b->x1 = ++x;
+							b->y1 = y;
 						}
-						e += e3;
-						b->x2 = b->x1 = ++x;
-						b->y1 = y;
 					}
-				}
 
-				b->y2 = y + sdy;
-				if (b->y2 < b->y1) {
-					int16_t t = b->y1;
-					b->y1 = b->y2;
-					b->y2 = t;
+					b->y2 = ++y;
 				}
 				b->x2++;
 				if (++b == last_box) {
@@ -6949,35 +6962,48 @@ X_continue2:
 				}
 
 				b->x2 = b->x1 = x1;
-				b->y1 = y1;
-				while (--length) {
-					e += e1;
-					y1 += sdy;
-					if (e >= 0) {
-						b->y2 = y1;
-						if (b->y2 < b->y1) {
-							int16_t t = b->y1;
-							b->y1 = b->y2;
-							b->y2 = t;
+				if (sdy < 0) {
+					b->y2 = y1 + 1;
+					while (--length) {
+						e += e1;
+						y1--;
+						if (e >= 0) {
+							b->y1 = y1;
+							b->x2++;
+							if (++b == last_box) {
+								ret = &&Y_up_continue;
+								goto *jump;
+Y_up_continue:
+								b = box;
+							}
+							e += e3;
+							b->x2 = b->x1 = ++x1;
+							b->y2 = y1;
 						}
-						b->x2++;
-						if (++b == last_box) {
-							ret = &&Y_continue;
-							goto *jump;
-Y_continue:
-							b = box;
+					}
+
+					b->y1 = y1;
+				} else {
+					b->y1 = y1;
+					while (--length) {
+						e += e1;
+						y1++;
+						if (e >= 0) {
+							b->y2 = y1;
+							b->x2++;
+							if (++b == last_box) {
+								ret = &&Y_down_continue;
+								goto *jump;
+Y_down_continue:
+								b = box;
+							}
+							e += e3;
+							b->x2 = b->x1 = ++x1;
+							b->y1 = y1;
 						}
-						e += e3;
-						b->x2 = b->x1 = ++x1;
-						b->y1 = y1;
 					}
-				}
 
-				b->y2 = y1 + sdy;
-				if (b->y2 < b->y1) {
-					int16_t t = b->y1;
-					b->y1 = b->y2;
-					b->y2 = t;
+					b->y2 = ++y1;
 				}
 				b->x2++;
 				if (++b == last_box) {
commit 7ea44997553ffdf57b346dc9d83742c511c9e5a4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 23:50:42 2012 +0000

    sna: Restore the shadow pixels when reducing CPU damage to all
    
    Reported-by: Joe Nahmias <joe at nahmias.net>
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46346
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index aab85d8..c8fcc75 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1215,8 +1215,17 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 	if (sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
-			      pixmap->drawable.height))
+			      pixmap->drawable.height)) {
+		sna_damage_destroy(&priv->gpu_damage);
+		sna_pixmap_free_gpu(sna, priv);
+		priv->undamaged = false;
+
+		if (pixmap->devPrivate.ptr == NULL &&
+		    !sna_pixmap_alloc_cpu(sna, pixmap, priv, false))
+			return false;
+
 		goto out;
+	}
 
 	if (priv->clear)
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
commit dbe0580e207ad85cb6a659f86c5746a7ecbcd036
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 16:30:53 2012 +0000

    sna: gen4+ suffer no penalty for changing tiling
    
    On gen4, the tiling/fence constraints are fairly lax, only requiring
    page alignment of the object and its size, and so we can switch
    tiling modes without incurring a GPU stall on active bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f107f14..e7c4987 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2403,15 +2403,28 @@ search_again:
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
 
-			if (bo->pitch < pitch) {
-				DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
-				     bo->tiling, tiling,
-				     bo->pitch, pitch));
-				continue;
-			}
+			if (kgem->gen < 40) {
+				if (bo->pitch < pitch) {
+					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
+					     bo->tiling, tiling,
+					     bo->pitch, pitch));
+					continue;
+				}
 
-			if (bo->pitch * tiled_height > bytes(bo))
-				continue;
+				if (bo->pitch * tiled_height > bytes(bo))
+					continue;
+			} else {
+				if (num_pages(bo) < size)
+					continue;
+
+				if (bo->pitch != pitch) {
+					gem_set_tiling(kgem->fd,
+						       bo->handle,
+						       tiling, pitch);
+
+					bo->pitch = pitch;
+				}
+			}
 
 			kgem_bo_remove_from_active(kgem, bo);
 
@@ -2444,6 +2457,38 @@ search_again:
 	}
 
 	if (--retry && flags & CREATE_EXACT) {
+		if (kgem->gen >= 40) {
+			for (i = I915_TILING_NONE; i <= I915_TILING_Y; i++) {
+				if (i == tiling)
+					continue;
+
+				cache = &kgem->active[bucket][i];
+				list_for_each_entry(bo, cache, list) {
+					assert(!bo->purged);
+					assert(bo->refcnt == 0);
+					assert(bo->reusable);
+
+					if (num_pages(bo) < size)
+						continue;
+
+					if (tiling != gem_set_tiling(kgem->fd,
+								     bo->handle,
+								     tiling, pitch))
+						continue;
+
+					kgem_bo_remove_from_active(kgem, bo);
+
+					bo->unique_id = kgem_get_unique_id(kgem);
+					bo->pitch = pitch;
+					bo->tiling = tiling;
+					bo->delta = 0;
+					DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
+					     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+					return kgem_bo_reference(bo);
+				}
+			}
+		}
+
 		bucket++;
 		goto search_again;
 	}
commit b68b76cf54a322e80685f1ec93538cd6c5813ea4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 13:38:11 2012 +0000

    sna: Move sync'ing of CPU bo after allocation to first write
    
    The idea was that we could afford to allocate an active CPU bo for
    copying to from using the GPU and later sync just before we need to
    write to the shadow pixels. Having the sync inside the allocation
    function potentially causes an unwanted stall.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ab44cf3..aab85d8 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -321,10 +321,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 			if (priv->ptr == NULL) {
 				kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 				priv->cpu_bo = NULL;
-			} else {
-				kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+			} else
 				priv->stride = priv->cpu_bo->pitch;
-			}
 		}
 	}
 
@@ -2599,6 +2597,11 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		DBG(("%s: applying clear [%08x]\n",
 		     __FUNCTION__, priv->clear_color));
 
+		if (priv->cpu_bo) {
+			DBG(("%s: syncing CPU bo\n", __FUNCTION__));
+			kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
+		}
+
 		pixman_fill(pixmap->devPrivate.ptr,
 			    pixmap->devKind/sizeof(uint32_t),
 			    pixmap->drawable.bitsPerPixel,
commit bbb6794a3b97b1fcf72c8712ab0ec591683b128b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 12:25:31 2012 +0000

    sna: Trim clipped lines to end within bounds
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46261
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45673
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2088019..ab44cf3 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5425,8 +5425,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	int x2, y2, xstart, ystart;
-	int oc2, pt2_clipped = 0;
+	int x2, y2, xstart, ystart, oc2;
 	unsigned int bias = miGetZeroLineBias(drawable->pScreen);
 	bool degenerate = true;
 	struct sna_fill_op fill;
@@ -5471,9 +5470,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 		x2 = xstart;
 		y2 = ystart;
 		oc2 = 0;
-		MIOUTCODES(oc2, x2, y2,
-			   extents->x1, extents->y1,
-			   extents->x2, extents->y2);
+		OUTCODES(oc2, x2, y2, extents);
 
 		while (--n) {
 			int16_t sdx, sdy;
@@ -5504,9 +5501,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 			degenerate = false;
 
 			oc2 = 0;
-			MIOUTCODES(oc2, x2, y2,
-				   extents->x1, extents->y1,
-				   extents->x2, extents->y2);
+			OUTCODES(oc2, x2, y2, extents);
 			if (oc1 & oc2)
 				continue;
 
@@ -5554,13 +5549,12 @@ rectangle_continue:
 
 				x = x1;
 				y = y1;
-				pt2_clipped = 0;
 
 				if (oc1 | oc2) {
-					int pt1_clipped;
+					int pt1_clipped, pt2_clipped;
 
 					if (miZeroClipLine(extents->x1, extents->y1,
-							   extents->x2, extents->y2,
+							   extents->x2-1, extents->y2-1,
 							   &x, &y, &x2_clipped, &y2_clipped,
 							   adx, ady,
 							   &pt1_clipped, &pt2_clipped,
@@ -5568,12 +5562,6 @@ rectangle_continue:
 						continue;
 
 					length = abs(x2_clipped - x);
-
-					/* if we've clipped the endpoint, always draw the full length
-					 * of the segment, because then the capstyle doesn't matter
-					 */
-					if (pt2_clipped)
-						length++;
 					if (length == 0)
 						continue;
 
@@ -5636,13 +5624,12 @@ X_continue2:
 
 				x = x1;
 				y = y1;
-				pt2_clipped = 0;
 
 				if (oc1 | oc2) {
-					int pt1_clipped;
+					int pt1_clipped, pt2_clipped;
 
 					if (miZeroClipLine(extents->x1, extents->y1,
-							   extents->x2, extents->y2,
+							   extents->x2-1, extents->y2-1,
 							   &x, &y, &x2_clipped, &y2_clipped,
 							   adx, ady,
 							   &pt1_clipped, &pt2_clipped,
@@ -5650,12 +5637,6 @@ X_continue2:
 						continue;
 
 					length = abs(y2_clipped - y);
-
-					/* if we've clipped the endpoint, always draw the full length
-					 * of the segment, because then the capstyle doesn't matter
-					 */
-					if (pt2_clipped)
-						length++;
 					if (length == 0)
 						continue;
 
@@ -6807,17 +6788,9 @@ sna_poly_zero_segment_blt(DrawablePtr drawable,
 				continue;
 
 			oc1 = 0;
-			MIOUTCODES(oc1, x1, y1,
-				   extents->x1,
-				   extents->y1,
-				   extents->x2,
-				   extents->y2);
+			OUTCODES(oc1, x1, y1, extents);
 			oc2 = 0;
-			MIOUTCODES(oc2, x2, y2,
-				   extents->x1,
-				   extents->y1,
-				   extents->x2,
-				   extents->y2);
+			OUTCODES(oc2, x2, y2, extents);
 			if (oc1 & oc2)
 				continue;
 
@@ -6865,10 +6838,8 @@ rectangle_continue:
 					int pt1_clipped, pt2_clipped;
 					int x = x1, y = y1;
 
-					if (miZeroClipLine(extents->x1,
-							   extents->y1,
-							   extents->x2,
-							   extents->y2,
+					if (miZeroClipLine(extents->x1, extents->y1,
+							   extents->x2-1, extents->y2-1,
 							   &x1, &y1, &x2, &y2,
 							   adx, ady,
 							   &pt1_clipped, &pt2_clipped,
@@ -6937,10 +6908,8 @@ X_continue2:
 					int pt1_clipped, pt2_clipped;
 					int x = x1, y = y1;
 
-					if (miZeroClipLine(extents->x1,
-							   extents->y1,
-							   extents->x2,
-							   extents->y2,
+					if (miZeroClipLine(extents->x1, extents->y1,
+							   extents->x2-1, extents->y2-1,
 							   &x1, &y1, &x2, &y2,
 							   adx, ady,
 							   &pt1_clipped, &pt2_clipped,
commit 805bc3310cd0a13eab8e48e7615bdd42638cfa33
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 12:09:19 2012 +0000

    sna: When reversing line-drawing direction, use the clipped endpoint
    
    Make sure we take the clipping into account if we choose to reverse the
    draw direction (to maintain left-to-right inside the box emission).
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46261
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45673
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 23fc9ad..2088019 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5451,11 +5451,11 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 	}
 
 	jump = _jump[(damage != NULL) | !!(dx|dy) << 1];
-	DBG(("%s: [clipped] extents=(%d, %d), (%d, %d), delta=(%d, %d)\n",
-	     __FUNCTION__,
+	DBG(("%s: [clipped=%x] extents=(%d, %d), (%d, %d), delta=(%d, %d), damage=%p\n",
+	     __FUNCTION__, clipped,
 	     clip.extents.x1, clip.extents.y1,
 	     clip.extents.x2, clip.extents.y2,
-	     dx, dy));
+	     dx, dy, damage));
 
 	extents = REGION_RECTS(&clip);
 	last_extents = extents + REGION_NUM_RECTS(&clip);
@@ -5514,7 +5514,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 				       adx, ady, sdx, sdy,
 				       1, 1, octant);
 
-			DBG(("%s: adx=(%d, %d), sdx=(%d, %d), oc1=%d, oc2=%d\n",
+			DBG(("%s: adx=(%d, %d), sdx=(%d, %d), oc1=%x, oc2=%x\n",
 			     __FUNCTION__, adx, ady, sdx, sdy, oc1, oc2));
 			if (adx == 0 || ady == 0) {
 				if (x1 <= x2) {
@@ -5542,11 +5542,13 @@ rectangle_continue:
 					b = box;
 				}
 			} else if (adx >= ady) {
+				int x2_clipped = x2, y2_clipped = y2;
+
 				/* X-major segment */
 				e1 = ady << 1;
 				e2 = e1 - (adx << 1);
 				e  = e1 - adx;
-				length = adx;	/* don't draw endpoint in main loop */
+				length = adx;
 
 				FIXUP_ERROR(e, octant, bias);
 
@@ -5555,7 +5557,6 @@ rectangle_continue:
 				pt2_clipped = 0;
 
 				if (oc1 | oc2) {
-					int x2_clipped = x2, y2_clipped = y2;
 					int pt1_clipped;
 
 					if (miZeroClipLine(extents->x1, extents->y1,
@@ -5587,8 +5588,8 @@ rectangle_continue:
 				e  = e - e1;
 
 				if (sdx < 0) {
-					x = x2;
-					y = y2;
+					x = x2_clipped;
+					y = y2_clipped;
 					sdy = -sdy;
 				}
 
@@ -5622,11 +5623,13 @@ X_continue2:
 					b = box;
 				}
 			} else {
+				int x2_clipped = x2, y2_clipped = y2;
+
 				/* Y-major segment */
 				e1 = adx << 1;
 				e2 = e1 - (ady << 1);
 				e  = e1 - ady;
-				length  = ady;	/* don't draw endpoint in main loop */
+				length  = ady;
 
 				SetYMajorOctant(octant);
 				FIXUP_ERROR(e, octant, bias);
@@ -5636,7 +5639,6 @@ X_continue2:
 				pt2_clipped = 0;
 
 				if (oc1 | oc2) {
-					int x2_clipped = x2, y2_clipped = y2;
 					int pt1_clipped;
 
 					if (miZeroClipLine(extents->x1, extents->y1,
@@ -5668,8 +5670,8 @@ X_continue2:
 				e  = e - e1;
 
 				if (sdx < 0) {
-					x = x2;
-					y = y2;
+					x = x2_clipped;
+					y = y2_clipped;
 					sdy = -sdy;
 				}
 
commit 7f0a4a5f7224003a0cd226137de5a068949a41b4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 20 09:52:37 2012 +0000

    sna/dri: Ensure the domain tracking is reset when releasing bo used for swaps
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 0689fc5..2a25cbd 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -710,6 +710,14 @@ sna_dri_add_frame_event(struct sna_dri_frame_event *info)
 }
 
 static void
+sna_dri_frame_event_release_bo(struct kgem *kgem, struct kgem_bo *bo)
+{
+	bo->needs_flush = true; /* has been used externally, reset domains */
+	bo->reusable = true; /* No longer in use by an external client */
+	kgem_bo_destroy(kgem, bo);
+}
+
+static void
 sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 {
 	DBG(("%s: del[%p] (%p, %ld)\n", __FUNCTION__,
@@ -721,18 +729,17 @@ sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 	_sna_dri_destroy_buffer(info->sna, info->front);
 	_sna_dri_destroy_buffer(info->sna, info->back);
 
-	if (info->old_front.bo) {
-		info->old_front.bo->reusable = true;
-		kgem_bo_destroy(&info->sna->kgem, info->old_front.bo);
-	}
-	if (info->next_front.bo) {
-		info->next_front.bo->reusable = true;
-		kgem_bo_destroy(&info->sna->kgem, info->next_front.bo);
-	}
-	if (info->cache.bo) {
-		info->cache.bo->reusable = true;
-		kgem_bo_destroy(&info->sna->kgem, info->cache.bo);
-	}
+	if (info->old_front.bo)
+		sna_dri_frame_event_release_bo(&info->sna->kgem,
+					       info->old_front.bo);
+
+	if (info->next_front.bo)
+		sna_dri_frame_event_release_bo(&info->sna->kgem,
+					       info->next_front.bo);
+
+	if (info->cache.bo)
+		sna_dri_frame_event_release_bo(&info->sna->kgem,
+					       info->cache.bo);
 
 	free(info);
 }
commit 7d147c7462ba183f5316e7e8dafeca62add6f97c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Feb 19 17:50:56 2012 +0000

    sna: Correct tile sizes for Y-tiling on i915g
    
    128-byte Y-tiling wasn't introduced until the 945.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index f20a7bb..f107f14 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -731,11 +731,16 @@ static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 void kgem_get_tile_size(struct kgem *kgem, int tiling,
 			int *tile_width, int *tile_height, int *tile_size)
 {
-	if (kgem->gen < 30) {
+	if (kgem->gen <= 30) {
 		if (tiling) {
 			*tile_width = 512;
-			*tile_height = 16;
-			*tile_size = 2048;
+			if (kgem->gen < 30) {
+				*tile_height = 16;
+				*tile_size = 2048;
+			} else {
+				*tile_height = 8;
+				*tile_size = 4096;
+			}
 		} else {
 			*tile_width = 1;
 			*tile_height = 1;
@@ -754,7 +759,7 @@ void kgem_get_tile_size(struct kgem *kgem, int tiling,
 		*tile_size = 4096;
 		break;
 	case I915_TILING_Y:
-		*tile_width = kgem->gen <= 30 ? 512 : 128;
+		*tile_width = 128;
 		*tile_height = 32;
 		*tile_size = 4096;
 		break;
@@ -776,10 +781,10 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 	assert(width <= MAXSHORT);
 	assert(height <= MAXSHORT);
 
-	if (kgem->gen < 30) {
+	if (kgem->gen <= 30) {
 		if (tiling) {
 			tile_width = 512;
-			tile_height = 16;
+			tile_height = kgem->gen < 30 ? 16 : 8;
 		} else {
 			tile_width = scanout ? 64 : kgem->min_alignment;
 			tile_height = 2;
@@ -795,7 +800,7 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 		tile_height = 8;
 		break;
 	case I915_TILING_Y:
-		tile_width = kgem->gen <= 30 ? 512 : 128;
+		tile_width = 128;
 		tile_height = 32;
 		break;
 	}
@@ -837,8 +842,8 @@ static uint32_t kgem_aligned_height(struct kgem *kgem,
 {
 	uint32_t tile_height;
 
-	if (kgem->gen < 30) {
-		tile_height = tiling ? 16 : 2;
+	if (kgem->gen <= 30) {
+		tile_height = tiling ? kgem->gen < 30 ? 16 : 8 : 1;
 	} else switch (tiling) {
 	default:
 	case I915_TILING_NONE:
commit 95391b7312147760d8da01fce68b8398aa8e4e2f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 16 22:01:41 2012 +0000

    sna/trapezoids: Presume that Precise mono rasterisation adheres to the spec
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46156
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index b02f8f7..fafb16f 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3007,7 +3007,7 @@ trapezoid_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (NO_SCAN_CONVERTER)
 		return false;
 
-	if (dst->polyMode == PolyModePrecise) {
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat)) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
@@ -3351,7 +3351,7 @@ trapezoid_span_inplace(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (NO_SCAN_CONVERTER)
 		return false;
 
-	if (dst->polyMode == PolyModePrecise) {
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat)) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
@@ -3519,7 +3519,7 @@ trapezoid_span_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (NO_SCAN_CONVERTER)
 		return false;
 
-	if (dst->polyMode == PolyModePrecise) {
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat)) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
@@ -4008,7 +4008,8 @@ trap_mask_converter(PicturePtr picture,
 		return false;
 
 	/* XXX strict adherence to the Render specification */
-	if (picture->polyMode == PolyModePrecise) {
+	if (picture->polyMode == PolyModePrecise &&
+	    picture->polyEdge != PolyEdgeSharp) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
@@ -4515,7 +4516,7 @@ triangles_mask_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (NO_SCAN_CONVERTER)
 		return false;
 
-	if (dst->polyMode == PolyModePrecise) {
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat)) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
@@ -4748,7 +4749,7 @@ tristrip_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		return false;
 
 	/* XXX strict adherence to the Render specification */
-	if (dst->polyMode == PolyModePrecise) {
+	if (dst->polyMode == PolyModePrecise && !is_mono(dst, maskFormat)) {
 		DBG(("%s: fallback -- precise rasterisation requested\n",
 		     __FUNCTION__));
 		return false;
commit 52b11f63d7922032caef0f0a5979b080dbddcbfc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 16 11:22:23 2012 +0000

    sna: Upconvert fallback trapezoids to a8
    
    Since the hardware only handles a8 without tricky emulation and pixman
    insists on using a1 for sharp trapezoids we need to ensure that we
    convert the a1 to a8 for our trapezoidal mask.
    
    More worryingly, this path should never be hit...
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=46156
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 28c8a67..b02f8f7 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2009,20 +2009,41 @@ trapezoids_fallback(CARD8 op, PicturePtr src, PicturePtr dst,
 		DBG(("%s: mask (%dx%d) depth=%d, format=%08x\n",
 		     __FUNCTION__, width, height, depth, format));
 		scratch = sna_pixmap_create_upload(screen,
-						   width, height, depth,
+						   width, height, 8,
 						   KGEM_BUFFER_WRITE);
 		if (!scratch)
 			return;
 
-		memset(scratch->devPrivate.ptr, 0, scratch->devKind*height);
-		image = pixman_image_create_bits(format, width, height,
-						 scratch->devPrivate.ptr,
-						 scratch->devKind);
+		if (depth < 8) {
+			image = pixman_image_create_bits(format, width, height,
+							 NULL, 0);
+		} else {
+			memset(scratch->devPrivate.ptr, 0, scratch->devKind*height);
+			image = pixman_image_create_bits(format, width, height,
+							 scratch->devPrivate.ptr,
+							 scratch->devKind);
+		}
 		if (image) {
 			for (; ntrap; ntrap--, traps++)
 				pixman_rasterize_trapezoid(image,
 							   (pixman_trapezoid_t *)traps,
 							   -bounds.x1, -bounds.y1);
+			if (depth < 8) {
+				pixman_image_t *a8;
+
+				a8 = pixman_image_create_bits(PIXMAN_a8, width, height,
+							      scratch->devPrivate.ptr,
+							      scratch->devKind);
+				if (a8) {
+					pixman_image_composite(PIXMAN_OP_SRC,
+							       image, NULL, a8,
+							       0, 0,
+							       0, 0,
+							       0, 0,
+							       width, height);
+					pixman_image_unref (a8);
+				}
+			}
 
 			pixman_image_unref(image);
 		}
commit 8050ced6204f5aca12e6c57f86308b6ad1b98209
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 15 17:01:18 2012 +0000

    sna/dri: Mark bo as reusable after completion of a flip-event
    
    After the flip chain is completed, any residual buffers are no longer in
    use and so available for reuse.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index c83d580..0689fc5 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -721,12 +721,18 @@ sna_dri_frame_event_info_free(struct sna_dri_frame_event *info)
 	_sna_dri_destroy_buffer(info->sna, info->front);
 	_sna_dri_destroy_buffer(info->sna, info->back);
 
-	if (info->old_front.bo)
+	if (info->old_front.bo) {
+		info->old_front.bo->reusable = true;
 		kgem_bo_destroy(&info->sna->kgem, info->old_front.bo);
-	if (info->next_front.bo)
+	}
+	if (info->next_front.bo) {
+		info->next_front.bo->reusable = true;
 		kgem_bo_destroy(&info->sna->kgem, info->next_front.bo);
-	if (info->cache.bo)
+	}
+	if (info->cache.bo) {
+		info->cache.bo->reusable = true;
 		kgem_bo_destroy(&info->sna->kgem, info->cache.bo);
+	}
 
 	free(info);
 }
commit fc046aabde76142fce130773d78d797d7d750ab7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 15 16:08:23 2012 +0000

    sna/dri: Don't attempt to change tiling if it is a no-op
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_module.c b/src/intel_module.c
index 6e2af57..2c0e5cc 100644
--- a/src/intel_module.c
+++ b/src/intel_module.c
@@ -280,10 +280,8 @@ static Bool has_kernel_mode_setting(struct pci_device *dev)
 		 dev->domain, dev->bus, dev->dev, dev->func);
 
 	ret = drmCheckModesettingSupported(id);
-	if (ret) {
-		if (xf86LoadKernelModule("i915"))
-			ret = drmCheckModesettingSupported(id);
-	}
+	if (ret && xf86LoadKernelModule("i915"))
+		ret = drmCheckModesettingSupported(id);
 	/* Be nice to the user and load fbcon too */
 	if (!ret)
 		(void)xf86LoadKernelModule("fbcon");
diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index d0e19cf..c83d580 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -157,7 +157,7 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 					  PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
-	uint32_t tiling;
+	int tiling;
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
@@ -167,6 +167,8 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 		return ref(priv->gpu_bo);
 
 	tiling = color_tiling(sna, &pixmap->drawable);
+	if (tiling < 0)
+		tiling = -tiling;
 	if (priv->gpu_bo->tiling != tiling)
 		sna_pixmap_change_tiling(pixmap, tiling);
 
commit 66cc9c69657ac2703f2c7fc3c2c50f06bf5daa99
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 15 11:58:42 2012 +0000

    Be paranoid about the definition of container_of
    
    Replace any existing definition with a correct version, since there are
    broken container_of macros floating around the xorg includes.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index 9187766..8e8c612 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -267,24 +267,6 @@ list_is_empty(struct list *head)
 }
 
 /**
- * Returns a pointer to the container of this list element.
- *
- * Example:
- * struct foo* f;
- * f = container_of(&foo->entry, struct foo, entry);
- * assert(f == foo);
- *
- * @param ptr Pointer to the struct list.
- * @param type Data type of the list element.
- * @param member Member name of the struct list field in the list element.
- * @return A pointer to the data struct containing the list head.
- */
-#ifndef container_of
-#define container_of(ptr, type, member) \
-	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
-#endif
-
-/**
  * Alias of container_of
  */
 #define list_entry(ptr, type, member) \
@@ -397,14 +379,14 @@ static inline void list_move_tail(struct list *list, struct list *head)
 	list_add_tail(list, head);
 }
 
-#undef container_of
-#define container_of(ptr, type, member) \
-	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
-
 #define list_last_entry(ptr, type, member) \
     list_entry((ptr)->prev, type, member)
 
 #endif
 
+#undef container_of
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
+
 #endif /* _INTEL_LIST_H_ */
 
commit c0376b7f7b083ab2e87edc36e56fd8eb99c3cd05
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 13 00:48:15 2012 +0000

    Add a missing macro for old xorg/list.h
    
    list_last_entry() needs to be defined if we are including the xorg
    list.h as opposed to our standalone variant.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_list.h b/src/intel_list.h
index 2595a29..9187766 100644
--- a/src/intel_list.h
+++ b/src/intel_list.h
@@ -401,6 +401,9 @@ static inline void list_move_tail(struct list *list, struct list *head)
 #define container_of(ptr, type, member) \
 	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
 
+#define list_last_entry(ptr, type, member) \
+    list_entry((ptr)->prev, type, member)
+
 #endif
 
 #endif /* _INTEL_LIST_H_ */
commit 87bed52180cd2abd80ef6b58384f9fd9c9968f7b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 11 20:54:18 2012 +0000

    Include a local copy of list.h
    
    In 1.11.903, the list.h was renamed to xorg-list.h with a corresponding
    change to all structures. As we carried local fixes to list.h and
    extended functionality, just create our own list.h with a bit of
    handwaving to protect us for the brief existence of xorg/include/list.h.
    
    Reported-by: Armin K <krejzi at email.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45938
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/Makefile.am b/src/Makefile.am
index a632543..448a354 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -41,6 +41,7 @@ endif
 NULL:=#
 
 intel_drv_la_SOURCES = \
+	intel_list.h \
 	intel_module.c \
 	$(NULL)
 
diff --git a/src/intel.h b/src/intel.h
index 7593731..8104bfe 100644
--- a/src/intel.h
+++ b/src/intel.h
@@ -68,100 +68,12 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include "i915_drm.h"
 
 #include "intel_driver.h"
+#include "intel_list.h"
 
 #if HAVE_UDEV
 #include <libudev.h>
 #endif
 
-/* XXX
- * The X server gained an *almost* identical implementation in 1.9.
- *
- * Remove this duplicate code either in 2.16 (when we can depend upon 1.9)
- * or the drivers are merged back into the xserver tree, whichever happens
- * earlier.
- */
-
-#ifndef _LIST_H_
-/* classic doubly-link circular list */
-struct list {
-	struct list *next, *prev;
-};
-
-static void
-list_init(struct list *list)
-{
-	list->next = list->prev = list;
-}
-
-static inline void
-__list_add(struct list *entry,
-	    struct list *prev,
-	    struct list *next)
-{
-	next->prev = entry;
-	entry->next = next;
-	entry->prev = prev;
-	prev->next = entry;
-}
-
-static inline void
-list_add(struct list *entry, struct list *head)
-{
-	__list_add(entry, head, head->next);
-}
-
-static inline void
-__list_del(struct list *prev, struct list *next)
-{
-	next->prev = prev;
-	prev->next = next;
-}
-
-static inline void
-list_del(struct list *entry)
-{
-	__list_del(entry->prev, entry->next);
-	list_init(entry);
-}
-
-static inline Bool
-list_is_empty(struct list *head)
-{
-	return head->next == head;
-}
-#endif
-
-/* XXX work around a broken define in list.h currently [ickle 20100713] */
-#undef container_of
-
-#ifndef container_of
-#define container_of(ptr, type, member) \
-	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
-#endif
-
-#ifndef list_entry
-#define list_entry(ptr, type, member) \
-	container_of(ptr, type, member)
-#endif
-
-#ifndef list_first_entry
-#define list_first_entry(ptr, type, member) \
-	list_entry((ptr)->next, type, member)
-#endif
-
-#ifndef list_foreach
-#define list_foreach(pos, head)			\
-	for (pos = (head)->next; pos != (head);	pos = pos->next)
-#endif
-
-/* XXX list.h from xserver-1.9 uses a GCC-ism to avoid having to pass type */
-#ifndef list_foreach_entry
-#define list_foreach_entry(pos, type, head, member)		\
-	for (pos = list_entry((head)->next, type, member);\
-	     &pos->member != (head);					\
-	     pos = list_entry(pos->member.next, type, member))
-#endif
-
 /* remain compatible to xorg-server 1.6 */
 #ifndef MONITOR_EDID_COMPLETE_RAWDATA
 #define MONITOR_EDID_COMPLETE_RAWDATA EDID_COMPLETE_RAWDATA
diff --git a/src/intel_list.h b/src/intel_list.h
new file mode 100644
index 0000000..2595a29
--- /dev/null
+++ b/src/intel_list.h
@@ -0,0 +1,407 @@
+/*
+ * Copyright © 2010-2012 Intel Corporation
+ * Copyright © 2010 Francisco Jerez <currojerez at riseup.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_LIST_H_
+#define _INTEL_LIST_H_
+
+#include <xorgVersion.h>
+
+#if XORG_VERSION_CURRENT < XORG_VERSION_NUMERIC(1,9,0,0,0) || XORG_VERSION_CURRENT >= XORG_VERSION_NUMERIC(1,11,99,903,0)
+
+#include <stdbool.h>
+
+/**
+ * @file Classic doubly-link circular list implementation.
+ * For real usage examples of the linked list, see the file test/list.c
+ *
+ * Example:
+ * We need to keep a list of struct foo in the parent struct bar, i.e. what
+ * we want is something like this.
+ *
+ *     struct bar {
+ *          ...
+ *          struct foo *list_of_foos; -----> struct foo {}, struct foo {}, struct foo{}
+ *          ...
+ *     }
+ *
+ * We need one list head in bar and a list element in all list_of_foos (both are of
+ * data type 'struct list').
+ *
+ *     struct bar {
+ *          ...
+ *          struct list list_of_foos;
+ *          ...
+ *     }
+ *
+ *     struct foo {
+ *          ...
+ *          struct list entry;
+ *          ...
+ *     }
+ *
+ * Now we initialize the list head:
+ *
+ *     struct bar bar;
+ *     ...
+ *     list_init(&bar.list_of_foos);
+ *
+ * Then we create the first element and add it to this list:
+ *
+ *     struct foo *foo = malloc(...);
+ *     ....
+ *     list_add(&foo->entry, &bar.list_of_foos);
+ *
+ * Repeat the above for each element you want to add to the list. Deleting
+ * works with the element itself.
+ *      list_del(&foo->entry);
+ *      free(foo);
+ *
+ * Note: calling list_del(&bar.list_of_foos) will set bar.list_of_foos to an empty
+ * list again.
+ *
+ * Looping through the list requires a 'struct foo' as iterator and the
+ * name of the field the subnodes use.
+ *
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar.list_of_foos, entry) {
+ *      if (iterator->something == ...)
+ *             ...
+ * }
+ *
+ * Note: You must not call list_del() on the iterator if you continue the
+ * loop. You need to run the safe for-each loop instead:
+ *
+ * struct foo *iterator, *next;
+ * list_for_each_entry_safe(iterator, next, &bar.list_of_foos, entry) {
+ *      if (...)
+ *              list_del(&iterator->entry);
+ * }
+ *
+ */
+
+/**
+ * The linkage struct for list nodes. This struct must be part of your
+ * to-be-linked struct. struct list is required for both the head of the
+ * list and for each list node.
+ *
+ * Position and name of the struct list field is irrelevant.
+ * There are no requirements that elements of a list are of the same type.
+ * There are no requirements for a list head, any struct list can be a list
+ * head.
+ */
+struct list {
+    struct list *next, *prev;
+};
+
+/**
+ * Initialize the list as an empty list.
+ *
+ * Example:
+ * list_init(&bar->list_of_foos);
+ *
+ * @param The list to initialized.
+ */
+static void
+list_init(struct list *list)
+{
+    list->next = list->prev = list;
+}
+
+static inline void
+__list_add(struct list *entry,
+	    struct list *prev,
+	    struct list *next)
+{
+    next->prev = entry;
+    entry->next = next;
+    entry->prev = prev;
+    prev->next = entry;
+}
+
+/**
+ * Insert a new element after the given list head. The new element does not
+ * need to be initialised as empty list.
+ * The list changes from:
+ *      head → some element → ...
+ * to
+ *      head → new element → older element → ...
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_add(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_add(struct list *entry, struct list *head)
+{
+    __list_add(entry, head, head->next);
+}
+
+static inline void
+list_add_tail(struct list *entry, struct list *head)
+{
+    __list_add(entry, head->prev, head);
+}
+
+static inline void list_replace(struct list *old,
+				struct list *new)
+{
+	new->next = old->next;
+	new->next->prev = new;
+	new->prev = old->prev;
+	new->prev->next = new;
+}
+
+#define list_last_entry(ptr, type, member) \
+    list_entry((ptr)->prev, type, member)
+
+#define list_for_each(pos, head)				\
+    for (pos = (head)->next; pos != (head); pos = pos->next)
+
+/**
+ * Append a new element to the end of the list given with this list head.
+ *
+ * The list changes from:
+ *      head → some element → ... → lastelement
+ * to
+ *      head → some element → ... → lastelement → new element
+ *
+ * Example:
+ * struct foo *newfoo = malloc(...);
+ * list_append(&newfoo->entry, &bar->list_of_foos);
+ *
+ * @param entry The new element to prepend to the list.
+ * @param head The existing list.
+ */
+static inline void
+list_append(struct list *entry, struct list *head)
+{
+    __list_add(entry, head->prev, head);
+}
+
+
+static inline void
+__list_del(struct list *prev, struct list *next)
+{
+    next->prev = prev;
+    prev->next = next;
+}
+
+static inline void
+_list_del(struct list *entry)
+{
+    __list_del(entry->prev, entry->next);
+}
+
+/**
+ * Remove the element from the list it is in. Using this function will reset
+ * the pointers to/from this element so it is removed from the list. It does
+ * NOT free the element itself or manipulate it otherwise.
+ *
+ * Using list_del on a pure list head (like in the example at the top of
+ * this file) will NOT remove the first element from
+ * the list but rather reset the list as empty list.
+ *
+ * Example:
+ * list_del(&foo->entry);
+ *
+ * @param entry The element to remove.
+ */
+static inline void
+list_del(struct list *entry)
+{
+    _list_del(entry);
+    list_init(entry);
+}
+
+static inline void list_move(struct list *list, struct list *head)
+{
+	if (list->prev != head) {
+		_list_del(list);
+		list_add(list, head);
+	}
+}
+
+static inline void list_move_tail(struct list *list, struct list *head)
+{
+	_list_del(list);
+	list_add_tail(list, head);
+}
+
+/**
+ * Check if the list is empty.
+ *
+ * Example:
+ * list_is_empty(&bar->list_of_foos);
+ *
+ * @return True if the list contains one or more elements or False otherwise.
+ */
+static inline bool
+list_is_empty(struct list *head)
+{
+    return head->next == head;
+}
+
+/**
+ * Returns a pointer to the container of this list element.
+ *
+ * Example:
+ * struct foo* f;
+ * f = container_of(&foo->entry, struct foo, entry);
+ * assert(f == foo);
+ *
+ * @param ptr Pointer to the struct list.
+ * @param type Data type of the list element.
+ * @param member Member name of the struct list field in the list element.
+ * @return A pointer to the data struct containing the list head.
+ */
+#ifndef container_of
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
+#endif
+
+/**
+ * Alias of container_of
+ */
+#define list_entry(ptr, type, member) \
+    container_of(ptr, type, member)
+
+/**
+ * Retrieve the first list entry for the given list pointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = list_first_entry(&bar->list_of_foos, struct foo, list_of_foos);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list field in the list element.
+ * @return A pointer to the first list element.
+ */
+#define list_first_entry(ptr, type, member) \
+    list_entry((ptr)->next, type, member)
+
+/**
+ * Retrieve the last list entry for the given listpointer.
+ *
+ * Example:
+ * struct foo *first;
+ * first = list_last_entry(&bar->list_of_foos, struct foo, list_of_foos);
+ *
+ * @param ptr The list head
+ * @param type Data type of the list element to retrieve
+ * @param member Member name of the struct list field in the list element.
+ * @return A pointer to the last list element.
+ */
+#define list_last_entry(ptr, type, member) \
+    list_entry((ptr)->prev, type, member)
+
+#define __container_of(ptr, sample, member)				\
+    (void *)((char *)(ptr)						\
+	     - ((char *)&(sample)->member - (char *)(sample)))
+/**
+ * Loop through the list given by head and set pos to struct in the list.
+ *
+ * Example:
+ * struct foo *iterator;
+ * list_for_each_entry(iterator, &bar->list_of_foos, entry) {
+ *      [modify iterator]
+ * }
+ *
+ * This macro is not safe for node deletion. Use list_for_each_entry_safe
+ * instead.
+ *
+ * @param pos Iterator variable of the type of the list elements.
+ * @param head List head
+ * @param member Member name of the struct list in the list elements.
+ *
+ */
+#define list_for_each_entry(pos, head, member)				\
+    for (pos = __container_of((head)->next, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = __container_of(pos->member.next, pos, member))
+
+/**
+ * Loop through the list, keeping a backup pointer to the element. This
+ * macro allows for the deletion of a list element while looping through the
+ * list.
+ *
+ * See list_for_each_entry for more details.
+ */
+#define list_for_each_entry_safe(pos, tmp, head, member)		\
+    for (pos = __container_of((head)->next, pos, member),		\
+	 tmp = __container_of(pos->member.next, pos, member);		\
+	 &pos->member != (head);					\
+	 pos = tmp, tmp = __container_of(pos->member.next, tmp, member))
+
+#else
+
+#include <list.h>
+
+static inline void
+list_add_tail(struct list *entry, struct list *head)
+{
+    __list_add(entry, head->prev, head);
+}
+
+static inline void
+_list_del(struct list *entry)
+{
+    __list_del(entry->prev, entry->next);
+}
+
+static inline void list_replace(struct list *old,
+				struct list *new)
+{
+	new->next = old->next;
+	new->next->prev = new;
+	new->prev = old->prev;
+	new->prev->next = new;
+}
+
+static inline void list_move(struct list *list, struct list *head)
+{
+	if (list->prev != head) {
+		_list_del(list);
+		list_add(list, head);
+	}
+}
+
+static inline void list_move_tail(struct list *list, struct list *head)
+{
+	_list_del(list);
+	list_add_tail(list, head);
+}
+
+#undef container_of
+#define container_of(ptr, type, member) \
+	((type *)((char *)(ptr) - (char *) &((type *)0)->member))
+
+#endif
+
+#endif /* _INTEL_LIST_H_ */
+
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index a11846d..0cb8df3 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -1092,9 +1092,7 @@ intel_uxa_create_pixmap(ScreenPtr screen, int w, int h, int depth,
 			else
 				aligned_h = ALIGN(h, 2);
 
-			list_foreach_entry(priv, struct intel_pixmap,
-					   &intel->in_flight,
-					   in_flight) {
+			list_for_each_entry(priv, &intel->in_flight, in_flight) {
 				if (priv->tiling != tiling)
 					continue;
 
diff --git a/src/sna/Makefile.am b/src/sna/Makefile.am
index 2809617..70afd53 100644
--- a/src/sna/Makefile.am
+++ b/src/sna/Makefile.am
@@ -18,18 +18,19 @@
 #  IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
-AM_CFLAGS = @CWARNFLAGS@ @XORG_CFLAGS@ \
-	    @UDEV_CFLAGS@ \
-	    @DRM_CFLAGS@ \
-	    @DRI_CFLAGS@ \
-	    -I$(top_srcdir)/src \
-	    -I$(top_srcdir)/src/render_program
+AM_CFLAGS = \
+	@CWARNFLAGS@ \
+	-I$(top_srcdir)/src \
+	-I$(top_srcdir)/src/render_program \
+	@XORG_CFLAGS@ \
+	@UDEV_CFLAGS@ \
+	@DRM_CFLAGS@ \
+	@DRI_CFLAGS@ \
+	$(NULL)
 
 noinst_LTLIBRARIES = libsna.la
 libsna_la_LIBADD = @UDEV_LIBS@ -lm @DRM_LIBS@
 
-NULL:=#
-
 libsna_la_SOURCES = \
 	blt.c \
 	compiler.h \
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 607f1c4..f20a7bb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -51,40 +51,6 @@
 static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
-static inline void _list_del(struct list *list)
-{
-	assert(list->prev->next == list);
-	assert(list->next->prev == list);
-	__list_del(list->prev, list->next);
-}
-
-static inline void list_move(struct list *list, struct list *head)
-{
-	_list_del(list);
-	list_add(list, head);
-}
-
-static inline void list_move_tail(struct list *list, struct list *head)
-{
-	_list_del(list);
-	list_add_tail(list, head);
-}
-
-static inline void list_replace(struct list *old,
-				struct list *new)
-{
-	new->next = old->next;
-	new->next->prev = new;
-	new->prev = old->prev;
-	new->prev->next = new;
-}
-
-#define list_last_entry(ptr, type, member) \
-    list_entry((ptr)->prev, type, member)
-
-#define list_for_each(pos, head)				\
-    for (pos = (head)->next; pos != (head); pos = pos->next)
-
 
 #define DBG_NO_HW 0
 #define DBG_NO_TILING 0
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 73e2490..575c213 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -44,6 +44,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <stdint.h>
 
 #include "compiler.h"
+
 #include <xf86_OSproc.h>
 #include <xf86Pci.h>
 #include <xf86Cursor.h>
@@ -109,15 +110,11 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #define TEST_RENDER (TEST_ALL || 0)
 
 #include "intel_driver.h"
+#include "intel_list.h"
 #include "kgem.h"
 #include "sna_damage.h"
 #include "sna_render.h"
 
-static inline void list_add_tail(struct list *new, struct list *head)
-{
-	__list_add(new, head->prev, head);
-}
-
 #ifndef CREATE_PIXMAP_USAGE_SCRATCH_HEADER
 #define FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER 1
 #define CREATE_PIXMAP_USAGE_SCRATCH_HEADER (unsigned)-1
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2a4aaec..23fc9ad 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -815,14 +815,6 @@ static Bool sna_destroy_pixmap(PixmapPtr pixmap)
 	return fbDestroyPixmap(pixmap);
 }
 
-static inline void list_move(struct list *list, struct list *head)
-{
-	if (list->prev != head) {
-		__list_del(list->prev, list->next);
-		list_add(list, head);
-	}
-}
-
 static inline bool pixmap_inplace(struct sna *sna,
 				  PixmapPtr pixmap,
 				  struct sna_pixmap *priv)
commit c64ebee5fdccf313cbd3c73850e02e6fa7dd2a65
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 11 13:31:12 2012 +0000

    sna/gen6: Prefer the render ring for copies
    
    Slower for fills, but on the current stack faster for copies, both large
    and small. Hopefully, when we write some good shaders for SNB, we will
    not only improve performance for copies but also make fills faster on
    the render ring than the blt?
    
    As the BLT copy routine is GPU bound for copywinpix10, and the RENDER
    copy routine is CPU bound and faster, I believe that we have reached the
    potential of the BLT ring and not yet saturated the GPU using the render
    copy.
    
    Note that we still do not casually switch rings, so the actual routine
    chosen will still be selected by the preceding operations, so is
    unlikely to have any effect in practice during, for example, cairo-traces.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 146a2d1..1e99709 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3167,7 +3167,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   PixmapPtr src, struct kgem_bo *src_bo,
 				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
-	return (sna->kgem.ring != KGEM_RENDER ||
+	return (sna->kgem.ring == KGEM_BLT ||
 		prefer_blt_bo(sna, src, src_bo) ||
 		prefer_blt_bo(sna, dst, dst_bo));
 }
commit 6a9b50177408c919ed5c6c2463f687476af2c698
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 11 11:02:53 2012 +0000

    sna/gen6: Suppress the CS stall for the first command in the batch
    
    The batch emission serves as a full stall, so we do not need to incur a
    second before our first rendering.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 1476ff7..146a2d1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -718,11 +718,13 @@ gen6_emit_drawing_rectangle(struct sna *sna,
 	 * BEFORE the pipe-control with a post-sync op and no write-cache
 	 * flushes.
 	 */
-	OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2));
-	OUT_BATCH(GEN6_PIPE_CONTROL_CS_STALL |
-		  GEN6_PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	OUT_BATCH(0);
-	OUT_BATCH(0);
+	if (!sna->render_state.gen6.first_state_packet) {
+		OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2));
+		OUT_BATCH(GEN6_PIPE_CONTROL_CS_STALL |
+			  GEN6_PIPE_CONTROL_STALL_AT_SCOREBOARD);
+		OUT_BATCH(0);
+		OUT_BATCH(0);
+	}
 
 	OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2));
 	OUT_BATCH(GEN6_PIPE_CONTROL_WRITE_TIME);
@@ -868,6 +870,7 @@ gen6_emit_state(struct sna *sna,
 		OUT_BATCH(0);
 		OUT_BATCH(0);
 	}
+	sna->render_state.gen6.first_state_packet = false;
 }
 
 static void gen6_magic_ca_pass(struct sna *sna,
@@ -4140,6 +4143,7 @@ gen6_render_retire(struct kgem *kgem)
 static void gen6_render_reset(struct sna *sna)
 {
 	sna->render_state.gen6.needs_invariant = TRUE;
+	sna->render_state.gen6.first_state_packet = true;
 	sna->render_state.gen6.vb_id = 0;
 	sna->render_state.gen6.ve_id = -1;
 	sna->render_state.gen6.last_primitive = -1;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index a689315..7243042 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -405,6 +405,7 @@ struct gen6_render_state {
 	uint16_t surface_table;
 
 	Bool needs_invariant;
+	Bool first_state_packet;
 };
 
 enum {
commit cbe8bed83f0b4097958c4541ad7809a05e6c6f43
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 11 10:58:05 2012 +0000

    sna/gen7: Mention the depth-stall required before changing VS state
    
    Because one day we may actually start using VS! Copied from the addition
    of the w/a to Mesa by Kenneth Graunke.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5740a42..5294547 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -476,6 +476,15 @@ gen7_emit_state_base_address(struct sna *sna)
 static void
 gen7_disable_vs(struct sna *sna)
 {
+	/* For future reference:
+	 * A PIPE_CONTROL with post-sync op set to 1 and a depth stall needs
+	 * to be emitted just prior to change VS state, i.e. 3DSTATE_VS,
+	 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
+	 * 3DSTATE_BINDING_TABLE_POINTER_VS, 3DSTATE_SAMPLER_STATE_POINTER_VS.
+	 *
+	 * Here we saved by the full-flush incurred when emitting
+	 * the batchbuffer.
+	 */
 	OUT_BATCH(GEN7_3DSTATE_VS | (6 - 2));
 	OUT_BATCH(0); /* no VS kernel */
 	OUT_BATCH(0);
commit 6193f2f00fa7205f9d736340318c66d116dca53e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 9 14:16:17 2012 +0000

    sna: Fix retire after readback
    
    Upon reading, we encounter a serialisation point and so can retire all
    requests. However, kgem_bo_retire() wasn't correctly detecting that
    barrier and so we continued to using GPU detiling thinking the target
    was still busy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1c23320..607f1c4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -306,7 +306,7 @@ static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo)
 	     __FUNCTION__, bo->handle, bo->domain));
 	assert(!kgem_busy(kgem, bo->handle));
 
-	if (bo->domain == DOMAIN_GPU)
+	if (bo->rq)
 		kgem_retire(kgem);
 
 	if (bo->exec == NULL) {
@@ -3561,6 +3561,7 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
 	struct kgem_partial_bo *bo;
 	uint32_t offset = _bo->delta, length = _bo->size.bytes;
+	int domain;
 
 	assert(_bo->io);
 	assert(_bo->exec == NULL);
@@ -3587,11 +3588,11 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		if (IS_CPU_MAP(bo->base.map)) {
 			set_domain.read_domains = I915_GEM_DOMAIN_CPU;
 			set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-			bo->base.domain = DOMAIN_CPU;
+			domain = DOMAIN_CPU;
 		} else {
 			set_domain.read_domains = I915_GEM_DOMAIN_GTT;
 			set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-			bo->base.domain = DOMAIN_GTT;
+			domain = DOMAIN_GTT;
 		}
 
 		drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
@@ -3600,9 +3601,10 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 			 bo->base.handle, (char *)bo->mem+offset,
 			 offset, length);
 		kgem_bo_map__cpu(kgem, &bo->base);
-		bo->base.domain = DOMAIN_NONE;
+		domain = DOMAIN_NONE;
 	}
 	kgem_bo_retire(kgem, &bo->base);
+	bo->base.domain = domain;
 }
 
 uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format)
commit 4d8369f8e60fd4f5a0ef49f3e9866ea5ecb21927
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 8 13:15:46 2012 +0000

    sna/gen2+: Force upload rather than perform source transformations on the CPU
    
    If both the source and destination is on the CPU, then the thinking was
    it would be quicker to operate on those on the CPU rather than copy both
    to the GPU and then perform the operation. This turns out to be a false
    assumption if transformation is involved -- something to be reconsidered
    if pixman should ever be improved.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index ed48ce6..64b4e7c 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1507,9 +1507,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index fc006ac..97e5839 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2419,9 +2419,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 6246538..6e7d4be 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2016,9 +2016,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index b9c7a92..7ac993c 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2044,9 +2044,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 17789e9..1476ff7 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2325,9 +2325,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 9757405..5740a42 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2329,9 +2329,15 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+untransformed(PicturePtr p)
+{
+	return !p->transform || pixman_transform_is_int_translate(p->transform);
+}
+
+static bool
 need_upload(PicturePtr p)
 {
-	return p->pDrawable && unattached(p->pDrawable);
+	return p->pDrawable && unattached(p->pDrawable) && untransformed(p);
 }
 
 static bool
commit 8634d461bd9e5a3d3f75b0efc11db87b8d3e1245
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 8 09:13:27 2012 +0000

    sna: Limit max CPU bo size to prevent aperture thrashing on upload
    
    Copying between two objects that consume more than the available GATT
    space is a painful experience due to the forced use of an intermediary
    and eviction on every batch. The tiled upload paths are in comparison
    remarkably efficient, so favour their use when handling extremely large
    buffers.
    
    This reverses the previous idea in that we now prefer large GPU bo
    rather than large CPU bo, as the render pipeline is far more flexible
    for handling those than the blitter is for handling the CPU bo (at least
    for gen4+).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 94b6c18..1c23320 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -575,6 +575,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
 	size_t totalram;
+	unsigned half_gpu_max;
 	unsigned int i, j;
 
 	memset(kgem, 0, sizeof(*kgem));
@@ -679,7 +680,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		kgem->min_alignment = 64;
 
 	kgem->max_object_size = 2 * kgem->aperture_total / 3;
-	kgem->max_cpu_size = kgem->max_object_size;
 	kgem->max_gpu_size = kgem->max_object_size;
 	if (!kgem->has_llc)
 		kgem->max_gpu_size = MAX_CACHE_SIZE;
@@ -691,16 +691,6 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		if (kgem->max_gpu_size > kgem->aperture_low)
 			kgem->max_gpu_size = kgem->aperture_low;
 	}
-	if (kgem->max_gpu_size > kgem->max_cpu_size)
-		kgem->max_gpu_size = kgem->max_cpu_size;
-
-	kgem->max_upload_tile_size = kgem->aperture_mappable / 2;
-	if (kgem->max_upload_tile_size > kgem->max_gpu_size / 2)
-		kgem->max_upload_tile_size = kgem->max_gpu_size / 2;
-
-	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
-	if (kgem->max_copy_tile_size > kgem->max_gpu_size / 2)
-		kgem->max_copy_tile_size = kgem->max_gpu_size / 2;
 
 	totalram = total_ram_size();
 	if (totalram == 0) {
@@ -708,14 +698,32 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		     __FUNCTION__));
 		totalram = kgem->aperture_total;
 	}
+	DBG(("%s: total ram=%ld\n", __FUNCTION__, (long)totalram));
 	if (kgem->max_object_size > totalram / 2)
 		kgem->max_object_size = totalram / 2;
-	if (kgem->max_cpu_size > totalram / 2)
-		kgem->max_cpu_size = totalram / 2;
 	if (kgem->max_gpu_size > totalram / 4)
 		kgem->max_gpu_size = totalram / 4;
 
+	half_gpu_max = kgem->max_gpu_size / 2;
+	if (kgem->gen >= 40)
+		kgem->max_cpu_size = half_gpu_max;
+	else
+		kgem->max_cpu_size = kgem->max_object_size;
+
+	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
+	if (kgem->max_copy_tile_size > half_gpu_max)
+		kgem->max_copy_tile_size = half_gpu_max;
+
+	if (kgem->has_llc)
+		kgem->max_upload_tile_size = kgem->max_copy_tile_size;
+	else
+		kgem->max_upload_tile_size = kgem->aperture_mappable / 4;
+	if (kgem->max_upload_tile_size > half_gpu_max)
+		kgem->max_upload_tile_size = half_gpu_max;
+
 	kgem->large_object_size = MAX_CACHE_SIZE;
+	if (kgem->large_object_size > kgem->max_cpu_size)
+		kgem->large_object_size = kgem->max_cpu_size;
 	if (kgem->large_object_size > kgem->max_gpu_size)
 		kgem->large_object_size = kgem->max_gpu_size;
 
commit 5b16972d7850b2347efc084311d664e14263cba1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 23:45:37 2012 +0000

    sna: Check that we successfully retired an active linear buffer
    
    If we go to the trouble of running retire before searching, we may as
    well check that we retired something before proceeding to check all the
    inactive lists.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0c2f547..94b6c18 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1945,11 +1945,16 @@ search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
 		return NULL;
 
-	if (!use_active &&
-	    list_is_empty(inactive(kgem, num_pages)) &&
-	    !list_is_empty(active(kgem, num_pages, I915_TILING_NONE)) &&
-	    !kgem_retire(kgem))
-		return NULL;
+	if (!use_active && list_is_empty(inactive(kgem, num_pages))) {
+		if (list_is_empty(active(kgem, num_pages, I915_TILING_NONE)))
+			return NULL;
+
+		if (!kgem_retire(kgem))
+			return NULL;
+
+		if (list_is_empty(inactive(kgem, num_pages)))
+			return NULL;
+	}
 
 	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
commit 207b4d4482a6af4a39472ec20ff04fa0c9322d73
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 21:56:29 2012 +0000

    sna: Relax must-be-blittable rules for gen4+
    
    The render pipeline is actually more flexible than the blitter for
    dealing with large surfaces and so the BLT is no longer the limiting
    factor on gen4+.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e80eaae..0c2f547 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -799,6 +799,9 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 	uint32_t tile_width, tile_height;
 	uint32_t size;
 
+	assert(width <= MAXSHORT);
+	assert(height <= MAXSHORT);
+
 	if (kgem->gen < 30) {
 		if (tiling) {
 			tile_width = 512;
@@ -823,32 +826,26 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
 		break;
 	}
 
-	/* If it is too wide for the blitter, don't even bother.  */
 	*pitch = ALIGN(width * bpp / 8, tile_width);
-	if (kgem->gen < 40) {
-		if (tiling != I915_TILING_NONE) {
-			if (*pitch > 8192)
-				return 0;
-			for (size = tile_width; size < *pitch; size <<= 1)
-				;
-			*pitch = size;
-		} else {
-			if (*pitch >= 32768)
-				return 0;
-		}
+	height = ALIGN(height, tile_height);
+	if (kgem->gen >= 40)
+		return PAGE_ALIGN(*pitch * height);
+
+	/* If it is too wide for the blitter, don't even bother.  */
+	if (tiling != I915_TILING_NONE) {
+		if (*pitch > 8192)
+			return 0;
+
+		for (size = tile_width; size < *pitch; size <<= 1)
+			;
+		*pitch = size;
 	} else {
-		int limit = 32768;
-		if (tiling)
-			limit *= 4;
-		if (*pitch >= limit)
+		if (*pitch >= 32768)
 			return 0;
 	}
-	height = ALIGN(height, tile_height);
-	if (height >= 65536)
-		return 0;
 
 	size = *pitch * height;
-	if (relaxed_fencing || tiling == I915_TILING_NONE || kgem->gen >= 40)
+	if (relaxed_fencing || tiling == I915_TILING_NONE)
 		return PAGE_ALIGN(size);
 
 	/*  We need to allocate a pot fence region for a tiled buffer. */
@@ -2233,6 +2230,9 @@ unsigned kgem_can_create_2d(struct kgem *kgem,
 	if (depth < 8 || kgem->wedged)
 		return 0;
 
+	if (width > MAXSHORT || height > MAXSHORT)
+		return 0;
+
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
commit 13c960db9ef876ee99991d97dfc34fef184c0341
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Thu Feb 2 11:30:57 2012 +0800

    uxa/glamor: Use a macro to specify module name.
    
    This depends upon glamor commit b5f8d, just after the 0.3.0 tag.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index 262a06a..c468d34 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -68,7 +68,7 @@ intel_glamor_pre_init(ScrnInfoPtr scrn)
 	CARD32 version;
 
 	/* Load glamor module */
-	if ((glamor_module = xf86LoadSubModule(scrn, "glamor_egl"))) {
+	if ((glamor_module = xf86LoadSubModule(scrn, GLAMOR_EGL_MODULE_NAME))) {
 		version = xf86GetModuleVersion(glamor_module);
 		if (version < MODULE_VERSION_NUMERIC(0,3,0)) {
 			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
commit 70092bfbc51ddc5a51c9cae21c6b2852c216a6fc
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Wed Feb 1 19:47:28 2012 +0800

    uxa/glamor: Refine CloseScreen and InitScreen process.
    
    The previous version called glamor_egl_close_screen and
    glamor_egl_free_screen manually, which is not aligned with
    the standard process. Glamor has now been changed to follow
    the standard method:
    
    glamor layer and glamor egl layer both have their internal
    CloseScreens. The correct sequence is after the I830CloseScreen
    is registered, then register glamor_egl_close_screen and
    the last one is glamor_close_screen. So we move out the
    intel_glamor_init from the intel_uxa_init to I830ScreenInit
    and just after the registration of I830CloseScreen.
    
    As the glamor interfaces changed, we need to check the
    glamor version when loading the glamor egl module to make
    sure we are loading the right glamor module. If that
    check fails, the driver will switch back to the UXA path.
    
    This depends upon glamor commit 1bc8bf tagged with version 0.3.0.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index e953ae5..785392a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -158,7 +158,7 @@ AC_ARG_ENABLE(glamor,
 AC_MSG_RESULT([$GLAMOR])
 AM_CONDITIONAL(GLAMOR, test x$GLAMOR != xno)
 if test "x$GLAMOR" != "xno"; then
-	PKG_CHECK_MODULES(LIBGLAMOR, [glamor])
+	PKG_CHECK_MODULES(LIBGLAMOR, [glamor >= 0.3.0])
 	PKG_CHECK_MODULES(LIBGLAMOR_EGL, [glamor-egl])
 	AC_DEFINE(USE_GLAMOR, 1, [Enable glamor acceleration])
 fi
diff --git a/src/intel_driver.c b/src/intel_driver.c
index 9d1c4e8..d66a8fd 100644
--- a/src/intel_driver.c
+++ b/src/intel_driver.c
@@ -1051,6 +1051,7 @@ I830ScreenInit(int scrnIndex, ScreenPtr screen, int argc, char **argv)
 	intel->CreateScreenResources = screen->CreateScreenResources;
 	screen->CreateScreenResources = i830CreateScreenResources;
 
+	intel_glamor_init(screen);
 	if (!xf86CrtcScreenInit(screen))
 		return FALSE;
 
@@ -1124,8 +1125,6 @@ static void I830FreeScreen(int scrnIndex, int flags)
 	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
-	intel_glamor_free_screen(scrnIndex, flags);
-
 	if (intel) {
 		intel_mode_fini(intel);
 		intel_close_drm_master(intel);
diff --git a/src/intel_glamor.c b/src/intel_glamor.c
index e96daa6..262a06a 100644
--- a/src/intel_glamor.c
+++ b/src/intel_glamor.c
@@ -51,30 +51,40 @@ intel_glamor_create_screen_resources(ScreenPtr screen)
 
 	if (!glamor_glyphs_init(screen))
 		return FALSE;
+
 	if (!glamor_egl_create_textured_screen(screen,
 					       intel->front_buffer->handle,
 					       intel->front_pitch))
 		return FALSE;
+
 	return TRUE;
 }
 
 Bool
 intel_glamor_pre_init(ScrnInfoPtr scrn)
 {
-	intel_screen_private *intel;
-	intel = intel_get_screen_private(scrn);
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+	pointer glamor_module;
+	CARD32 version;
 
 	/* Load glamor module */
-	if (xf86LoadSubModule(scrn, "glamor_egl") &&
-	    glamor_egl_init(scrn, intel->drmSubFD)) {
-		xf86DrvMsg(scrn->scrnIndex, X_INFO,
-			   "glamor detected, initialising\n");
-		intel->uxa_flags |= UXA_USE_GLAMOR;
-	} else {
+	if ((glamor_module = xf86LoadSubModule(scrn, "glamor_egl"))) {
+		version = xf86GetModuleVersion(glamor_module);
+		if (version < MODULE_VERSION_NUMERIC(0,3,0)) {
+			xf86DrvMsg(scrn->scrnIndex, X_ERROR,
+			"Incompatible glamor version, required >= 0.3.0.\n");
+		} else {
+			if (glamor_egl_init(scrn, intel->drmSubFD)) {
+				xf86DrvMsg(scrn->scrnIndex, X_INFO,
+					   "glamor detected, initialising egl layer.\n");
+				intel->uxa_flags = UXA_GLAMOR_EGL_INITIALIZED;
+			} else
+				xf86DrvMsg(scrn->scrnIndex, X_WARNING,
+					   "glamor detected, failed to initialize egl.\n");
+		}
+	} else
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "glamor not available\n");
-		intel->uxa_flags &= ~UXA_USE_GLAMOR;
-	}
 
 	return TRUE;
 }
@@ -83,7 +93,13 @@ PixmapPtr
 intel_glamor_create_pixmap(ScreenPtr screen, int w, int h,
 			   int depth, unsigned int usage)
 {
-	return glamor_create_pixmap(screen, w, h, depth, usage);
+	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
+	intel_screen_private *intel = intel_get_screen_private(scrn);
+
+	if (intel->uxa_flags & UXA_USE_GLAMOR)
+		return glamor_create_pixmap(screen, w, h, depth, usage);
+	else
+		return NULL;
 }
 
 Bool
@@ -145,30 +161,29 @@ intel_glamor_finish_access(PixmapPtr pixmap, uxa_access_t access)
 	return;
 }
 
-
 Bool
 intel_glamor_init(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
 	intel_screen_private *intel = intel_get_screen_private(scrn);
 
-	if ((intel->uxa_flags & UXA_USE_GLAMOR) == 0)
-		return TRUE;
+	if ((intel->uxa_flags & UXA_GLAMOR_EGL_INITIALIZED) == 0)
+		goto fail;
 
-	if (!glamor_init(screen, GLAMOR_INVERTED_Y_AXIS)) {
+	if (!glamor_init(screen, GLAMOR_INVERTED_Y_AXIS | GLAMOR_USE_EGL_SCREEN)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "Failed to initialize glamor\n");
+			   "Failed to initialize glamor.\n");
 		goto fail;
 	}
 
 	if (!glamor_egl_init_textured_pixmap(screen)) {
 		xf86DrvMsg(scrn->scrnIndex, X_ERROR,
-			   "Failed to initialize textured pixmap.\n");
+			   "Failed to initialize textured pixmap of screen for glamor.\n");
 		goto fail;
 	}
 
 	intel->uxa_driver->flags |= UXA_USE_GLAMOR;
-	intel->uxa_flags = intel->uxa_driver->flags;
+	intel->uxa_flags |= intel->uxa_driver->flags;
 
 	intel->uxa_driver->finish_access = intel_glamor_finish_access;
 
@@ -177,8 +192,8 @@ intel_glamor_init(ScreenPtr screen)
 	return TRUE;
 
   fail:
-	xf86DrvMsg(scrn->scrnIndex, X_WARNING,
-		   "Use standard UXA acceleration.");
+	xf86DrvMsg(scrn->scrnIndex, X_INFO,
+		   "Use standard UXA acceleration.\n");
 	return FALSE;
 }
 
@@ -196,21 +211,10 @@ Bool
 intel_glamor_close_screen(ScreenPtr screen)
 {
 	ScrnInfoPtr scrn = xf86Screens[screen->myNum];
-	intel_screen_private * intel;
-
-	intel = intel_get_screen_private(scrn);
-	if (intel && (intel->uxa_flags & UXA_USE_GLAMOR))
-		return glamor_egl_close_screen(screen);
-	return TRUE;
-}
+	intel_screen_private *intel = intel_get_screen_private(scrn);
 
-void
-intel_glamor_free_screen(int scrnIndex, int flags)
-{
-	ScrnInfoPtr scrn = xf86Screens[scrnIndex];
-	intel_screen_private * intel;
+	if (intel->uxa_flags & UXA_USE_GLAMOR)
+		intel->uxa_flags &= ~UXA_USE_GLAMOR;
 
-	intel = intel_get_screen_private(scrn);
-	if (intel && (intel->uxa_flags & UXA_USE_GLAMOR))
-		glamor_egl_free_screen(scrnIndex, GLAMOR_EGL_EXTERNAL_BUFFER);
+	return TRUE;
 }
diff --git a/src/intel_uxa.c b/src/intel_uxa.c
index f04a2ef..a11846d 100644
--- a/src/intel_uxa.c
+++ b/src/intel_uxa.c
@@ -1391,7 +1391,5 @@ Bool intel_uxa_init(ScreenPtr screen)
 	uxa_set_fallback_debug(screen, intel->fallback_debug);
 	uxa_set_force_fallback(screen, intel->force_fallback);
 
-	intel_glamor_init(screen);
-
 	return TRUE;
 }
diff --git a/uxa/uxa.h b/uxa/uxa.h
index 66b5f1e..b8569f0 100644
--- a/uxa/uxa.h
+++ b/uxa/uxa.h
@@ -548,12 +548,18 @@ typedef struct _UxaDriver {
 /**
  * UXA_USE_GLAMOR indicates to use glamor acceleration to perform rendering.
  * And if glamor fail to accelerate the rendering, then goto fallback to
- * use CPU to do the rendering.
+ * use CPU to do the rendering. This flag will be set only when glamor get
+ * initialized successfully.
+ * Note, in ddx close screen, this bit need to be cleared.
  */
 #define UXA_USE_GLAMOR			(1 << 3)
 
-/** @} */
+/* UXA_GLAMOR_EGL_INITIALIZED indicates glamor egl layer get initialized
+ * successfully. UXA layer does not use this flag, before call to
+ * glamor_init, ddx need to check this flag. */
+#define UXA_GLAMOR_EGL_INITIALIZED	(1 << 4)
 
+/** @} */
 /** @name UXA CreatePixmap hint flags
  * @{
  */
commit 798aad6c95a1a95fd587430dc7a6d59497a10ce1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 20:16:48 2012 +0000

    sna/gen[4-7]: Fix erroneous scale factor for partial large bo render copies
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 700a271..6246538 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2550,8 +2550,6 @@ fallback_blt:
 	gen4_copy_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
 
-	tmp.src.scale[0] = 1.f/src->drawable.width;
-	tmp.src.scale[1] = 1.f/src->drawable.height;
 	do {
 		gen4_render_copy_one(sna, &tmp,
 				     box->x1 + src_dx, box->y1 + src_dy,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 213b80b..17789e9 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3253,7 +3253,7 @@ fallback_blt:
 	if (!gen6_check_format(tmp.src.pict_format))
 		goto fallback_blt;
 
-	tmp.op = PictOpSrc;
+	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3357,8 +3357,6 @@ fallback_blt:
 	gen6_emit_copy_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
-	tmp.src.scale[0] = 1.f / src->drawable.width;
-	tmp.src.scale[1] = 1.f / src->drawable.height;
 	do {
 		float *v;
 		int n_this_time = gen6_get_rectangles(sna, &tmp, n);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 731d952..9757405 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3271,8 +3271,7 @@ fallback_blt:
 
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
-	tmp.src.card_format =
-		gen7_get_card_format(tmp.src.pict_format);
+	tmp.src.card_format = gen7_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
 		BoxRec extents = box[0];
 		int i;
@@ -3337,8 +3336,6 @@ fallback_blt:
 	gen7_emit_copy_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
-	tmp.src.scale[0] = 1.f / src->drawable.width;
-	tmp.src.scale[1] = 1.f / src->drawable.height;
 	do {
 		float *v;
 		int n_this_time = gen7_get_rectangles(sna, &tmp, n);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 676f5c7..572d6ea 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -831,13 +831,13 @@ sna_render_pixmap_partial(struct sna *sna,
 	box.y1 = y;
 	box.x2 = x + w;
 	box.y2 = y + h;
+	DBG(("%s: unaligned box (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
 	if (box.x1 < 0)
 		box.x1 = 0;
 	if (box.y1 < 0)
 		box.y1 = 0;
-	DBG(("%s: unaligned box (%d, %d), (%d, %d)\n",
-	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
 	if (bo->tiling) {
 		int tile_width, tile_height, tile_size;
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index d491b3b..17ecaea 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -599,20 +599,6 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 			if (tile.x2 > extents.x2)
 				tile.x2 = extents.x2;
 
-			p.drawable.width  = tile.x2 - tile.x1;
-			p.drawable.height = tile.y2 - tile.y1;
-
-			DBG(("%s: tile (%d, %d), (%d, %d)\n",
-			     __FUNCTION__, tile.x1, tile.y1, tile.x2, tile.y2));
-
-			tmp_bo = kgem_create_2d(&sna->kgem,
-						p.drawable.width,
-						p.drawable.height,
-						p.drawable.bitsPerPixel,
-						tiling, CREATE_TEMPORARY);
-			if (!tmp_bo)
-				goto tiled_error;
-
 			c = clipped;
 			for (i = 0; i < n; i++) {
 				*c = box[i];
@@ -628,17 +614,31 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 				     c->y1 - tile.y1));
 				c++;
 			}
+			if (c == clipped)
+				continue;
+
+			p.drawable.width  = tile.x2 - tile.x1;
+			p.drawable.height = tile.y2 - tile.y1;
+
+			DBG(("%s: tile (%d, %d), (%d, %d)\n",
+			     __FUNCTION__, tile.x1, tile.y1, tile.x2, tile.y2));
+
+			tmp_bo = kgem_create_2d(&sna->kgem,
+						p.drawable.width,
+						p.drawable.height,
+						p.drawable.bitsPerPixel,
+						tiling, CREATE_TEMPORARY);
+			if (!tmp_bo)
+				goto tiled_error;
 
-			if (c == clipped ||
-			    (sna->render.copy_boxes(sna, GXcopy,
+			i = (sna->render.copy_boxes(sna, GXcopy,
 						    src, src_bo, src_dx, src_dy,
 						    &p, tmp_bo, -tile.x1, -tile.y1,
 						    clipped, c - clipped) &&
 			     sna->render.copy_boxes(sna, alu,
 						    &p, tmp_bo, -tile.x1, -tile.y1,
 						    dst, dst_bo, dst_dx, dst_dy,
-						    clipped, c - clipped)))
-				i = 1;
+						    clipped, c - clipped));
 
 			kgem_bo_destroy(&sna->kgem, tmp_bo);
 
commit ea6588726107f1ab0ef5a8f69d420b5bff819a76
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 15:26:50 2012 +0000

    sna: Apply offsets correctly for partial src/dst in large copy boxes
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 3db9ce7..ed48ce6 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2890,7 +2890,8 @@ fallback:
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 0f0345c..fc006ac 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3879,7 +3879,8 @@ fallback_blt:
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 459b428..700a271 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2485,7 +2485,8 @@ fallback_blt:
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 7246ed6..b9c7a92 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2801,7 +2801,8 @@ fallback_blt:
 		}
 
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 59372f8..213b80b 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3278,7 +3278,8 @@ fallback_blt:
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 8a5d95c..731d952 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3262,7 +3262,8 @@ fallback_blt:
 				extents.y2 = box[i].y2;
 		}
 		if (!sna_render_composite_redirect(sna, &tmp,
-						   extents.x1, extents.y1,
+						   extents.x1 + dst_dx,
+						   extents.y1 + dst_dy,
 						   extents.x2 - extents.x1,
 						   extents.y2 - extents.y1))
 			goto fallback_tiled;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 1625b91..676f5c7 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -836,10 +836,6 @@ sna_render_pixmap_partial(struct sna *sna,
 		box.x1 = 0;
 	if (box.y1 < 0)
 		box.y1 = 0;
-	if (box.x2 > pixmap->drawable.width)
-		box.x2 = pixmap->drawable.width;
-	if (box.y2 > pixmap->drawable.height)
-		box.y2 = pixmap->drawable.height;
 	DBG(("%s: unaligned box (%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
@@ -854,19 +850,20 @@ sna_render_pixmap_partial(struct sna *sna,
 		/* Ensure we align to an even tile row */
 		box.y1 = box.y1 & ~(2*tile_height - 1);
 		box.y2 = ALIGN(box.y2, 2*tile_height);
-		if (box.y2 > pixmap->drawable.height)
-			box.y2 = pixmap->drawable.height;
 
 		assert(tile_width * 8 >= pixmap->drawable.bitsPerPixel);
 		box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
 		box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
-		if (box.x2 > pixmap->drawable.width)
-			box.x2 = pixmap->drawable.width;
 
 		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
 	} else
 		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8;
 
+	if (box.x2 > pixmap->drawable.width)
+		box.x2 = pixmap->drawable.width;
+	if (box.y2 > pixmap->drawable.height)
+		box.y2 = pixmap->drawable.height;
+
 	w = box.x2 - box.x1;
 	h = box.y2 - box.y1;
 	DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
@@ -889,8 +886,8 @@ sna_render_pixmap_partial(struct sna *sna,
 
 	channel->bo->pitch = bo->pitch;
 
-	channel->offset[0] = x - box.x1;
-	channel->offset[1] = y - box.y1;
+	channel->offset[0] = -box.x1;
+	channel->offset[1] = -box.y1;
 	channel->scale[0] = 1.f/w;
 	channel->scale[1] = 1.f/h;
 	channel->width  = w;
commit 14c91e108464a305fb23b3313b842bfaeb9420fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 15:25:48 2012 +0000

    sna/tiling: Request Y-tiles if we know we cannot BLT to either the src or dst
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index c305da0..d491b3b 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -548,7 +548,7 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	BoxRec extents, tile, stack[64], *clipped, *c;
 	PixmapRec p;
-	int i, step;
+	int i, step, tiling;
 	Bool ret = FALSE;
 
 	extents = box[0];
@@ -568,8 +568,13 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 	while (step * step * 4 > sna->kgem.max_upload_tile_size)
 		step /= 2;
 
-	DBG(("%s: tiling copy, using %dx%d tiles\n",
-	     __FUNCTION__, step, step));
+	tiling = I915_TILING_X;
+	if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
+	    !kgem_bo_can_blt(&sna->kgem, dst_bo))
+		tiling = I915_TILING_Y;
+
+	DBG(("%s: tiling copy, using %dx%d %c tiles\n",
+	     __FUNCTION__, step, step, tiling == I915_TILING_X ? 'X' : 'Y'));
 
 	if (n > ARRAY_SIZE(stack)) {
 		clipped = malloc(sizeof(BoxRec) * n);
@@ -597,12 +602,14 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 			p.drawable.width  = tile.x2 - tile.x1;
 			p.drawable.height = tile.y2 - tile.y1;
 
+			DBG(("%s: tile (%d, %d), (%d, %d)\n",
+			     __FUNCTION__, tile.x1, tile.y1, tile.x2, tile.y2));
+
 			tmp_bo = kgem_create_2d(&sna->kgem,
 						p.drawable.width,
 						p.drawable.height,
 						p.drawable.bitsPerPixel,
-						I915_TILING_X,
-					       	CREATE_TEMPORARY);
+						tiling, CREATE_TEMPORARY);
 			if (!tmp_bo)
 				goto tiled_error;
 
commit 3131217e3ecbdf6cd40b044bd31e8228ff5c6bff
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 14:35:59 2012 +0000

    sna: Mark up the temporary allocations
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index c6e898b..c305da0 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -384,7 +384,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 							       tmp.drawable.width,
 							       tmp.drawable.height,
 							       dst->drawable.bitsPerPixel),
-					    CREATE_SCANOUT);
+					    CREATE_SCANOUT | CREATE_TEMPORARY);
 			if (bo) {
 				int16_t dx = this.extents.x1;
 				int16_t dy = this.extents.y1;
@@ -489,7 +489,7 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 					    kgem_choose_tiling(&sna->kgem,
 							       I915_TILING_X,
 							       w, h, bpp),
-					    0);
+					    CREATE_TEMPORARY);
 			if (bo) {
 				int16_t dx = this.extents.x1;
 				int16_t dy = this.extents.y1;
@@ -601,7 +601,8 @@ sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 						p.drawable.width,
 						p.drawable.height,
 						p.drawable.bitsPerPixel,
-						I915_TILING_X, 0);
+						I915_TILING_X,
+					       	CREATE_TEMPORARY);
 			if (!tmp_bo)
 				goto tiled_error;
 
commit ec1ccb6bf6a984cbe3317e636d7ff73887348a46
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 13:37:52 2012 +0000

    sna: Set the damage for render->copy_boxes to NULL before use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 1e0c680..3db9ce7 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2870,6 +2870,7 @@ fallback:
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height) ||
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index b9afdb9..0f0345c 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3859,6 +3859,7 @@ fallback_blt:
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height) ||
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 530251c..459b428 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2466,6 +2466,7 @@ fallback_blt:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.bo = dst_bo;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 487d903..7246ed6 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2781,6 +2781,7 @@ fallback_blt:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.bo = dst_bo;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d337b30..59372f8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3259,6 +3259,7 @@ fallback_blt:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 2e9948d..8a5d95c 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3243,6 +3243,7 @@ fallback_blt:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.bo = dst_bo;
 	tmp.dst.x = tmp.dst.y = 0;
+	tmp.damage = NULL;
 
 	sna_render_composite_redirect_init(&tmp);
 	if (too_large(tmp.dst.width, tmp.dst.height)) {
commit 58f634b792c6d109c5f2374a3e24d759e744abb4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Feb 7 13:32:20 2012 +0000

    sna: Handle tile alignment for untiled large bo more carefully
    
    We ended up trying to align the upper bound to zero as the integer
    division of the tile width by the pixel size was zero.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c4d2301..d337b30 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3285,8 +3285,7 @@ fallback_blt:
 
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
-	tmp.src.card_format =
-		gen6_get_card_format(tmp.src.pict_format);
+	tmp.src.card_format = gen6_get_card_format(tmp.src.pict_format);
 	if (too_large(src->drawable.width, src->drawable.height)) {
 		BoxRec extents = box[0];
 		int i;
@@ -3307,8 +3306,10 @@ fallback_blt:
 					       extents.x1 + src_dx,
 					       extents.y1 + src_dy,
 					       extents.x2 - extents.x1,
-					       extents.y2 - extents.y1))
+					       extents.y2 - extents.y1)) {
+			DBG(("%s: unable to extract partial pixmap\n", __FUNCTION__));
 			goto fallback_tiled_dst;
+		}
 	} else {
 		tmp.src.bo = kgem_bo_reference(src_bo);
 		tmp.src.width  = src->drawable.width;
@@ -3336,8 +3337,11 @@ fallback_blt:
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
-		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
+			DBG(("%s: too large for a single operation\n",
+			     __FUNCTION__));
 			goto fallback_tiled_src;
+		}
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 3309157..1625b91 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -819,10 +819,10 @@ sna_render_pixmap_partial(struct sna *sna,
 			  int16_t w, int16_t h)
 {
 	BoxRec box;
-	int tile_width, tile_height, tile_size;
 	int offset;
 
-	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
+	DBG(("%s (%d, %d)x(%d, %d), pitch %d, max %d\n",
+	     __FUNCTION__, x, y, w, h, bo->pitch, sna->render.max_3d_pitch));
 
 	if (bo->pitch > sna->render.max_3d_pitch)
 		return false;
@@ -840,20 +840,32 @@ sna_render_pixmap_partial(struct sna *sna,
 		box.x2 = pixmap->drawable.width;
 	if (box.y2 > pixmap->drawable.height)
 		box.y2 = pixmap->drawable.height;
+	DBG(("%s: unaligned box (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
-	kgem_get_tile_size(&sna->kgem, bo->tiling,
-			   &tile_width, &tile_height, &tile_size);
+	if (bo->tiling) {
+		int tile_width, tile_height, tile_size;
 
-	/* Ensure we align to an even tile row */
-	box.y1 = box.y1 & ~(2*tile_height - 1);
-	box.y2 = ALIGN(box.y2, 2*tile_height);
-	if (box.y2 > pixmap->drawable.height)
-		box.y2 = pixmap->drawable.height;
+		kgem_get_tile_size(&sna->kgem, bo->tiling,
+				   &tile_width, &tile_height, &tile_size);
+		DBG(("%s: tile size for tiling %d: %dx%d, size=%d\n",
+		     __FUNCTION__, bo->tiling, tile_width, tile_height, tile_size));
 
-	box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
-	box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
-	if (box.x2 > pixmap->drawable.width)
-		box.x2 = pixmap->drawable.width;
+		/* Ensure we align to an even tile row */
+		box.y1 = box.y1 & ~(2*tile_height - 1);
+		box.y2 = ALIGN(box.y2, 2*tile_height);
+		if (box.y2 > pixmap->drawable.height)
+			box.y2 = pixmap->drawable.height;
+
+		assert(tile_width * 8 >= pixmap->drawable.bitsPerPixel);
+		box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
+		box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
+		if (box.x2 > pixmap->drawable.width)
+			box.x2 = pixmap->drawable.width;
+
+		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+	} else
+		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8;
 
 	w = box.x2 - box.x1;
 	h = box.y2 - box.y1;
@@ -862,11 +874,13 @@ sna_render_pixmap_partial(struct sna *sna,
 	     pixmap->drawable.width, pixmap->drawable.height));
 	if (w <= 0 || h <= 0 ||
 	    w > sna->render.max_3d_size ||
-	    h > sna->render.max_3d_size)
+	    h > sna->render.max_3d_size) {
+		DBG(("%s: box too large (%dx%d) for 3D pipeline (max %d)\n",
+		    __FUNCTION__, w, h, sna->render.max_3d_size));
 		return false;
+	}
 
 	/* How many tiles across are we? */
-	offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
 	channel->bo = kgem_create_proxy(bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
@@ -895,27 +909,11 @@ sna_render_picture_partial(struct sna *sna,
 	struct kgem_bo *bo = NULL;
 	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
 	BoxRec box;
-	int tile_width, tile_height, tile_size;
 	int offset;
 
 	DBG(("%s (%d, %d)x(%d, %d) [dst=(%d, %d)]\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
-	if (use_cpu_bo(sna, pixmap, &box)) {
-		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
-			return 0;
-
-		bo = sna_pixmap(pixmap)->cpu_bo;
-	} else {
-		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
-			return 0;
-
-		bo = sna_pixmap(pixmap)->gpu_bo;
-	}
-
-	if (bo->pitch > sna->render.max_3d_pitch)
-		return 0;
-
 	box.x1 = x;
 	box.y1 = y;
 	box.x2 = x + w;
@@ -951,19 +949,41 @@ sna_render_picture_partial(struct sna *sna,
 		}
 	}
 
-	kgem_get_tile_size(&sna->kgem, bo->tiling,
-			   &tile_width, &tile_height, &tile_size);
+	if (use_cpu_bo(sna, pixmap, &box)) {
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
+			return 0;
 
-	/* Ensure we align to an even tile row */
-	box.y1 = box.y1 & ~(2*tile_height - 1);
-	box.y2 = ALIGN(box.y2, 2*tile_height);
-	if (box.y2 > pixmap->drawable.height)
-		box.y2 = pixmap->drawable.height;
+		bo = sna_pixmap(pixmap)->cpu_bo;
+	} else {
+		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+			return 0;
 
-	box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
-	box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
-	if (box.x2 > pixmap->drawable.width)
-		box.x2 = pixmap->drawable.width;
+		bo = sna_pixmap(pixmap)->gpu_bo;
+	}
+
+	if (bo->pitch > sna->render.max_3d_pitch)
+		return 0;
+
+	if (bo->tiling) {
+		int tile_width, tile_height, tile_size;
+
+		kgem_get_tile_size(&sna->kgem, bo->tiling,
+				   &tile_width, &tile_height, &tile_size);
+
+		/* Ensure we align to an even tile row */
+		box.y1 = box.y1 & ~(2*tile_height - 1);
+		box.y2 = ALIGN(box.y2, 2*tile_height);
+		if (box.y2 > pixmap->drawable.height)
+			box.y2 = pixmap->drawable.height;
+
+		box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
+		box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
+		if (box.x2 > pixmap->drawable.width)
+			box.x2 = pixmap->drawable.width;
+
+		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+	} else
+		offset = box.x1 * pixmap->drawable.bitsPerPixel / 8;
 
 	w = box.x2 - box.x1;
 	h = box.y2 - box.y1;
@@ -976,7 +996,6 @@ sna_render_picture_partial(struct sna *sna,
 		return 0;
 
 	/* How many tiles across are we? */
-	offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
 	channel->bo = kgem_create_proxy(bo,
 					box.y1 * bo->pitch + offset,
 					h * bo->pitch);
@@ -1638,53 +1657,63 @@ sna_render_composite_redirect(struct sna *sna,
 	return FALSE;
 #endif
 
-	DBG(("%s: target too large (%dx%d), copying to temporary %dx%d\n",
-	     __FUNCTION__, op->dst.width, op->dst.height, width,height));
+	DBG(("%s: target too large (%dx%d), copying to temporary %dx%d, max %d\n",
+	     __FUNCTION__,
+	     op->dst.width, op->dst.height,
+	     width, height,
+	     sna->render.max_3d_size));
 
 	if (!width || !height)
 		return FALSE;
 
-	if (width  > sna->render.max_3d_pitch ||
-	    height > sna->render.max_3d_pitch)
+	if (width  > sna->render.max_3d_size ||
+	    height > sna->render.max_3d_size)
 		return FALSE;
 
 	if (op->dst.bo->pitch <= sna->render.max_3d_pitch) {
-		int tile_width, tile_height, tile_size;
 		BoxRec box;
-		int w, h;
+		int w, h, offset;
 
 		DBG(("%s: dst pitch (%d) fits within render pipeline (%d)\n",
 		     __FUNCTION__, op->dst.bo->pitch, sna->render.max_3d_pitch));
 
-		kgem_get_tile_size(&sna->kgem, op->dst.bo->tiling,
-				   &tile_width, &tile_height, &tile_size);
-
 		box.x1 = x;
 		box.x2 = x + width;
 		box.y1 = y;
 		box.y2 = y + height;
 
 		/* Ensure we align to an even tile row */
-		box.y1 = box.y1 & ~(2*tile_height - 1);
-		box.y2 = ALIGN(box.y2, 2*tile_height);
+		if (op->dst.bo->tiling) {
+			int tile_width, tile_height, tile_size;
+
+			kgem_get_tile_size(&sna->kgem, op->dst.bo->tiling,
+					   &tile_width, &tile_height, &tile_size);
+
+			box.y1 = box.y1 & ~(2*tile_height - 1);
+			box.y2 = ALIGN(box.y2, 2*tile_height);
+
+			box.x1 = box.x1 & ~(tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel - 1);
+			box.x2 = ALIGN(box.x2, tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel);
+
+			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+		} else
+			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8;
+
 		if (box.y2 > op->dst.pixmap->drawable.height)
 			box.y2 = op->dst.pixmap->drawable.height;
 
-		box.x1 = box.x1 & ~(tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel - 1);
-		box.x2 = ALIGN(box.x2, tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel);
 		if (box.x2 > op->dst.pixmap->drawable.width)
 			box.x2 = op->dst.pixmap->drawable.width;
 
 		w = box.x2 - box.x1;
 		h = box.y2 - box.y1;
-		DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
+		DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d), max %d\n", __FUNCTION__,
 		     box.x1, box.y1, box.x2, box.y2, w, h,
 		     op->dst.pixmap->drawable.width,
-		     op->dst.pixmap->drawable.height));
+		     op->dst.pixmap->drawable.height,
+		     sna->render.max_3d_size));
 		if (w <= sna->render.max_3d_size &&
 		    h <= sna->render.max_3d_size) {
-			int offset;
-
 			t->box.x2 = t->box.x1 = op->dst.x;
 			t->box.y2 = t->box.y1 = op->dst.y;
 			t->real_bo = op->dst.bo;
@@ -1695,7 +1724,6 @@ sna_render_composite_redirect(struct sna *sna,
 			}
 
 			/* How many tiles across are we? */
-			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
 			op->dst.bo = kgem_create_proxy(op->dst.bo,
 						       box.y1 * op->dst.bo->pitch + offset,
 						       h * op->dst.bo->pitch);
commit bf3518ea910a2a3c5b4500dcba7c829576fbfd47
Author: Zhigang Gong <zhigang.gong at linux.intel.com>
Date:   Tue Feb 7 09:40:54 2012 +0800

    uxa/glamor/dri: Fix a typo bug when fixing up the glamor pixmap.
    
    We should modify the old pixmap's header, not the new one, which
    was already destroyed.
    
    Signed-off-by: Zhigang Gong <zhigang.gong at linux.intel.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/intel_dri.c b/src/intel_dri.c
index 0c6d45c..8bc6157 100644
--- a/src/intel_dri.c
+++ b/src/intel_dri.c
@@ -174,7 +174,7 @@ static PixmapPtr fixup_glamor(DrawablePtr drawable, PixmapPtr pixmap)
 		xf86DrvMsg(scrn->scrnIndex, X_WARNING,
 			   "Failed to get DRI drawable for glamor pixmap.\n");
 
-	screen->ModifyPixmapHeader(pixmap,
+	screen->ModifyPixmapHeader(old,
 				   drawable->width,
 				   drawable->height,
 				   0, 0,
commit 1467a4ba1a327877026cc76b3eabeb51d1415509
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 20:41:53 2012 +0000

    sna: Use the proper sna_picture_is_solid() test
    
    Rather than the specialised routines that assumed pDrawable was
    non-NULL, which was no longer true after f30be6f743.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 606ecfe..1e0c680 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1486,14 +1486,6 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
-}
-
-static bool
 is_unhandled_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
@@ -1523,7 +1515,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index de6a3ad..b9afdb9 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2413,14 +2413,6 @@ static inline bool is_constant_ps(uint32_t type)
 }
 
 static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
-}
-
-static bool
 has_alphamap(PicturePtr p)
 {
 	return p->alphaMap != NULL;
@@ -2435,7 +2427,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 3cb0fbb..530251c 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2001,14 +2001,6 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
-}
-
-static bool
 is_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
@@ -2032,7 +2024,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index f15342d..487d903 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2029,14 +2029,6 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
-}
-
-static bool
 is_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
@@ -2060,7 +2052,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 06c33c4..c4d2301 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2270,12 +2270,6 @@ static bool can_switch_rings(struct sna *sna)
 	return sna->kgem.has_semaphores && !NO_RING_SWITCH;
 }
 
-static bool
-is_solid(PicturePtr picture)
-{
-	return sna_picture_is_solid(picture, NULL);
-}
-
 static Bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
@@ -2308,7 +2302,7 @@ try_blt(struct sna *sna,
 	}
 
 	if (can_switch_rings(sna)) {
-		if (is_solid(src))
+		if (sna_picture_is_solid(src, NULL))
 			return TRUE;
 	}
 
@@ -2339,7 +2333,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5a867b1..2e9948d 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2314,14 +2314,6 @@ try_blt(struct sna *sna,
 }
 
 static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
-}
-
-static bool
 is_gradient(PicturePtr picture)
 {
 	if (picture->pDrawable)
@@ -2345,7 +2337,7 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
-	if (is_solid(p))
+	if (sna_picture_is_solid(p, NULL))
 		return false;
 
 	return (has_alphamap(p) ||
commit ef335a65a9d9f7eb96873201b546c25fd03d090c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 20:37:54 2012 +0000

    sna: Search all active buckets for a temporary allocation
    
    Reduce the need for creating a new object if we only need the allocation
    for a single operation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 259666e..e80eaae 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2291,13 +2291,14 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	if (tiling < 0)
 		tiling = -tiling, flags |= CREATE_EXACT;
 
-	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d)\n", __FUNCTION__,
+	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, temp?=%d)\n", __FUNCTION__,
 	     width, height, bpp, tiling,
 	     !!(flags & CREATE_EXACT),
 	     !!(flags & CREATE_INACTIVE),
 	     !!(flags & CREATE_CPU_MAP),
 	     !!(flags & CREATE_GTT_MAP),
-	     !!(flags & CREATE_SCANOUT)));
+	     !!(flags & CREATE_SCANOUT),
+	     !!(flags & CREATE_TEMPORARY)));
 
 	size = kgem_surface_size(kgem,
 				 kgem->has_relaxed_fencing,
@@ -2404,7 +2405,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 
 	/* Best active match */
 	retry = NUM_CACHE_BUCKETS - bucket;
-	if (retry > 3)
+	if (retry > 3 && (flags & CREATE_TEMPORARY) == 0)
 		retry = 3;
 search_again:
 	assert(bucket < NUM_CACHE_BUCKETS);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index b6930e0..f3a7b94 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -221,6 +221,7 @@ enum {
 	CREATE_CPU_MAP = 0x4,
 	CREATE_GTT_MAP = 0x8,
 	CREATE_SCANOUT = 0x10,
+	CREATE_TEMPORARY = 0x20,
 };
 struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 			       int width,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index a8bfe40..2a4aaec 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -700,7 +700,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 
 	priv->gpu_bo = kgem_create_2d(&sna->kgem,
 				      width, height, bpp, tiling,
-				      0);
+				      CREATE_TEMPORARY);
 	if (priv->gpu_bo == NULL) {
 		free(priv);
 		fbDestroyPixmap(pixmap);
@@ -5522,7 +5522,7 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 				       adx, ady, sdx, sdy,
 				       1, 1, octant);
 
-			DBG(("%s: adx=(%d, %d), sdx=(%d, %d), oc1=%d, oc2\n",
+			DBG(("%s: adx=(%d, %d), sdx=(%d, %d), oc1=%d, oc2=%d\n",
 			     __FUNCTION__, adx, ady, sdx, sdy, oc1, oc2));
 			if (adx == 0 || ady == 0) {
 				if (x1 <= x2) {
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index c2b9e79..3309157 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1146,7 +1146,7 @@ sna_render_picture_extract(struct sna *sna,
 				    kgem_choose_tiling(&sna->kgem,
 						       I915_TILING_X, w, h,
 						       pixmap->drawable.bitsPerPixel),
-				    0);
+				    CREATE_TEMPORARY);
 		if (bo) {
 			PixmapRec tmp;
 
@@ -1725,7 +1725,7 @@ sna_render_composite_redirect(struct sna *sna,
 			    width, height, bpp,
 			    kgem_choose_tiling(&sna->kgem, I915_TILING_X,
 					       width, height, bpp),
-			    CREATE_SCANOUT);
+			    CREATE_SCANOUT | CREATE_TEMPORARY);
 	if (!bo)
 		return FALSE;
 
commit b7e3aaf773f05ce82405e135c0f99b40b3c2f434
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 18:08:19 2012 +0000

    sna: Use the clipped end-point for recomputing segment length after clipping
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45673
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8732ed4..a8bfe40 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5476,9 +5476,6 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 		xstart = pt->x + drawable->x;
 		ystart = pt->y + drawable->y;
 
-		/* x2, y2, oc2 copied to x1, y1, oc1 at top of loop to simplify
-		 * iteration logic
-		 */
 		x2 = xstart;
 		y2 = ystart;
 		oc2 = 0;
@@ -5525,8 +5522,8 @@ sna_poly_zero_line_blt(DrawablePtr drawable,
 				       adx, ady, sdx, sdy,
 				       1, 1, octant);
 
-			DBG(("%s: adx=(%d, %d), sdx=(%d, %d)\n",
-			     __FUNCTION__, adx, ady, sdx, sdy));
+			DBG(("%s: adx=(%d, %d), sdx=(%d, %d), oc1=%d, oc2\n",
+			     __FUNCTION__, adx, ady, sdx, sdy, oc1, oc2));
 			if (adx == 0 || ady == 0) {
 				if (x1 <= x2) {
 					b->x1 = x1;
@@ -5658,7 +5655,7 @@ X_continue2:
 							   octant, bias, oc1, oc2) == -1)
 						continue;
 
-					length = abs(y2 - y);
+					length = abs(y2_clipped - y);
 
 					/* if we've clipped the endpoint, always draw the full length
 					 * of the segment, because then the capstyle doesn't matter
commit f30be6f74392f5687ffe8bc9dd2c6dc024ae06c8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 15:59:21 2012 +0000

    sna/gen2+: Exclude solids from being classed as requiring an upload
    
    We treat any pixmap that is not attached to either a CPU or GPU bo as
    requiring the pixel data to be uploaded to the GPU before we can
    composite. Normally this is true, except for the solid cache.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45672
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 97b558d..606ecfe 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1523,6 +1523,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		is_unhandled_gradient(p) ||
 		!gen2_check_filter(p) ||
@@ -1572,7 +1575,7 @@ gen2_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
@@ -1580,7 +1583,7 @@ gen2_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index d5f5617..de6a3ad 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2435,6 +2435,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		!gen3_check_xformat(p) ||
 		!gen3_check_filter(p) ||
@@ -2494,7 +2497,7 @@ gen3_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv &&
 		    ((priv->gpu_damage && !priv->cpu_damage) ||
@@ -2504,7 +2507,7 @@ gen3_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv &&
 		    ((priv->gpu_damage && !priv->cpu_damage) ||
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index b3a64d9..3cb0fbb 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2032,6 +2032,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		is_gradient(p) ||
 		!gen4_check_filter(p) ||
@@ -2082,7 +2085,7 @@ gen4_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
@@ -2090,7 +2093,7 @@ gen4_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 933c51f..f15342d 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2060,6 +2060,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		is_gradient(p) ||
 		!gen5_check_filter(p) ||
@@ -2110,7 +2113,7 @@ gen5_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
@@ -2118,7 +2121,7 @@ gen5_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d73fda8..06c33c4 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2339,6 +2339,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		is_gradient(p) ||
 		!gen6_check_filter(p) ||
@@ -2391,7 +2394,7 @@ gen6_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
@@ -2399,7 +2402,7 @@ gen6_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 5385a47..5a867b1 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2345,6 +2345,9 @@ need_upload(PicturePtr p)
 static bool
 source_fallback(PicturePtr p)
 {
+	if (is_solid(p))
+		return false;
+
 	return (has_alphamap(p) ||
 		is_gradient(p) ||
 		!gen7_check_filter(p) ||
@@ -2397,7 +2400,7 @@ gen7_composite_fallback(struct sna *sna,
 		return FALSE;
 	}
 
-	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
+	if (src_pixmap && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
@@ -2405,7 +2408,7 @@ gen7_composite_fallback(struct sna *sna,
 			return FALSE;
 		}
 	}
-	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
+	if (mask_pixmap && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
 		if (priv && priv->gpu_damage && !priv->cpu_damage) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
commit f009386de87acccadb1803567e4c494e5e80a2a9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 09:50:03 2012 +0000

    sna: If we have a CPU bo, do not assert we have shadow pixels
    
    When transferring damage to the GPU on SNB, it is not necessarily true
    that we have a shadow pixmap; we may instead have drawn onto an unmapped
    CPU bo and now simply need to copy from that bo onto the GPU. Move the
    assertion onto the path where it truly matters.
    
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45672
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b6f6772..8732ed4 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2143,7 +2143,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		pixmap->devKind = priv->stride;
 		priv->mapped = false;
 	}
-	assert(pixmap->devPrivate.ptr != NULL);
 
 	n = sna_damage_get_boxes(priv->cpu_damage, &box);
 	if (n) {
@@ -2158,6 +2157,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n);
 		if (!ok) {
+			assert(pixmap->devPrivate.ptr != NULL);
 			if (n == 1 && !priv->pinned &&
 			    (box->x2 - box->x1) >= pixmap->drawable.width &&
 			    (box->y2 - box->y1) >= pixmap->drawable.height) {
commit 22e452ebe01c32a08599411743cf18f9ad0545a7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Feb 6 09:19:56 2012 +0000

    sna: Disable use of xvmc for SNB+
    
    Not yet implemented, so don't bother setting it to fail.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=44874
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_hwmc.c b/src/sna/sna_video_hwmc.c
index 1f36096..2baa939 100644
--- a/src/sna/sna_video_hwmc.c
+++ b/src/sna/sna_video_hwmc.c
@@ -199,6 +199,10 @@ Bool sna_video_xvmc_setup(struct sna *sna,
 	if (sna->kgem.gen < 31)
 		return FALSE;
 
+	/* Not implemented */
+	if (sna->kgem.gen >= 60)
+		return FALSE;
+
 	pAdapt = calloc(1, sizeof(XF86MCAdaptorRec));
 	if (!pAdapt)
 		return FALSE;
commit a8ed1a02ada1f8d2f910dfefb150d26c840bf9ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 4 20:13:07 2012 +0000

    sna: Discard the redundant clear of the unbounded area if already clear
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index c2a4190..28c8a67 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2658,6 +2658,13 @@ choose_span(PicturePtr dst,
 }
 
 static bool
+sna_drawable_is_clear(DrawablePtr d)
+{
+	struct sna_pixmap *priv = sna_pixmap(get_drawable_pixmap(d));
+	return priv && priv->clear && priv->clear_color == 0;
+}
+
+static bool
 mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 			       INT16 src_x, INT16 src_y,
 			       int ntrap, xTrapezoid *traps)
@@ -2666,6 +2673,7 @@ mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	BoxRec extents;
 	int16_t dst_x, dst_y;
 	int16_t dx, dy;
+	bool was_clear;
 	int n;
 
 	if (NO_SCAN_CONVERTER)
@@ -2709,6 +2717,8 @@ mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	if (!mono_init(&mono, 2*ntrap))
 		return false;
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+
 	for (n = 0; n < ntrap; n++) {
 		if (!xTrapezoidValid(&traps[n]))
 			continue;
@@ -2741,7 +2751,7 @@ mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	mono.op.done(mono.sna, &mono.op);
 	mono_fini(&mono);
 
-	if (!operator_is_bounded(op)) {
+	if (!was_clear && !operator_is_bounded(op)) {
 		xPointFixed p1, p2;
 
 		if (!mono_init(&mono, 2+2*ntrap))
@@ -4223,6 +4233,7 @@ mono_triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	BoxRec extents;
 	int16_t dst_x, dst_y;
 	int16_t dx, dy;
+	bool was_clear;
 	int n;
 
 	mono.sna = to_sna_from_drawable(dst->pDrawable);
@@ -4261,6 +4272,8 @@ mono_triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 	     src_x + mono.clip.extents.x1 - dst_x - dx,
 	     src_y + mono.clip.extents.y1 - dst_y - dy));
 
+	was_clear = sna_drawable_is_clear(dst->pDrawable);
+
 	if (mono_init(&mono, 3*count))
 		return false;
 
@@ -4289,7 +4302,7 @@ mono_triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		mono.op.done(mono.sna, &mono.op);
 	}
 
-	if (!operator_is_bounded(op)) {
+	if (!was_clear && !operator_is_bounded(op)) {
 		xPointFixed p1, p2;
 
 		if (!mono_init(&mono, 2+3*count))
commit b899a4b69696141ca8b897a7abf52649b09f7b3b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 4 20:07:49 2012 +0000

    sna: Always pass the clear colour for PictOpClear
    
    Having made that optimisation for Composite, and then made the
    assumption that it is always true in the backends, we failed to clear
    the unbounded area outside of a trapezoid since we passed in the
    original colour and the operation was optimised as a continuation.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index 9df8cfd..73e2490 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -274,6 +274,7 @@ struct sna {
 	void *WakeupData;
 	CloseScreenProcPtr CloseScreen;
 
+	PicturePtr clear;
 	struct {
 		uint32_t fill_bo;
 		uint32_t fill_pixel;
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 8eb0c94..5b81596 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -43,20 +43,18 @@
 
 #define BOUND(v)	(INT16) ((v) < MINSHORT ? MINSHORT : (v) > MAXSHORT ? MAXSHORT : (v))
 
-static PicturePtr clear;
-
 Bool sna_composite_create(struct sna *sna)
 {
 	xRenderColor color ={ 0 };
 	int error;
 
-	clear = CreateSolidPicture(0, &color, &error);
-	return clear != NULL;
+	sna->clear = CreateSolidPicture(0, &color, &error);
+	return sna->clear != NULL;
 }
 
 void sna_composite_close(struct sna *sna)
 {
-	FreePicture(clear, 0);
+	FreePicture(sna->clear, 0);
 }
 
 static inline bool
@@ -436,7 +434,7 @@ sna_composite(CARD8 op,
 	if (op == PictOpClear) {
 		DBG(("%s: discarding source and mask for clear\n", __FUNCTION__));
 		mask = NULL;
-		src = clear;
+		src = sna->clear;
 	}
 
 	if (mask && sna_composite_mask_is_opaque(mask)) {
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index eb6c968..c2a4190 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -2776,7 +2776,7 @@ mono_trapezoids_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		memset(&mono.op, 0, sizeof(mono.op));
 		if (mono.sna->render.composite(mono.sna,
 					       PictOpClear,
-					       src, NULL, dst,
+					       mono.sna->clear, NULL, dst,
 					       0, 0,
 					       0, 0,
 					       mono.clip.extents.x1,  mono.clip.extents.y1,
@@ -4321,7 +4321,7 @@ mono_triangles_span_converter(CARD8 op, PicturePtr src, PicturePtr dst,
 		memset(&mono.op, 0, sizeof(mono.op));
 		if (mono.sna->render.composite(mono.sna,
 					       PictOpClear,
-					       src, NULL, dst,
+					       mono.sna->clear, NULL, dst,
 					       0, 0,
 					       0, 0,
 					       mono.clip.extents.x1,  mono.clip.extents.y1,
commit c107b90a44abb45c837ff8924939872be5b490eb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 4 16:33:34 2012 +0000

    sna/gen6: Reduce PictOpClear to PictOpSrc (with blending disabled)
    
    The advantage of PictOpSrc is that it writes its results directly to
    memory bypassing the blend unit.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 08f9668..d73fda8 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -579,17 +579,6 @@ gen6_emit_cc(struct sna *sna,
 	if (render->blend == blend)
 		return op <= PictOpSrc;
 
-	if (op == PictOpClear) {
-		uint32_t src;
-
-		/* We can emulate a clear using src, which is beneficial if
-		 * the blend unit is already disabled.
-		 */
-		src = BLEND_OFFSET(GEN6_BLENDFACTOR_ONE, GEN6_BLENDFACTOR_ZERO);
-		if (render->blend == src)
-			return true;
-	}
-
 	OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2));
 	OUT_BATCH((render->cc_blend + blend) | 1);
 	if (render->blend == (unsigned)-1) {
@@ -716,8 +705,8 @@ gen6_emit_drawing_rectangle(struct sna *sna,
 	assert(!too_large(op->dst.x, op->dst.y));
 	assert(!too_large(op->dst.width, op->dst.height));
 
-	if  (sna->render_state.gen6.drawrect_limit  == limit &&
-	     sna->render_state.gen6.drawrect_offset == offset)
+	if (sna->render_state.gen6.drawrect_limit  == limit &&
+	    sna->render_state.gen6.drawrect_offset == offset)
 		return false;
 
 	/* [DevSNB-C+{W/A}] Before any depth stall flush (including those
@@ -932,6 +921,8 @@ static int gen6_vertex_finish(struct sna *sna)
 	struct kgem_bo *bo;
 	unsigned int i;
 
+	DBG(("%s: used=%d / %d\n", __FUNCTION__,
+	     sna->render.vertex_used, sna->render.vertex_size));
 	assert(sna->render.vertex_used);
 
 	/* Note: we only need dword alignment (currently) */
@@ -978,6 +969,10 @@ static int gen6_vertex_finish(struct sna *sna)
 
 	kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo);
 	if (sna->render.vertex_used) {
+		DBG(("%s: copying initial buffer x %d to handle=%d\n",
+		     __FUNCTION__,
+		     sna->render.vertex_used,
+		     sna->render.vbo->handle));
 		memcpy(sna->render.vertices,
 		       sna->render.vertex_data,
 		       sizeof(float)*sna->render.vertex_used);
@@ -1003,6 +998,8 @@ static void gen6_vertex_close(struct sna *sna)
 
 	bo = sna->render.vbo;
 	if (bo == NULL) {
+		assert(sna->render.vertices == sna->render.vertex_data);
+		assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data));
 		if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) {
 			DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__,
 			     sna->render.vertex_used, sna->kgem.nbatch));
@@ -1230,24 +1227,24 @@ gen6_emit_composite_primitive_solid(struct sna *sna,
 		float f;
 	} dst;
 
+	DBG(("%s: [%d+9] = (%d, %d)x(%d, %d)\n", __FUNCTION__,
+	     sna->render.vertex_used, r->dst.x, r->dst.y, r->width, r->height));
+
 	v = sna->render.vertices + sna->render.vertex_used;
 	sna->render.vertex_used += 9;
+	assert(sna->render.vertex_used <= sna->render.vertex_size);
+	assert(!too_large(r->dst.x + r->width, r->dst.y + r->height));
 
 	dst.p.x = r->dst.x + r->width;
 	dst.p.y = r->dst.y + r->height;
 	v[0] = dst.f;
-	v[1] = 1.;
-	v[2] = 1.;
-
 	dst.p.x = r->dst.x;
 	v[3] = dst.f;
-	v[4] = 0.;
-	v[5] = 1.;
-
 	dst.p.y = r->dst.y;
 	v[6] = dst.f;
-	v[7] = 0.;
-	v[8] = 0.;
+
+	v[5] = v[2] = v[1] = 1.;
+	v[8] = v[7] = v[4] = 0.;
 }
 
 fastcall static void
@@ -1280,6 +1277,45 @@ gen6_emit_composite_primitive_identity_source(struct sna *sna,
 }
 
 fastcall static void
+gen6_emit_composite_primitive_simple_source(struct sna *sna,
+					    const struct sna_composite_op *op,
+					    const struct sna_composite_rectangles *r)
+{
+	float *v;
+	union {
+		struct sna_coordinate p;
+		float f;
+	} dst;
+
+	float xx = op->src.transform->matrix[0][0];
+	float x0 = op->src.transform->matrix[0][2];
+	float yy = op->src.transform->matrix[1][1];
+	float y0 = op->src.transform->matrix[1][2];
+	float sx = op->src.scale[0];
+	float sy = op->src.scale[1];
+	int16_t tx = op->src.offset[0];
+	int16_t ty = op->src.offset[1];
+
+	v = sna->render.vertices + sna->render.vertex_used;
+	sna->render.vertex_used += 3*3;
+
+	dst.p.x = r->dst.x + r->width;
+	dst.p.y = r->dst.y + r->height;
+	v[0] = dst.f;
+	v[1] = ((r->src.x + r->width + tx) * xx + x0) * sx;
+	v[5] = v[2] = ((r->src.y + r->height + ty) * yy + y0) * sy;
+
+	dst.p.x = r->dst.x;
+	v[3] = dst.f;
+	v[7] = v[4] = ((r->src.x + tx) * xx + x0) * sx;
+
+	dst.p.y = r->dst.y;
+	v[6] = dst.f;
+	v[8] = ((r->src.y + ty) * yy + y0) * sy;
+}
+
+
+fastcall static void
 gen6_emit_composite_primitive_affine_source(struct sna *sna,
 					    const struct sna_composite_op *op,
 					    const struct sna_composite_rectangles *r)
@@ -1525,6 +1561,10 @@ static void gen6_emit_vertex_buffer(struct sna *sna,
 static void gen6_emit_primitive(struct sna *sna)
 {
 	if (sna->kgem.nbatch == sna->render_state.gen6.last_primitive) {
+		DBG(("%s: continuing previous primitive, start=%d, index=%d\n",
+		     __FUNCTION__,
+		     sna->render.vertex_start,
+		     sna->render.vertex_index));
 		sna->render_state.gen6.vertex_offset = sna->kgem.nbatch - 5;
 		return;
 	}
@@ -1541,6 +1581,8 @@ static void gen6_emit_primitive(struct sna *sna)
 	OUT_BATCH(0);	/* start instance location */
 	OUT_BATCH(0);	/* index buffer offset, ignored */
 	sna->render.vertex_start = sna->render.vertex_index;
+	DBG(("%s: started new primitive: index=%d\n",
+	     __FUNCTION__, sna->render.vertex_start));
 
 	sna->render_state.gen6.last_primitive = sna->kgem.nbatch;
 }
@@ -1603,6 +1645,7 @@ inline static int gen6_get_rectangles(struct sna *sna,
 	if (want > 1 && want * op->floats_per_rect > rem)
 		want = rem / op->floats_per_rect;
 
+	assert(want > 0);
 	sna->render.vertex_index += 3*want;
 	return want;
 }
@@ -2156,6 +2199,8 @@ static void gen6_composite_channel_convert(struct sna_composite_channel *channel
 static void gen6_render_composite_done(struct sna *sna,
 				       const struct sna_composite_op *op)
 {
+	DBG(("%s\n", __FUNCTION__));
+
 	if (sna->render_state.gen6.vertex_offset) {
 		gen6_vertex_flush(sna);
 		gen6_magic_ca_pass(sna, op);
@@ -2488,6 +2533,8 @@ gen6_render_composite(struct sna *sna,
 					    width, height,
 					    tmp);
 
+	if (op == PictOpClear)
+		op = PictOpSrc;
 	tmp->op = op;
 	if (!gen6_composite_set_target(sna, tmp, dst))
 		return FALSE;
@@ -2585,12 +2632,28 @@ gen6_render_composite(struct sna *sna,
 
 		tmp->floats_per_vertex = 5 + 2 * !tmp->is_affine;
 	} else {
-		if (tmp->src.is_solid)
+		if (tmp->src.is_solid) {
+			DBG(("%s: choosing gen6_emit_composite_primitive_solid\n",
+			     __FUNCTION__));
 			tmp->prim_emit = gen6_emit_composite_primitive_solid;
-		else if (tmp->src.transform == NULL)
+		} else if (tmp->src.transform == NULL) {
+			DBG(("%s: choosing gen6_emit_composite_primitive_identity_source\n",
+			     __FUNCTION__));
 			tmp->prim_emit = gen6_emit_composite_primitive_identity_source;
-		else if (tmp->src.is_affine)
-			tmp->prim_emit = gen6_emit_composite_primitive_affine_source;
+		} else if (tmp->src.is_affine) {
+			if (tmp->src.transform->matrix[0][1] == 0 &&
+			    tmp->src.transform->matrix[1][0] == 0) {
+				tmp->src.scale[0] /= tmp->src.transform->matrix[2][2];
+				tmp->src.scale[1] /= tmp->src.transform->matrix[2][2];
+				DBG(("%s: choosing gen6_emit_composite_primitive_simple_source\n",
+				     __FUNCTION__));
+				tmp->prim_emit = gen6_emit_composite_primitive_simple_source;
+			} else {
+				DBG(("%s: choosing gen6_emit_composite_primitive_affine_source\n",
+				     __FUNCTION__));
+				tmp->prim_emit = gen6_emit_composite_primitive_affine_source;
+			}
+		}
 
 		tmp->floats_per_vertex = 3 + !tmp->is_affine;
 	}
@@ -2923,11 +2986,11 @@ fastcall static void
 gen6_render_composite_spans_done(struct sna *sna,
 				 const struct sna_composite_spans_op *op)
 {
+	DBG(("%s()\n", __FUNCTION__));
+
 	if (sna->render_state.gen6.vertex_offset)
 		gen6_vertex_flush(sna);
 
-	DBG(("%s()\n", __FUNCTION__));
-
 	if (op->base.src.bo)
 		kgem_bo_destroy(&sna->kgem, op->base.src.bo);
 
@@ -3193,8 +3256,7 @@ fallback_blt:
 	if (!gen6_check_format(tmp.src.pict_format))
 		goto fallback_blt;
 
-	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
-
+	tmp.op = PictOpSrc;
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
@@ -3373,6 +3435,8 @@ gen6_render_copy_blt(struct sna *sna,
 static void
 gen6_render_copy_done(struct sna *sna, const struct sna_copy_op *op)
 {
+	DBG(("%s()\n", __FUNCTION__));
+
 	if (sna->render_state.gen6.vertex_offset)
 		gen6_vertex_flush(sna);
 }
@@ -3428,7 +3492,7 @@ fallback:
 	if (!gen6_check_format(op->base.src.pict_format))
 		goto fallback;
 
-	op->base.op = alu == GXcopy ? PictOpSrc : PictOpClear;
+	op->base.op = PictOpSrc;
 
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
@@ -3574,9 +3638,10 @@ gen6_render_fill_boxes(struct sna *sna,
 	return FALSE;
 #endif
 
-	if (op == PictOpClear)
+	if (op == PictOpClear) {
 		pixel = 0;
-	else if (!sna_get_pixel_from_rgba(&pixel,
+		op = PictOpSrc;
+	} else if (!sna_get_pixel_from_rgba(&pixel,
 				     color->red,
 				     color->green,
 				     color->blue,
@@ -3744,6 +3809,8 @@ gen6_render_op_fill_boxes(struct sna *sna,
 static void
 gen6_render_op_fill_done(struct sna *sna, const struct sna_fill_op *op)
 {
+	DBG(("%s()\n", __FUNCTION__));
+
 	if (sna->render_state.gen6.vertex_offset)
 		gen6_vertex_flush(sna);
 	kgem_bo_destroy(&sna->kgem, op->base.src.bo);
@@ -3781,7 +3848,7 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	if (alu == GXclear)
 		color = 0;
 
-	op->base.op = color == 0 ? PictOpClear : PictOpSrc;
+	op->base.op = PictOpSrc;
 
 	op->base.dst.pixmap = dst;
 	op->base.dst.width  = dst->drawable.width;
@@ -3874,7 +3941,7 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	if (alu == GXclear)
 		color = 0;
 
-	tmp.op = color == 0 ? PictOpClear : PictOpSrc;
+	tmp.op = PictOpSrc;
 
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
@@ -3976,7 +4043,7 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	if (too_large(dst->drawable.width, dst->drawable.height))
 		return gen6_render_clear_try_blt(sna, dst, bo);
 
-	tmp.op = PictOpClear;
+	tmp.op = PictOpSrc;
 
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 757bad1..259666e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2917,7 +2917,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 {
 	if (bo->map)
-		return bo->map;
+		return CPU_MAP(bo->map);
 
 	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
diff --git a/src/sna/kgem_debug_gen6.c b/src/sna/kgem_debug_gen6.c
index f748da6..72ed299 100644
--- a/src/sna/kgem_debug_gen6.c
+++ b/src/sna/kgem_debug_gen6.c
@@ -42,7 +42,6 @@
 static struct state {
 	struct vertex_buffer {
 		int handle;
-		void *base;
 		const char *ptr;
 		int pitch;
 
@@ -67,7 +66,7 @@ static void gen6_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 {
 	uint32_t reloc = sizeof(uint32_t) * (&data[1] - kgem->batch);
 	struct kgem_bo *bo = NULL;
-	void *base, *ptr;
+	void *base;
 	int i;
 
 	for (i = 0; i < kgem->nreloc; i++)
@@ -85,13 +84,12 @@ static void gen6_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 		assert(&bo->request != &kgem->next_request->buffers);
 		base = kgem_bo_map__debug(kgem, bo);
 	}
-	ptr = (char *)base + kgem->reloc[i].delta;
 
+	base = (char *)base + kgem->reloc[i].delta;
 	i = data[0] >> 26;
 
 	state.vb[i].current = bo;
-	state.vb[i].base = base;
-	state.vb[i].ptr = ptr;
+	state.vb[i].ptr = base;
 	state.vb[i].pitch = data[0] & 0x7ff;
 }
 
@@ -268,10 +266,8 @@ static void indirect_vertex_out(struct kgem *kgem, uint32_t v)
 		const struct vertex_buffer *vb = &state.vb[ve->buffer];
 		const void *ptr = vb->ptr + v * vb->pitch + ve->offset;
 
-		if (!ve->valid)
-			continue;
-
-		ve_out(ve, ptr);
+		if (ve->valid)
+			ve_out(ve, ptr);
 
 		while (++i <= state.num_ve && !state.ve[i].valid)
 			;
diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 0faad84..8eb0c94 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -792,6 +792,21 @@ sna_composite_rectangles(CARD8		 op,
 			}
 		} else
 			sna_damage_add(&priv->gpu_damage, &region);
+	} else if (op <= PictOpSrc &&
+		   region.data == NULL &&
+		   region.extents.x2 - region.extents.x1 == pixmap->drawable.width &&
+		   region.extents.y2 - region.extents.y1 == pixmap->drawable.height) {
+		priv->clear = true;
+		priv->clear_color = 0;
+		if (op == PictOpSrc)
+			sna_get_pixel_from_rgba(&priv->clear_color,
+						color->red,
+						color->green,
+						color->blue,
+						color->alpha,
+						dst->format);
+		DBG(("%s: marking clear [%08x]\n",
+		     __FUNCTION__, priv->clear_color));
 	}
 
 	goto done;
commit 4baa2806bc0f51e7576b769ca6750deb3821c4d3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Feb 4 12:06:22 2012 +0000

    sna: Check if the damage reduces to all before performing the migration
    
    An assert exposed a situation where we had accumulated an unreduced
    damage-all and so we were taking the slow path only to discover later
    that it was a damage-all and that we had performed needless checks.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1d2b999..b6f6772 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -416,6 +416,8 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
 	if (sna_damage_is_all(&priv->cpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height)) {
+		DBG(("%s: entire source is damaged, using Y-tiling\n",
+		     __FUNCTION__));
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->undamaged = false;
 		return I915_TILING_Y;
@@ -1221,7 +1223,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 		return true;
 	}
 
-	if (DAMAGE_IS_ALL(priv->cpu_damage))
+	if (sna_damage_is_all(&priv->cpu_damage,
+			      pixmap->drawable.width,
+			      pixmap->drawable.height))
 		goto out;
 
 	if (priv->clear)
@@ -1245,6 +1249,8 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 	if ((flags & MOVE_READ) == 0) {
+		DBG(("%s: no read, checking to see if we can stream the write into the GPU bo\n",
+		     __FUNCTION__));
 		assert(flags & MOVE_WRITE);
 
 		if (priv->stride && priv->gpu_bo &&
@@ -1611,7 +1617,9 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 
 	assert_pixmap_contains_box(pixmap, box);
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
+	if (sna_damage_is_all(&priv->gpu_damage,
+			      pixmap->drawable.width,
+			      pixmap->drawable.height))
 		goto done;
 
 	if (priv->gpu_bo == NULL) {
@@ -2081,7 +2089,9 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 		return NULL;
 	}
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
+	if (sna_damage_is_all(&priv->gpu_damage,
+			      pixmap->drawable.width,
+			      pixmap->drawable.height)) {
 		DBG(("%s: already all-damaged\n", __FUNCTION__));
 		goto active;
 	}
commit 2653524dffc1fe0dbff7d74bfc9be535d9ececb1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 3 20:06:43 2012 +0000

    sna: Reduce the downsample tile size to accommodate alignment
    
    If we need to enlarge the sampled tile due to tiling alignments, the
    resulting sample can become larger than we can accommodate through the 3D
    pipeline, resulting in FAIL.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index bc8b2de..c2b9e79 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -622,7 +622,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 	struct sna_pixmap *priv;
 	pixman_transform_t t;
 	PixmapPtr tmp;
-	int width, height;
+	int width, height, size;
 	int sx, sy, ox, oy, ow, oh;
 	int error, ret = 0;
 	BoxRec box, b;
@@ -743,8 +743,13 @@ static int sna_render_picture_downsample(struct sna *sna,
 	ValidatePicture(tmp_dst);
 	ValidatePicture(tmp_src);
 
-	w = sna->render.max_3d_size / sx - 2 * sx;
-	h = sna->render.max_3d_size / sy - 2 * sy;
+	/* Use a small size to accommodate enlargement through tile alignment */
+	size = sna->render.max_3d_size - 4096 / pixmap->drawable.bitsPerPixel;
+	while (size * size * 4 > sna->kgem.max_copy_tile_size)
+		size /= 2;
+
+	w = size / sx - 2 * sx;
+	h = size / sy - 2 * sy;
 	DBG(("%s %d:%d downsampling using %dx%d GPU tiles\n",
 	     __FUNCTION__, (width + w-1)/w, (height + h-1)/h, w, h));
 
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 00e111c..c6e898b 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -142,7 +142,8 @@ sna_tiling_composite_done(struct sna *sna,
 
 	/* Use a small step to accommodate enlargement through tile alignment */
 	step = sna->render.max_3d_size;
-	if (tile->dst_x & (8*512 / tile->dst->pDrawable->bitsPerPixel - 1))
+	if (tile->dst_x & (8*512 / tile->dst->pDrawable->bitsPerPixel - 1) ||
+	    tile->dst_y & 63)
 		step /= 2;
 	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
@@ -330,7 +331,11 @@ sna_tiling_fill_boxes(struct sna *sna,
 
 	pixman_region_init_rects(&region, box, n);
 
+	/* Use a small step to accommodate enlargement through tile alignment */
 	step = sna->render.max_3d_size;
+	if (region.extents.x1 & (8*512 / dst->drawable.bitsPerPixel - 1) ||
+	    region.extents.y1 & 63)
+		step /= 2;
 	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
 
@@ -443,7 +448,10 @@ Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
 
 	pixman_region_init_rects(&region, box, nbox);
 
+	/* Use a small step to accommodate enlargement through tile alignment */
 	step = sna->render.max_3d_size;
+	if (region.extents.x1 & (8*512 / bpp - 1) || region.extents.y1 & 63)
+		step /= 2;
 	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
 
commit 93a0b10f163ee79b6a6a7ea46b0a33b622b1f86e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Feb 3 19:30:24 2012 +0000

    sna: Apply redirection for the render copy into large pixmaps
    
    If the pixmap is larger than the pipeline, but the operation extents fit
    within the pipeline, we may be able to create a proxy target to
    transform the operation into one that fits within the constraints of the
    render pipeline.
    
    This fixes the infinite recursion hit with partially displayed extremely
    large images.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 7250d66..97b558d 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -2852,9 +2852,7 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	if (src_bo == dst_bo || /* XXX handle overlap using 3D ? */
 	    too_large(src->drawable.width, src->drawable.height) ||
-	    src_bo->pitch > MAX_3D_PITCH ||
-	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
+	    src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch < 8) {
 fallback:
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -2876,10 +2874,39 @@ fallback:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = dst_bo;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height) ||
+	    dst_bo->pitch > MAX_3D_PITCH) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
 
 	tmp.floats_per_vertex = 4;
 	tmp.floats_per_rect = 12;
 
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
 	gen2_render_copy_setup_source(&tmp.src, src, src_bo);
 	gen2_emit_copy_state(sna, &tmp);
 	do {
@@ -2917,7 +2944,14 @@ fallback:
 	} while (n);
 
 	gen2_vertex_flush(sna, &tmp);
+	sna_render_composite_redirect_done(sna, &tmp);
 	return TRUE;
+
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 784d399..d5f5617 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -3841,10 +3841,8 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    src_bo == dst_bo || /* XXX handle overlap using 3D ? */
 	    src_bo->pitch > MAX_3D_PITCH ||
-	    too_large(src->drawable.width, src->drawable.height) ||
-	    dst_bo->pitch > MAX_3D_PITCH ||
-	    too_large(dst->drawable.width, dst->drawable.height)) {
-fallback:
+	    too_large(src->drawable.width, src->drawable.height)) {
+fallback_blt:
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
 						   dst, dst_bo, dst_dx, dst_dy,
@@ -3854,7 +3852,7 @@ fallback:
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
-			goto fallback;
+			goto fallback_blt;
 	}
 
 	memset(&tmp, 0, sizeof(tmp));
@@ -3865,6 +3863,31 @@ fallback:
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.format = sna_format_for_depth(dst->drawable.depth);
 	tmp.dst.bo = dst_bo;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height) ||
+	    dst_bo->pitch > MAX_3D_PITCH) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
 
 	gen3_render_copy_setup_source(&tmp.src, src, src_bo);
 
@@ -3873,6 +3896,10 @@ fallback:
 	tmp.mask.bo = NULL;
 	tmp.mask.u.gen3.type = SHADER_NONE;
 
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
 	gen3_emit_composite_state(sna, &tmp);
 	gen3_align_vertex(sna, &tmp);
 
@@ -3911,7 +3938,14 @@ fallback:
 	} while (n);
 
 	gen3_vertex_flush(sna);
+	sna_render_composite_redirect_done(sna, &tmp);
 	return TRUE;
+
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index ffdcbb7..b3a64d9 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -289,6 +289,13 @@ gen4_emit_pipelined_pointers(struct sna *sna,
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
 
+#define GEN4_MAX_3D_SIZE 8192
+
+static inline bool too_large(int width, int height)
+{
+	return width > GEN4_MAX_3D_SIZE || height > GEN4_MAX_3D_SIZE;
+}
+
 static int
 gen4_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine)
 {
@@ -1884,7 +1891,7 @@ gen4_composite_picture(struct sna *sna,
 		return sna_render_picture_convert(sna, picture, channel, pixmap,
 						  x, y, w, h, dst_x, dst_y);
 
-	if (pixmap->drawable.width > 8192 || pixmap->drawable.height > 8192)
+	if (too_large(pixmap->drawable.width, pixmap->drawable.height))
 		return sna_render_picture_extract(sna, picture, channel,
 						  x, y, w, h, dst_x, dst_y);
 
@@ -1983,7 +1990,7 @@ try_blt(struct sna *sna,
 		return TRUE;
 	}
 
-	if (width > 8192 || height > 8192) {
+	if (too_large(width, height)) {
 		DBG(("%s: operation too large for 3D pipe (%d, %d)\n",
 		     __FUNCTION__, width, height));
 		return TRUE;
@@ -2221,11 +2228,10 @@ gen4_render_composite(struct sna *sna,
 		return FALSE;
 	sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
 
-	if (tmp->dst.width > 8192 || tmp->dst.height > 8192) {
-		if (!sna_render_composite_redirect(sna, tmp,
-						   dst_x, dst_y, width, height))
+	if (too_large(tmp->dst.width, tmp->dst.height) &&
+	    !sna_render_composite_redirect(sna, tmp,
+					   dst_x, dst_y, width, height))
 			return FALSE;
-	}
 
 	switch (gen4_composite_picture(sna, src, &tmp->src,
 				       src_x, src_y,
@@ -2432,10 +2438,8 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return TRUE;
 
-	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
-	    src->drawable.width > 8192 || src->drawable.height > 8192 ||
-	    dst->drawable.width > 8192 || dst->drawable.height > 8192) {
-fallback:
+	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
+fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
 
@@ -2458,24 +2462,73 @@ fallback:
 		tmp.src.pict_format = sna_format_for_depth(src->drawable.depth);
 	}
 	if (!gen4_check_format(tmp.src.pict_format))
-		goto fallback;
+		goto fallback_blt;
 
 	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
 
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
+	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.bo = dst_bo;
-	tmp.dst.x = dst_dx;
-	tmp.dst.y = dst_dy;
 
-	tmp.src.bo = src_bo;
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
+
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
-	tmp.src.card_format =
-		gen4_get_card_format(tmp.src.pict_format),
-	tmp.src.width  = src->drawable.width;
-	tmp.src.height = src->drawable.height;
+	tmp.src.card_format = gen4_get_card_format(tmp.src.pict_format);
+	if (too_large(src->drawable.width, src->drawable.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+
+		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
+					       extents.x1 + src_dx,
+					       extents.y1 + src_dy,
+					       extents.x2 - extents.x1,
+					       extents.y2 - extents.y1)) {
+			goto fallback_tiled_dst;
+		}
+	} else {
+		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.width  = src->drawable.width;
+		tmp.src.height = src->drawable.height;
+		tmp.src.offset[0] = tmp.src.offset[1] = 0;
+		tmp.src.scale[0] = 1.f/src->drawable.width;
+		tmp.src.scale[1] = 1.f/src->drawable.height;
+	}
 
 	tmp.mask.bo = NULL;
 
@@ -2487,9 +2540,16 @@ fallback:
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
-			goto fallback;
+			goto fallback_tiled_src;
 	}
 
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	src_dx += tmp.src.offset[0];
+	src_dy += tmp.src.offset[1];
+
 	gen4_copy_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
 
@@ -2499,10 +2559,23 @@ fallback:
 		gen4_render_copy_one(sna, &tmp,
 				     box->x1 + src_dx, box->y1 + src_dy,
 				     box->x2 - box->x1, box->y2 - box->y1,
-				     box->x1, box->y1);
+				     box->x1 + dst_dx, box->y1 + dst_dy);
 		box++;
 	} while (--n);
+	sna_render_composite_redirect_done(sna, &tmp);
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return TRUE;
+
+fallback_tiled_src:
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+fallback_tiled_dst:
+	if (tmp.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
@@ -2552,8 +2625,8 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 		return TRUE;
 
 	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
-	    src->drawable.width > 8192 || src->drawable.height > 8192 ||
-	    dst->drawable.width > 8192 || dst->drawable.height > 8192) {
+	    too_large(src->drawable.width, src->drawable.height) ||
+	    too_large(dst->drawable.width, dst->drawable.height)) {
 fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
@@ -2683,10 +2756,7 @@ gen4_render_fill_boxes(struct sna *sna,
 		return FALSE;
 	}
 
-	if (prefer_blt(sna) ||
-	    dst->drawable.width > 8192 ||
-	    dst->drawable.height > 8192 ||
-	    !gen4_check_dst_format(format)) {
+	if (prefer_blt(sna) || too_large(dst->drawable.width, dst->drawable.height)) {
 		uint8_t alu = -1;
 
 		if (op == PictOpClear || (op == PictOpOutReverse && color->alpha >= 0xff00))
@@ -2715,7 +2785,7 @@ gen4_render_fill_boxes(struct sna *sna,
 		if (!gen4_check_dst_format(format))
 			return FALSE;
 
-		if (dst->drawable.width > 8192 || dst->drawable.height > 8192)
+		if (too_large(dst->drawable.width, dst->drawable.height))
 			return sna_tiling_fill_boxes(sna, op, format, color,
 						     dst, dst_bo, box, n);
 	}
@@ -2834,7 +2904,7 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 		return TRUE;
 
 	if (!(alu == GXcopy || alu == GXclear) ||
-	    dst->drawable.width > 8192 || dst->drawable.height > 8192)
+	    too_large(dst->drawable.width, dst->drawable.height))
 		return sna_blt_fill(sna, alu,
 				    dst_bo, dst->drawable.bitsPerPixel,
 				    color,
@@ -2925,7 +2995,7 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
-	    dst->drawable.width > 8192 || dst->drawable.height > 8192)
+	    too_large(dst->drawable.width, dst->drawable.height))
 		return FALSE;
 
 	if (alu == GXclear)
@@ -3251,7 +3321,7 @@ Bool gen4_render_init(struct sna *sna)
 	sna->render.reset = gen4_render_reset;
 	sna->render.fini = gen4_render_fini;
 
-	sna->render.max_3d_size = 8192;
+	sna->render.max_3d_size = GEN4_MAX_3D_SIZE;
 	sna->render.max_3d_pitch = 1 << 18;
 	return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 03dc8c9..933c51f 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1378,6 +1378,9 @@ gen5_emit_drawing_rectangle(struct sna *sna, const struct sna_composite_op *op)
 	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
 	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;
 
+	assert(!too_large(op->dst.x, op->dst.y));
+	assert(!too_large(op->dst.width, op->dst.height));
+
 	if (!DBG_NO_STATE_CACHE &&
 	    sna->render_state.gen5.drawrect_limit == limit &&
 	    sna->render_state.gen5.drawrect_offset == offset)
@@ -2731,20 +2734,6 @@ gen5_copy_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
-static inline bool untiled_tlb_miss(struct kgem_bo *bo)
-{
-	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
-}
-
-static inline bool prefer_blt_copy(struct sna *sna,
-				   struct kgem_bo *src_bo,
-				   struct kgem_bo *dst_bo)
-{
-	return (sna->kgem.ring != KGEM_RENDER ||
-		untiled_tlb_miss(src_bo) ||
-		untiled_tlb_miss(dst_bo));
-}
-
 static Bool
 gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 		       PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
@@ -2753,8 +2742,7 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	struct sna_composite_op tmp;
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
-	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
 			       dst_bo, dst_dx, dst_dy,
@@ -2762,12 +2750,10 @@ gen5_render_copy_boxes(struct sna *sna, uint8_t alu,
 			       box, n))
 		return TRUE;
 
-	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo ||
-	    too_large(src->drawable.width, src->drawable.height) ||
-	    too_large(dst->drawable.width, dst->drawable.height)) {
-fallback:
-	    if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-		    return FALSE;
+	if (!(alu == GXcopy || alu == GXclear) || src_bo == dst_bo) {
+fallback_blt:
+		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
+			return FALSE;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -2787,7 +2773,7 @@ fallback:
 	if (!gen5_check_format(tmp.src.pict_format)) {
 		DBG(("%s: unsupported source format, %x, use BLT\n",
 		     __FUNCTION__, tmp.src.pict_format));
-		goto fallback;
+		goto fallback_blt;
 	}
 
 	DBG(("%s (%d, %d)->(%d, %d) x %d\n",
@@ -2798,17 +2784,66 @@ fallback:
 	tmp.dst.pixmap = dst;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
+	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.bo = dst_bo;
-	tmp.dst.x = dst_dx;
-	tmp.dst.y = dst_dy;
 
-	tmp.src.bo = src_bo;
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
+
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
-	tmp.src.card_format =
-		gen5_get_card_format(tmp.src.pict_format);
-	tmp.src.width  = src->drawable.width;
-	tmp.src.height = src->drawable.height;
+	tmp.src.card_format = gen5_get_card_format(tmp.src.pict_format);
+	if (too_large(src->drawable.width, src->drawable.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+
+		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
+					       extents.x1 + src_dx,
+					       extents.y1 + src_dy,
+					       extents.x2 - extents.x1,
+					       extents.y2 - extents.y1))
+			goto fallback_tiled_dst;
+	} else {
+		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.width  = src->drawable.width;
+		tmp.src.height = src->drawable.height;
+		tmp.src.offset[0] = tmp.src.offset[1] = 0;
+		tmp.src.scale[0] = 1.f/src->drawable.width;
+		tmp.src.scale[1] = 1.f/src->drawable.height;
+	}
 
 	tmp.is_affine = TRUE;
 	tmp.floats_per_vertex = 3;
@@ -2819,24 +2854,19 @@ fallback:
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
-			goto fallback;
+			goto fallback_tiled_src;
 	}
 
-	if (kgem_bo_is_dirty(src_bo)) {
-		if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
-		    sna_blt_copy_boxes(sna, alu,
-				       src_bo, src_dx, src_dy,
-				       dst_bo, dst_dx, dst_dy,
-				       dst->drawable.bitsPerPixel,
-				       box, n))
-			return TRUE;
-	}
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	src_dx += tmp.src.offset[0];
+	src_dy += tmp.src.offset[1];
 
 	gen5_copy_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
 
-	tmp.src.scale[0] = 1.f/src->drawable.width;
-	tmp.src.scale[1] = 1.f/src->drawable.height;
 	do {
 		int n_this_time = gen5_get_rectangles(sna, &tmp, n);
 		if (n_this_time == 0) {
@@ -2850,15 +2880,15 @@ fallback:
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			OUT_VERTEX(box->x2, box->y2);
+			OUT_VERTEX(box->x2 + dst_dx, box->y2 + dst_dy);
 			OUT_VERTEX_F((box->x2 + src_dx) * tmp.src.scale[0]);
 			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
 
-			OUT_VERTEX(box->x1, box->y2);
+			OUT_VERTEX(box->x1 + dst_dx, box->y2 + dst_dy);
 			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
 			OUT_VERTEX_F((box->y2 + src_dy) * tmp.src.scale[1]);
 
-			OUT_VERTEX(box->x1, box->y1);
+			OUT_VERTEX(box->x1 + dst_dx, box->y1 + dst_dy);
 			OUT_VERTEX_F((box->x1 + src_dx) * tmp.src.scale[0]);
 			OUT_VERTEX_F((box->y1 + src_dy) * tmp.src.scale[1]);
 
@@ -2867,7 +2897,20 @@ fallback:
 	} while (n);
 
 	gen5_vertex_flush(sna);
+	sna_render_composite_redirect_done(sna, &tmp);
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return TRUE;
+
+fallback_tiled_src:
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+fallback_tiled_dst:
+	if (tmp.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
@@ -2916,8 +2959,7 @@ gen5_render_copy(struct sna *sna, uint8_t alu,
 {
 	DBG(("%s (alu=%d)\n", __FUNCTION__, alu));
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
-	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
+	if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
 			 dst->drawable.bitsPerPixel,
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 9f799ef..08f9668 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -229,6 +229,11 @@ static const struct formatinfo {
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
 
+static inline bool too_large(int width, int height)
+{
+	return width > GEN6_MAX_SIZE || height > GEN6_MAX_SIZE;
+}
+
 static uint32_t gen6_get_blend(int op,
 			       bool has_component_alpha,
 			       uint32_t dst_format)
@@ -708,6 +713,9 @@ gen6_emit_drawing_rectangle(struct sna *sna,
 	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
 	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;
 
+	assert(!too_large(op->dst.x, op->dst.y));
+	assert(!too_large(op->dst.width, op->dst.height));
+
 	if  (sna->render_state.gen6.drawrect_limit  == limit &&
 	     sna->render_state.gen6.drawrect_offset == offset)
 		return false;
@@ -2061,11 +2069,6 @@ gen6_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static inline bool too_large(int width, int height)
-{
-	return width > GEN6_MAX_SIZE || height > GEN6_MAX_SIZE;
-}
-
 static int
 gen6_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -3082,13 +3085,22 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
 }
 
+static bool prefer_blt_bo(struct sna *sna,
+			  PixmapPtr pixmap,
+			  struct kgem_bo *bo)
+{
+	return (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
+		untiled_tlb_miss(bo)) &&
+		kgem_bo_can_blt(&sna->kgem, bo);
+}
+
 static inline bool prefer_blt_copy(struct sna *sna,
-				   struct kgem_bo *src_bo,
-				   struct kgem_bo *dst_bo)
+				   PixmapPtr src, struct kgem_bo *src_bo,
+				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
-	return (prefer_blt_ring(sna) ||
-		untiled_tlb_miss(src_bo) ||
-		untiled_tlb_miss(dst_bo));
+	return (sna->kgem.ring != KGEM_RENDER ||
+		prefer_blt_bo(sna, src, src_bo) ||
+		prefer_blt_bo(sna, dst, dst_bo));
 }
 
 static inline bool
@@ -3148,7 +3160,7 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3160,26 +3172,15 @@ gen6_render_copy_boxes(struct sna *sna, uint8_t alu,
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    overlaps(src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
-		     box, n) ||
-	    too_large(src->drawable.width, src->drawable.height) ||
-	    too_large(dst->drawable.width, dst->drawable.height)) {
-fallback:
+		     box, n)) {
+fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return false;
 
-		if (sna_blt_copy_boxes_fallback(sna, alu,
+		return sna_blt_copy_boxes_fallback(sna, alu,
 						 src, src_bo, src_dx, src_dy,
 						 dst, dst_bo, dst_dx, dst_dy,
-						 box, n))
-			return true;
-
-		return false;
-#if 0
-		return sna_tiling_copy_boxes(sna,
-					     src, src_bo, src_dx, src_dy,
-					     dst, dst_bo, dst_dx, dst_dy,
-					     box, n);
-#endif
+						 box, n);
 	}
 
 	if (dst->drawable.depth == src->drawable.depth) {
@@ -3190,25 +3191,73 @@ fallback:
 		tmp.src.pict_format = sna_format_for_depth(src->drawable.depth);
 	}
 	if (!gen6_check_format(tmp.src.pict_format))
-		goto fallback;
+		goto fallback_blt;
 
 	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
 
 	tmp.dst.pixmap = dst;
-	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.bo = dst_bo;
-	tmp.dst.x = dst_dx;
-	tmp.dst.y = dst_dy;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
 
-	tmp.src.bo = src_bo;
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
 	tmp.src.card_format =
 		gen6_get_card_format(tmp.src.pict_format);
-	tmp.src.width  = src->drawable.width;
-	tmp.src.height = src->drawable.height;
+	if (too_large(src->drawable.width, src->drawable.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+
+		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
+					       extents.x1 + src_dx,
+					       extents.y1 + src_dy,
+					       extents.x2 - extents.x1,
+					       extents.y2 - extents.y1))
+			goto fallback_tiled_dst;
+	} else {
+		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.width  = src->drawable.width;
+		tmp.src.height = src->drawable.height;
+		tmp.src.offset[0] = tmp.src.offset[1] = 0;
+		tmp.src.scale[0] = 1.f/src->drawable.width;
+		tmp.src.scale[1] = 1.f/src->drawable.height;
+	}
 
 	tmp.mask.bo = NULL;
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
@@ -3229,10 +3278,17 @@ fallback:
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
-			goto fallback;
+			goto fallback_tiled_src;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	src_dx += tmp.src.offset[0];
+	src_dy += tmp.src.offset[1];
+
 	gen6_emit_copy_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
 
@@ -3256,9 +3312,9 @@ fallback:
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			v[0] = pack_2s(box->x2, box->y2);
-			v[3] = pack_2s(box->x1, box->y2);
-			v[6] = pack_2s(box->x1, box->y1);
+			v[0] = pack_2s(box->x2 + dst_dx, box->y2 + dst_dy);
+			v[3] = pack_2s(box->x1 + dst_dx, box->y2 + dst_dy);
+			v[6] = pack_2s(box->x1 + dst_dx, box->y1 + dst_dy);
 
 			v[1] = (box->x2 + src_dx) * tmp.src.scale[0];
 			v[7] = v[4] = (box->x1 + src_dx) * tmp.src.scale[0];
@@ -3272,7 +3328,20 @@ fallback:
 	} while (n);
 
 	gen6_vertex_flush(sna);
+	sna_render_composite_redirect_done(sna, &tmp);
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return TRUE;
+
+fallback_tiled_src:
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+fallback_tiled_dst:
+	if (tmp.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
@@ -3329,7 +3398,7 @@ gen6_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index e2486c6..5385a47 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -235,6 +235,11 @@ static const struct formatinfo {
 #define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y)
 #define OUT_VERTEX_F(v) vertex_emit(sna, v)
 
+static inline bool too_large(int width, int height)
+{
+	return width > GEN7_MAX_SIZE || height > GEN7_MAX_SIZE;
+}
+
 static uint32_t gen7_get_blend(int op,
 			       Bool has_component_alpha,
 			       uint32_t dst_format)
@@ -817,6 +822,9 @@ gen7_emit_drawing_rectangle(struct sna *sna,
 	uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1);
 	uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x;
 
+	assert(!too_large(op->dst.x, op->dst.y));
+	assert(!too_large(op->dst.width, op->dst.height));
+
 	if (sna->render_state.gen7.drawrect_limit == limit &&
 	    sna->render_state.gen7.drawrect_offset == offset)
 		return;
@@ -2124,11 +2132,6 @@ gen7_composite_solid_init(struct sna *sna,
 	return channel->bo != NULL;
 }
 
-static inline bool too_large(int width, int height)
-{
-	return width > GEN7_MAX_SIZE || height > GEN7_MAX_SIZE;
-}
-
 static int
 gen7_composite_picture(struct sna *sna,
 		       PicturePtr picture,
@@ -3130,13 +3133,22 @@ static inline bool untiled_tlb_miss(struct kgem_bo *bo)
 	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
 }
 
+static bool prefer_blt_bo(struct sna *sna,
+			  PixmapPtr pixmap,
+			  struct kgem_bo *bo)
+{
+	return (too_large(pixmap->drawable.width, pixmap->drawable.height) ||
+		untiled_tlb_miss(bo)) &&
+		kgem_bo_can_blt(&sna->kgem, bo);
+}
+
 static inline bool prefer_blt_copy(struct sna *sna,
-				   struct kgem_bo *src_bo,
-				   struct kgem_bo *dst_bo)
+				   PixmapPtr src, struct kgem_bo *src_bo,
+				   PixmapPtr dst, struct kgem_bo *dst_bo)
 {
 	return (sna->kgem.ring != KGEM_RENDER ||
-		untiled_tlb_miss(src_bo) ||
-		untiled_tlb_miss(dst_bo));
+		prefer_blt_bo(sna, src, src_bo) ||
+		prefer_blt_bo(sna, dst, dst_bo));
 }
 
 static inline bool
@@ -3196,7 +3208,7 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 		      dst_bo, dst_dx, dst_dy,
 		      box, n)));
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy_boxes(sna, alu,
 			       src_bo, src_dx, src_dy,
@@ -3208,17 +3220,15 @@ gen7_render_copy_boxes(struct sna *sna, uint8_t alu,
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    overlaps(src_bo, src_dx, src_dy,
 		     dst_bo, dst_dx, dst_dy,
-		     box, n) ||
-	    too_large(src->drawable.width, src->drawable.height) ||
-	    too_large(dst->drawable.width, dst->drawable.height)) {
-fallback:
+		     box, n)) {
+fallback_blt:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
-			return FALSE;
+			return false;
 
 		return sna_blt_copy_boxes_fallback(sna, alu,
-						   src, src_bo, src_dx, src_dy,
-						   dst, dst_bo, dst_dx, dst_dy,
-						   box, n);
+						 src, src_bo, src_dx, src_dy,
+						 dst, dst_bo, dst_dx, dst_dy,
+						 box, n);
 	}
 
 	if (dst->drawable.depth == src->drawable.depth) {
@@ -3229,25 +3239,73 @@ fallback:
 		tmp.src.pict_format = sna_format_for_depth(src->drawable.depth);
 	}
 	if (!gen7_check_format(tmp.src.pict_format))
-		goto fallback;
+		goto fallback_blt;
 
 	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
 
 	tmp.dst.pixmap = dst;
-	tmp.dst.x = tmp.dst.y = 0;
 	tmp.dst.width  = dst->drawable.width;
 	tmp.dst.height = dst->drawable.height;
 	tmp.dst.bo = dst_bo;
-	tmp.dst.x = dst_dx;
-	tmp.dst.y = dst_dy;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	sna_render_composite_redirect_init(&tmp);
+	if (too_large(tmp.dst.width, tmp.dst.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+		if (!sna_render_composite_redirect(sna, &tmp,
+						   extents.x1, extents.y1,
+						   extents.x2 - extents.x1,
+						   extents.y2 - extents.y1))
+			goto fallback_tiled;
+	}
 
-	tmp.src.bo = src_bo;
 	tmp.src.filter = SAMPLER_FILTER_NEAREST;
 	tmp.src.repeat = SAMPLER_EXTEND_NONE;
 	tmp.src.card_format =
 		gen7_get_card_format(tmp.src.pict_format);
-	tmp.src.width  = src->drawable.width;
-	tmp.src.height = src->drawable.height;
+	if (too_large(src->drawable.width, src->drawable.height)) {
+		BoxRec extents = box[0];
+		int i;
+
+		for (i = 1; i < n; i++) {
+			if (extents.x1 < box[i].x1)
+				extents.x1 = box[i].x1;
+			if (extents.y1 < box[i].y1)
+				extents.y1 = box[i].y1;
+
+			if (extents.x2 > box[i].x2)
+				extents.x2 = box[i].x2;
+			if (extents.y2 > box[i].y2)
+				extents.y2 = box[i].y2;
+		}
+
+		if (!sna_render_pixmap_partial(sna, src, src_bo, &tmp.src,
+					       extents.x1 + src_dx,
+					       extents.y1 + src_dy,
+					       extents.x2 - extents.x1,
+					       extents.y2 - extents.y1))
+			goto fallback_tiled_dst;
+	} else {
+		tmp.src.bo = kgem_bo_reference(src_bo);
+		tmp.src.width  = src->drawable.width;
+		tmp.src.height = src->drawable.height;
+		tmp.src.offset[0] = tmp.src.offset[1] = 0;
+		tmp.src.scale[0] = 1.f/src->drawable.width;
+		tmp.src.scale[1] = 1.f/src->drawable.height;
+	}
 
 	tmp.mask.bo = NULL;
 	tmp.mask.filter = SAMPLER_FILTER_NEAREST;
@@ -3259,7 +3317,7 @@ fallback:
 	tmp.has_component_alpha = 0;
 	tmp.need_magic_ca_pass = 0;
 
-	tmp.u.gen7.wm_kernel = GEN7_WM_KERNEL_NOMASK;
+	tmp.u.gen7.wm_kernel = GEN6_WM_KERNEL_NOMASK;
 	tmp.u.gen7.nr_surfaces = 2;
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
@@ -3268,10 +3326,17 @@ fallback:
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
 		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
-			goto fallback;
+			goto fallback_tiled_src;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
+	dst_dx += tmp.dst.x;
+	dst_dy += tmp.dst.y;
+	tmp.dst.x = tmp.dst.y = 0;
+
+	src_dx += tmp.src.offset[0];
+	src_dy += tmp.src.offset[1];
+
 	gen7_emit_copy_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
 
@@ -3295,9 +3360,9 @@ fallback:
 			     box->x1 + src_dx, box->y1 + src_dy,
 			     box->x1 + dst_dx, box->y1 + dst_dy,
 			     box->x2 - box->x1, box->y2 - box->y1));
-			v[0] = pack_2s(box->x2, box->y2);
-			v[3] = pack_2s(box->x1, box->y2);
-			v[6] = pack_2s(box->x1, box->y1);
+			v[0] = pack_2s(box->x2 + dst_dx, box->y2 + dst_dy);
+			v[3] = pack_2s(box->x1 + dst_dx, box->y2 + dst_dy);
+			v[6] = pack_2s(box->x1 + dst_dx, box->y1 + dst_dy);
 
 			v[1] = (box->x2 + src_dx) * tmp.src.scale[0];
 			v[7] = v[4] = (box->x1 + src_dx) * tmp.src.scale[0];
@@ -3311,7 +3376,20 @@ fallback:
 	} while (n);
 
 	gen7_vertex_flush(sna);
+	sna_render_composite_redirect_done(sna, &tmp);
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
 	return TRUE;
+
+fallback_tiled_src:
+	kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+fallback_tiled_dst:
+	if (tmp.redirect.real_bo)
+		kgem_bo_destroy(&sna->kgem, tmp.dst.bo);
+fallback_tiled:
+	return sna_tiling_copy_boxes(sna, alu,
+				     src, src_bo, src_dx, src_dy,
+				     dst, dst_bo, dst_dx, dst_dy,
+				     box, n);
 }
 
 static void
@@ -3368,7 +3446,7 @@ gen7_render_copy(struct sna *sna, uint8_t alu,
 	     src->drawable.width, src->drawable.height,
 	     dst->drawable.width, dst->drawable.height));
 
-	if (prefer_blt_copy(sna, src_bo, dst_bo) &&
+	if (prefer_blt_copy(sna, src, src_bo, dst, dst_bo) &&
 	    sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
 	    sna_blt_copy(sna, alu,
 			 src_bo, dst_bo,
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d2580e6..757bad1 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -678,9 +678,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		 * disable dual-stream mode */
 		kgem->min_alignment = 64;
 
-	kgem->max_object_size = kgem->aperture_total / 2;
-	kgem->max_cpu_size = kgem->aperture_total / 2;
-	kgem->max_gpu_size = kgem->aperture_total / 2;
+	kgem->max_object_size = 2 * kgem->aperture_total / 3;
+	kgem->max_cpu_size = kgem->max_object_size;
+	kgem->max_gpu_size = kgem->max_object_size;
 	if (!kgem->has_llc)
 		kgem->max_gpu_size = MAX_CACHE_SIZE;
 	if (gen < 40) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 974a716..b6930e0 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -379,18 +379,10 @@ static inline int kgem_buffer_size(struct kgem_bo *bo)
 	return bo->size.bytes;
 }
 
-static inline bool kgem_bo_can_blt(struct kgem *kgem,
-				   struct kgem_bo *bo)
+static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
+					   struct kgem_bo *bo)
 {
-	int pitch;
-
-	if (bo->tiling == I915_TILING_Y) {
-		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
-		     __FUNCTION__, bo->handle));
-		return false;
-	}
-
-	pitch = bo->pitch;
+	int pitch = bo->pitch;
 	if (kgem->gen >= 40 && bo->tiling)
 		pitch /= 4;
 	if (pitch > MAXSHORT) {
@@ -402,6 +394,18 @@ static inline bool kgem_bo_can_blt(struct kgem *kgem,
 	return true;
 }
 
+static inline bool kgem_bo_can_blt(struct kgem *kgem,
+				   struct kgem_bo *bo)
+{
+	if (bo->tiling == I915_TILING_Y) {
+		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
+		     __FUNCTION__, bo->handle));
+		return false;
+	}
+
+	return kgem_bo_blt_pitch_is_ok(kgem, bo);
+}
+
 static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 				       struct kgem_bo *bo)
 {
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 9f51028..a7ea95c 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -2140,10 +2140,10 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	    !kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
 		_kgem_submit(kgem);
 		if (!kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL))
-			return sna_tiling_copy_boxes(sna, alu,
-						     src_bo, src_dx, src_dy,
-						     dst_bo, dst_dx, dst_dy,
-						     bpp, box, nbox);
+			return sna_tiling_blt_copy_boxes(sna, alu,
+							 src_bo, src_dx, src_dy,
+							 dst_bo, dst_dx, dst_dy,
+							 bpp, box, nbox);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -2244,7 +2244,8 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 	if (src_bo == dst_bo) {
 		DBG(("%s: dst == src\n", __FUNCTION__));
 
-		if (src_bo->tiling == I915_TILING_Y) {
+		if (src_bo->tiling == I915_TILING_Y &&
+		    kgem_bo_blt_pitch_is_ok(&sna->kgem, src_bo)) {
 			struct kgem_bo *bo;
 
 			DBG(("%s: src is Y-tiled\n", __FUNCTION__));
@@ -2287,7 +2288,8 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 				dst_bo = src_bo = bo;
 		}
 	} else {
-		if (src_bo->tiling == I915_TILING_Y) {
+		if (src_bo->tiling == I915_TILING_Y &&
+		    kgem_bo_blt_pitch_is_ok(&sna->kgem, src_bo)) {
 			DBG(("%s: src is y-tiled\n", __FUNCTION__));
 			assert(src_bo == sna_pixmap(src)->gpu_bo);
 			src_bo = sna_pixmap_change_tiling(src, I915_TILING_X);
@@ -2298,7 +2300,8 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
 			}
 		}
 
-		if (dst_bo->tiling == I915_TILING_Y) {
+		if (dst_bo->tiling == I915_TILING_Y &&
+		    kgem_bo_blt_pitch_is_ok(&sna->kgem, dst_bo)) {
 			DBG(("%s: dst is y-tiled\n", __FUNCTION__));
 			assert(dst_bo == sna_pixmap(dst)->gpu_bo);
 			dst_bo = sna_pixmap_change_tiling(dst, I915_TILING_X);
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index eb5df9d..62a8962 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -593,7 +593,7 @@ fallback:
 			int step;
 
 tile:
-			step = MIN(sna->render.max_3d_size,
+			step = MIN(sna->render.max_3d_size - 4096 / dst->drawable.bitsPerPixel,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
 			while (step * step * 4 > sna->kgem.max_upload_tile_size)
 				step /= 2;
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index fc6e6df..bc8b2de 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -805,6 +805,80 @@ cleanup_tmp:
 	return ret;
 }
 
+bool
+sna_render_pixmap_partial(struct sna *sna,
+			  PixmapPtr pixmap,
+			  struct kgem_bo *bo,
+			  struct sna_composite_channel *channel,
+			  int16_t x, int16_t y,
+			  int16_t w, int16_t h)
+{
+	BoxRec box;
+	int tile_width, tile_height, tile_size;
+	int offset;
+
+	DBG(("%s (%d, %d)x(%d, %d)\n", __FUNCTION__, x, y, w, h));
+
+	if (bo->pitch > sna->render.max_3d_pitch)
+		return false;
+
+	box.x1 = x;
+	box.y1 = y;
+	box.x2 = x + w;
+	box.y2 = y + h;
+
+	if (box.x1 < 0)
+		box.x1 = 0;
+	if (box.y1 < 0)
+		box.y1 = 0;
+	if (box.x2 > pixmap->drawable.width)
+		box.x2 = pixmap->drawable.width;
+	if (box.y2 > pixmap->drawable.height)
+		box.y2 = pixmap->drawable.height;
+
+	kgem_get_tile_size(&sna->kgem, bo->tiling,
+			   &tile_width, &tile_height, &tile_size);
+
+	/* Ensure we align to an even tile row */
+	box.y1 = box.y1 & ~(2*tile_height - 1);
+	box.y2 = ALIGN(box.y2, 2*tile_height);
+	if (box.y2 > pixmap->drawable.height)
+		box.y2 = pixmap->drawable.height;
+
+	box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
+	box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
+	if (box.x2 > pixmap->drawable.width)
+		box.x2 = pixmap->drawable.width;
+
+	w = box.x2 - box.x1;
+	h = box.y2 - box.y1;
+	DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
+	     box.x1, box.y1, box.x2, box.y2, w, h,
+	     pixmap->drawable.width, pixmap->drawable.height));
+	if (w <= 0 || h <= 0 ||
+	    w > sna->render.max_3d_size ||
+	    h > sna->render.max_3d_size)
+		return false;
+
+	/* How many tiles across are we? */
+	offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+	channel->bo = kgem_create_proxy(bo,
+					box.y1 * bo->pitch + offset,
+					h * bo->pitch);
+	if (channel->bo == NULL)
+		return false;
+
+	channel->bo->pitch = bo->pitch;
+
+	channel->offset[0] = x - box.x1;
+	channel->offset[1] = y - box.y1;
+	channel->scale[0] = 1.f/w;
+	channel->scale[1] = 1.f/h;
+	channel->width  = w;
+	channel->height = h;
+	return true;
+}
+
 static int
 sna_render_picture_partial(struct sna *sna,
 			   PicturePtr picture,
@@ -1068,13 +1142,25 @@ sna_render_picture_extract(struct sna *sna,
 						       I915_TILING_X, w, h,
 						       pixmap->drawable.bitsPerPixel),
 				    0);
-		if (bo && !sna_blt_copy_boxes(sna, GXcopy,
-					src_bo, 0, 0,
-					bo, -box.x1, -box.y1,
-					pixmap->drawable.bitsPerPixel,
-					&box, 1)) {
-			kgem_bo_destroy(&sna->kgem, bo);
-			bo = NULL;
+		if (bo) {
+			PixmapRec tmp;
+
+			tmp.drawable.width  = w;
+			tmp.drawable.height = h;
+			tmp.drawable.depth  = pixmap->drawable.depth;
+			tmp.drawable.bitsPerPixel = pixmap->drawable.bitsPerPixel;
+			tmp.devPrivate.ptr = NULL;
+
+			assert(tmp.drawable.width);
+			assert(tmp.drawable.height);
+
+			if (!sna->render.copy_boxes(sna, GXcopy,
+						    pixmap, src_bo, 0, 0,
+						    &tmp, bo, -box.x1, -box.y1,
+						    &box, 1)) {
+				kgem_bo_destroy(&sna->kgem, bo);
+				bo = NULL;
+			}
 		}
 	}
 
@@ -1541,7 +1627,6 @@ sna_render_composite_redirect(struct sna *sna,
 {
 	struct sna_composite_redirect *t = &op->redirect;
 	int bpp = op->dst.pixmap->drawable.bitsPerPixel;
-	struct sna_pixmap *priv;
 	struct kgem_bo *bo;
 
 #if NO_REDIRECT
@@ -1554,11 +1639,9 @@ sna_render_composite_redirect(struct sna *sna,
 	if (!width || !height)
 		return FALSE;
 
-	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-	if (priv == NULL) {
-		DBG(("%s: fallback -- no GPU bo attached\n", __FUNCTION__));
+	if (width  > sna->render.max_3d_pitch ||
+	    height > sna->render.max_3d_pitch)
 		return FALSE;
-	}
 
 	if (op->dst.bo->pitch <= sna->render.max_3d_pitch) {
 		int tile_width, tile_height, tile_size;
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 94c2744..a689315 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -507,10 +507,16 @@ Bool sna_tiling_fill_boxes(struct sna *sna,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
+
 Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
-			   struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
-			   struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-			   int bpp, const BoxRec *box, int nbox);
+			   PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			   PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			   const BoxRec *box, int n);
+
+Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			       int bpp, const BoxRec *box, int nbox);
 
 Bool sna_blt_composite(struct sna *sna,
 		       uint32_t op,
@@ -589,6 +595,14 @@ sna_render_pixmap_bo(struct sna *sna,
 		     int16_t w, int16_t h,
 		     int16_t dst_x, int16_t dst_y);
 
+bool
+sna_render_pixmap_partial(struct sna *sna,
+			  PixmapPtr pixmap,
+			  struct kgem_bo *bo,
+			  struct sna_composite_channel *channel,
+			  int16_t x, int16_t y,
+			  int16_t w, int16_t h);
+
 int
 sna_render_picture_extract(struct sna *sna,
 			   PicturePtr picture,
@@ -614,6 +628,13 @@ sna_render_picture_convert(struct sna *sna,
 			   int16_t w, int16_t h,
 			   int16_t dst_x, int16_t dst_y);
 
+inline static void sna_render_composite_redirect_init(struct sna_composite_op *op)
+{
+	struct sna_composite_redirect *t = &op->redirect;
+	t->real_bo = NULL;
+	t->damage = NULL;
+}
+
 Bool
 sna_render_composite_redirect(struct sna *sna,
 			      struct sna_composite_op *op,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 702192a..00e111c 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -421,10 +421,10 @@ done:
 	return ret;
 }
 
-Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
-			   struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
-			   struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
-			   int bpp, const BoxRec *box, int nbox)
+Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu,
+			       struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			       struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			       int bpp, const BoxRec *box, int nbox)
 {
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
@@ -516,3 +516,125 @@ done:
 	pixman_region_fini(&region);
 	return ret;
 }
+
+static Bool
+box_intersect(BoxPtr a, const BoxRec *b)
+{
+	if (a->x1 < b->x1)
+		a->x1 = b->x1;
+	if (a->x2 > b->x2)
+		a->x2 = b->x2;
+	if (a->y1 < b->y1)
+		a->y1 = b->y1;
+	if (a->y2 > b->y2)
+		a->y2 = b->y2;
+
+	return a->x1 < a->x2 && a->y1 < a->y2;
+}
+
+Bool
+sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
+		      PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+		      PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+		      const BoxRec *box, int n)
+{
+	BoxRec extents, tile, stack[64], *clipped, *c;
+	PixmapRec p;
+	int i, step;
+	Bool ret = FALSE;
+
+	extents = box[0];
+	for (i = 1; i < n; i++) {
+		if (extents.x1 < box[i].x1)
+			extents.x1 = box[i].x1;
+		if (extents.y1 < box[i].y1)
+			extents.y1 = box[i].y1;
+
+		if (extents.x2 > box[i].x2)
+			extents.x2 = box[i].x2;
+		if (extents.y2 > box[i].y2)
+			extents.y2 = box[i].y2;
+	}
+
+	step = sna->render.max_3d_size - 4096 / dst->drawable.bitsPerPixel;
+	while (step * step * 4 > sna->kgem.max_upload_tile_size)
+		step /= 2;
+
+	DBG(("%s: tiling copy, using %dx%d tiles\n",
+	     __FUNCTION__, step, step));
+
+	if (n > ARRAY_SIZE(stack)) {
+		clipped = malloc(sizeof(BoxRec) * n);
+		if (clipped == NULL)
+			goto tiled_error;
+	} else
+		clipped = stack;
+
+	p.drawable.depth = src->drawable.depth;
+	p.drawable.bitsPerPixel = src->drawable.bitsPerPixel;
+	p.devPrivate.ptr = NULL;
+
+	for (tile.y1 = extents.y1; tile.y1 < extents.y2; tile.y1 = tile.y2) {
+		tile.y2 = tile.y1 + step;
+		if (tile.y2 > extents.y2)
+			tile.y2 = extents.y2;
+
+		for (tile.x1 = extents.x1; tile.x1 < extents.x2; tile.x1 = tile.x2) {
+			struct kgem_bo *tmp_bo;
+
+			tile.x2 = tile.x1 + step;
+			if (tile.x2 > extents.x2)
+				tile.x2 = extents.x2;
+
+			p.drawable.width  = tile.x2 - tile.x1;
+			p.drawable.height = tile.y2 - tile.y1;
+
+			tmp_bo = kgem_create_2d(&sna->kgem,
+						p.drawable.width,
+						p.drawable.height,
+						p.drawable.bitsPerPixel,
+						I915_TILING_X, 0);
+			if (!tmp_bo)
+				goto tiled_error;
+
+			c = clipped;
+			for (i = 0; i < n; i++) {
+				*c = box[i];
+				if (!box_intersect(c, &tile))
+					continue;
+
+				DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+				     __FUNCTION__,
+				     c->x1, c->y1,
+				     c->x2, c->y2,
+				     src_dx, src_dy,
+				     c->x1 - tile.x1,
+				     c->y1 - tile.y1));
+				c++;
+			}
+
+			if (c == clipped ||
+			    (sna->render.copy_boxes(sna, GXcopy,
+						    src, src_bo, src_dx, src_dy,
+						    &p, tmp_bo, -tile.x1, -tile.y1,
+						    clipped, c - clipped) &&
+			     sna->render.copy_boxes(sna, alu,
+						    &p, tmp_bo, -tile.x1, -tile.y1,
+						    dst, dst_bo, dst_dx, dst_dy,
+						    clipped, c - clipped)))
+				i = 1;
+
+			kgem_bo_destroy(&sna->kgem, tmp_bo);
+
+			if (!i)
+				goto tiled_error;
+		}
+	}
+
+	ret = TRUE;
+tiled_error:
+	if (clipped != stack)
+		free(clipped);
+
+	return ret;
+}
commit 4774c6b8331831e0c9f3b24f5f6e1b6ea399f628
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Feb 2 15:23:03 2012 +0000

    sna: Add a couple of sanity checks that the CPU drawable is on the CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 71d3f06..1d2b999 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1070,7 +1070,8 @@ done:
 		DBG(("%s: syncing CPU bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
-
+	assert(pixmap->devPrivate.ptr);
+	assert(pixmap->devKind);
 	return true;
 }
 
@@ -1534,6 +1535,8 @@ out:
 		DBG(("%s: syncing cpu bo\n", __FUNCTION__));
 		kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
 	}
+	assert(pixmap->devPrivate.ptr);
+	assert(pixmap->devKind);
 	return true;
 }
 
commit 418cd98db7c4a2886c9e310f3691eb6c77421dd7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 1 19:10:41 2012 +0000

    sna/gen6: Ring switching outweighs the benefits for cairo-traces
    
    At the moment, the jury is still out on whether freely switching rings
    for fills is a Good Idea. So make it easier to turn it on and off for
    testing.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index ec5412a..9f799ef 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -57,6 +57,8 @@
 #define NO_FILL_BOXES 0
 #define NO_CLEAR 0
 
+#define NO_RING_SWITCH 1
+
 #define GEN6_MAX_SIZE 8192
 
 static const uint32_t ps_kernel_nomask_affine[][4] = {
@@ -2215,6 +2217,11 @@ static bool prefer_blt_ring(struct sna *sna)
 	return sna->kgem.ring != KGEM_RENDER;
 }
 
+static bool can_switch_rings(struct sna *sna)
+{
+	return sna->kgem.has_semaphores && !NO_RING_SWITCH;
+}
+
 static bool
 is_solid(PicturePtr picture)
 {
@@ -2252,7 +2259,7 @@ try_blt(struct sna *sna,
 		return TRUE;
 	}
 
-	if (sna->kgem.has_semaphores) {
+	if (can_switch_rings(sna)) {
 		if (is_solid(src))
 			return TRUE;
 	}
@@ -3432,7 +3439,9 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
-	return sna->kgem.has_semaphores || prefer_blt_ring(sna) || untiled_tlb_miss(bo);
+	return (can_switch_rings(sna) ||
+		prefer_blt_ring(sna) ||
+		untiled_tlb_miss(bo));
 }
 
 static Bool
commit 2d0e7c7ecd7371ac7b5fe3f382fc5d04792f7019
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 1 14:52:56 2012 +0000

    sna: Search again for a just-large-enough mapping for inplace uploads
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b446558..d2580e6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -104,6 +104,8 @@ static inline void list_replace(struct list *old,
 #endif
 
 #define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
+#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE)
+
 #define MAX_GTT_VMA_CACHE 512
 #define MAX_CPU_VMA_CACHE INT16_MAX
 #define MAP_PRESERVE_TIME 10
@@ -3215,6 +3217,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			old = search_linear_cache(kgem, alloc, CREATE_CPU_MAP);
 		if (old == NULL)
 			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE | CREATE_CPU_MAP);
+		if (old == NULL)
+			old = search_linear_cache(kgem, NUM_PAGES(size), CREATE_INACTIVE | CREATE_CPU_MAP);
 		if (old) {
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
@@ -3290,6 +3294,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			}
 		}
 #endif
+		if (old == NULL)
+			old = search_linear_cache(kgem, NUM_PAGES(size),
+						  CREATE_INACTIVE | CREATE_GTT_MAP);
 		if (old) {
 			DBG(("%s: reusing handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
commit 55c7088f54655609fbb00106679a566b46ee8dba
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 1 01:33:52 2012 +0000

    sna: Add debugging code to verify damage extents of fallback paths
    
    After using the CPU, upload the damage and read back the pixels from the
    GPU bo and verify that the two are equivalent.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 40748ec..71d3f06 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -80,12 +80,68 @@
 #define ACCEL_POLY_FILL_ARC 1
 #define ACCEL_POLY_TEXT8 1
 #define ACCEL_POLY_TEXT16 1
+#define ACCEL_POLY_GLYPH 1
 #define ACCEL_IMAGE_TEXT8 1
 #define ACCEL_IMAGE_TEXT16 1
 #define ACCEL_IMAGE_GLYPH 1
-#define ACCEL_POLY_GLYPH 1
 #define ACCEL_PUSH_PIXELS 1
 
+#if 0
+static void __sna_fallback_flush(DrawablePtr d)
+{
+	PixmapPtr pixmap = get_drawable_pixmap(d);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_pixmap *priv;
+	BoxRec box;
+	PixmapPtr tmp;
+	int i, j;
+	char *src, *dst;
+
+	DBG(("%s: uploading CPU damage...\n", __FUNCTION__));
+	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
+	if (priv == NULL)
+		return;
+
+	DBG(("%s: downloading GPU damage...\n", __FUNCTION__));
+	if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
+		return;
+
+	box.x1 = box.y1 = 0;
+	box.x2 = pixmap->drawable.width;
+	box.y2 = pixmap->drawable.height;
+
+	tmp = fbCreatePixmap(pixmap->drawable.pScreen,
+			     pixmap->drawable.width,
+			     pixmap->drawable.height,
+			     pixmap->drawable.depth,
+			     0);
+
+	DBG(("%s: comparing with direct read...\n", __FUNCTION__));
+	sna_read_boxes(sna,
+		       priv->gpu_bo, 0, 0,
+		       tmp, 0, 0,
+		       &box, 1);
+
+	src = pixmap->devPrivate.ptr;
+	dst = tmp->devPrivate.ptr;
+	for (i = 0; i < tmp->drawable.height; i++) {
+		if (memcmp(src, dst, tmp->drawable.width * tmp->drawable.bitsPerPixel >> 3)) {
+			for (j = 0; src[j] == dst[j]; j++)
+				;
+			ErrorF("mismatch at (%d, %d)\n",
+			       8*j / tmp->drawable.bitsPerPixel, i);
+			abort();
+		}
+		src += pixmap->devKind;
+		dst += tmp->devKind;
+	}
+	fbDestroyPixmap(tmp);
+}
+#define FALLBACK_FLUSH(d) __sna_fallback_flush(d)
+#else
+#define FALLBACK_FLUSH(d)
+#endif
+
 static int sna_font_key;
 
 static const uint8_t copy_ROP[] = {
@@ -1453,8 +1509,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	}
 
 done:
-	if (flags & MOVE_WRITE && !DAMAGE_IS_ALL(priv->cpu_damage)) {
+	if (flags & MOVE_WRITE) {
 		DBG(("%s: applying cpu damage\n", __FUNCTION__));
+		assert(!DAMAGE_IS_ALL(priv->cpu_damage));
 		assert_pixmap_contains_box(pixmap, RegionExtents(region));
 		sna_damage_add(&priv->cpu_damage, region);
 		if (priv->gpu_bo &&
@@ -2982,6 +3039,7 @@ fallback:
 	DBG(("%s: fbPutImage(%d, %d, %d, %d)\n",
 	     __FUNCTION__, x, y, w, h));
 	fbPutImage(drawable, gc, depth, x, y, w, h, left, format, bits);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
@@ -3694,6 +3752,7 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				  src_x, src_y,
 				  width, height,
 				  dst_x, dst_y);
+		FALLBACK_FLUSH(dst);
 out:
 		RegionUninit(&region);
 		return ret;
@@ -4625,6 +4684,7 @@ fallback:
 
 	DBG(("%s: fbFillSpans\n", __FUNCTION__));
 	fbFillSpans(drawable, gc, n, pt, width, sorted);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
@@ -4663,6 +4723,7 @@ fallback:
 
 	DBG(("%s: fbSetSpans\n", __FUNCTION__));
 	fbSetSpans(drawable, gc, src, pt, width, n, sorted);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
@@ -5137,6 +5198,7 @@ fallback:
 	DBG(("%s: fbCopyPlane(%d, %d, %d, %d, %d,%d) %x\n",
 	     __FUNCTION__, src_x, src_y, w, h, dst_x, dst_y, (unsigned)bit));
 	ret = fbCopyPlane(src, dst, gc, src_x, src_y, w, h, dst_x, dst_y, bit);
+	FALLBACK_FLUSH(dst);
 out:
 	RegionUninit(&region);
 	return ret;
@@ -5336,6 +5398,7 @@ fallback:
 
 	DBG(("%s: fbPolyPoint\n", __FUNCTION__));
 	fbPolyPoint(drawable, gc, mode, n, pt);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
@@ -6389,6 +6452,7 @@ fallback:
 
 	DBG(("%s: fbPolyLine\n", __FUNCTION__));
 	fbPolyLine(drawable, gc, mode, n, pt);
+	FALLBACK_FLUSH(drawable);
 
 	gc->ops = (GCOps *)&sna_gc_ops;
 out:
@@ -7301,6 +7365,7 @@ fallback:
 
 	DBG(("%s: fbPolySegment\n", __FUNCTION__));
 	fbPolySegment(drawable, gc, n, seg);
+	FALLBACK_FLUSH(drawable);
 
 	gc->ops = (GCOps *)&sna_gc_ops;
 out:
@@ -7850,6 +7915,7 @@ fallback:
 
 	DBG(("%s: fbPolyRectangle\n", __FUNCTION__));
 	fbPolyRectangle(drawable, gc, n, r);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
@@ -8027,6 +8093,7 @@ fallback:
 
 	DBG(("%s -- fbPolyArc\n", __FUNCTION__));
 	fbPolyArc(drawable, gc, n, arc);
+	FALLBACK_FLUSH(drawable);
 
 	gc->ops = (GCOps *)&sna_gc_ops;
 out:
@@ -9792,6 +9859,7 @@ fallback:
 			}
 		} while (--n);
 	}
+	FALLBACK_FLUSH(draw);
 out:
 	RegionUninit(&region);
 }
@@ -10332,6 +10400,7 @@ force_fallback:
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 		fbPolyGlyphBlt(drawable, gc, x, y, n,
 			       info, FONTGLYPHS(gc->font));
+		FALLBACK_FLUSH(drawable);
 	}
 out:
 	RegionUninit(&region);
@@ -10420,6 +10489,7 @@ force_fallback:
 		DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 		fbPolyGlyphBlt(drawable, gc, x, y, n,
 			       info, FONTGLYPHS(gc->font));
+		FALLBACK_FLUSH(drawable);
 	}
 out:
 	RegionUninit(&region);
@@ -10517,6 +10587,7 @@ force_fallback:
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 		fbImageGlyphBlt(drawable, gc, x, y, n,
 				info, FONTGLYPHS(gc->font));
+		FALLBACK_FLUSH(drawable);
 	}
 out:
 	RegionUninit(&region);
@@ -10607,6 +10678,7 @@ force_fallback:
 		DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 		fbImageGlyphBlt(drawable, gc, x, y, n,
 				info, FONTGLYPHS(gc->font));
+		FALLBACK_FLUSH(drawable);
 	}
 out:
 	RegionUninit(&region);
@@ -10869,6 +10941,7 @@ fallback:
 
 	DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
 	fbImageGlyphBlt(drawable, gc, x, y, n, info, base);
+	FALLBACK_FLUSH(drawable);
 
 out:
 	RegionUninit(&region);
@@ -10943,6 +11016,7 @@ fallback:
 
 	DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
 	fbPolyGlyphBlt(drawable, gc, x, y, n, info, base);
+	FALLBACK_FLUSH(drawable);
 
 out:
 	RegionUninit(&region);
@@ -11123,6 +11197,7 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
 	DBG(("%s: fallback, fbPushPixels(%d, %d, %d %d)\n",
 	     __FUNCTION__, w, h, x, y));
 	fbPushPixels(gc, bitmap, drawable, w, h, x, y);
+	FALLBACK_FLUSH(drawable);
 out:
 	RegionUninit(&region);
 }
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index ab825aa..f52ecac 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -514,8 +514,7 @@ static void __sna_damage_reduce(struct sna_damage *damage)
 		damage->extents = region->extents;
 	}
 
-	if (free_boxes)
-		free(boxes);
+	free(free_boxes);
 
 done:
 	damage->mode = DAMAGE_ADD;
@@ -1048,6 +1047,7 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
 					       &damage->region,
 					       region);
 			damage->extents = damage->region.extents;
+			assert(pixman_region_not_empty(&damage->region));
 			return damage;
 		}
 
commit c8fc2cde53ef7aa011ec7c47e7fc5486de0651f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Feb 1 01:27:43 2012 +0000

    sna: Fill extents for ImageGlyphs
    
    The spec says to fill the characters boxes, which is what the hardware
    does. The implementation fills the extents instead. rxvt expects the
    former, emacs the latter. Overdraw is a nuisance, but less than leaving
    glyphs behind...
    
    Reported-by: walch.martin at web.de
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45438
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index a0ea54f..9df8cfd 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -359,7 +359,8 @@ to_sna_from_kgem(struct kgem *kgem)
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
 #endif
 #define ALIGN(i,m)	(((i) + (m) - 1) & ~((m) - 1))
-#define MIN(a,b)	((a) < (b) ? (a) : (b))
+#define MIN(a,b)	((a) <= (b) ? (a) : (b))
+#define MAX(a,b)	((a) >= (b) ? (a) : (b))
 
 extern xf86CrtcPtr sna_covering_crtc(ScrnInfoPtr scrn,
 				     const BoxRec *box,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4148bdb..40748ec 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -10011,6 +10011,11 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	extents = REGION_RECTS(clip);
 	last_extents = extents + REGION_NUM_RECTS(clip);
 
+	if (bg != -1) /* emulate miImageGlyphBlt */
+		sna_blt_fill_boxes(sna, GXcopy,
+				   bo, drawable->bitsPerPixel,
+				   bg, extents, REGION_NUM_RECTS(clip));
+
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	if (!kgem_check_batch(&sna->kgem, 16) ||
 	    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
@@ -10174,6 +10179,8 @@ sna_glyph_extents(FontPtr font,
 
 		extents->overallWidth += p->metrics.characterWidth;
 	}
+
+	assert(extents->overallWidth > 0);
 }
 
 static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
@@ -10458,10 +10465,17 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 		return;
 
 	sna_glyph_extents(gc->font, info, n, &extents);
-	region.extents.x1 = x + extents.overallLeft;
-	region.extents.y1 = y - extents.overallAscent;
-	region.extents.x2 = x + extents.overallRight;
-	region.extents.y2 = y + extents.overallDescent;
+	region.extents.x1 = x + MIN(0, extents.overallLeft);
+	region.extents.y1 = y - extents.fontAscent;
+	region.extents.x2 = x + MAX(extents.overallWidth, extents.overallRight);
+	region.extents.y2 = y + extents.fontDescent;
+
+	DBG(("%s: count=%ld/%d, extents=(left=%d, right=%d, width=%d, ascent=%d, descent=%d), box=(%d, %d), (%d, %d)\n",
+	     __FUNCTION__, n, count,
+	     extents.overallLeft, extents.overallRight, extents.overallWidth,
+	     extents.fontAscent, extents.fontDescent,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
 	translate_box(&region.extents, drawable);
 	clip_box(&region.extents, gc);
@@ -10473,6 +10487,11 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (!RegionNotEmpty(&region))
 		return;
 
+	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
 	if (FORCE_FALLBACK)
 		goto force_fallback;
 
@@ -10535,10 +10554,17 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 		return;
 
 	sna_glyph_extents(gc->font, info, n, &extents);
-	region.extents.x1 = x + extents.overallLeft;
-	region.extents.y1 = y - extents.overallAscent;
-	region.extents.x2 = x + extents.overallRight;
-	region.extents.y2 = y + extents.overallDescent;
+	region.extents.x1 = x + MIN(0, extents.overallLeft);
+	region.extents.y1 = y - extents.fontAscent;
+	region.extents.x2 = x + MAX(extents.overallWidth, extents.overallRight);
+	region.extents.y2 = y + extents.fontDescent;
+
+	DBG(("%s: count=%ld/%d, extents=(left=%d, right=%d, width=%d, ascent=%d, descent=%d), box=(%d, %d), (%d, %d)\n",
+	     __FUNCTION__, n, count,
+	     extents.overallLeft, extents.overallRight, extents.overallWidth,
+	     extents.fontAscent, extents.fontDescent,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
 	translate_box(&region.extents, drawable);
 	clip_box(&region.extents, gc);
@@ -10550,6 +10576,11 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (!RegionNotEmpty(&region))
 		return;
 
+	DBG(("%s: clipped extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
 	if (FORCE_FALLBACK)
 		goto force_fallback;
 
@@ -10625,6 +10656,11 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	extents = REGION_RECTS(clip);
 	last_extents = extents + REGION_NUM_RECTS(clip);
 
+	if (bg != -1) /* emulate miImageGlyphBlt */
+		sna_blt_fill_boxes(sna, GXcopy,
+				   bo, drawable->bitsPerPixel,
+				   bg, extents, REGION_NUM_RECTS(clip));
+
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	if (!kgem_check_batch(&sna->kgem, 16) ||
 	    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
@@ -10775,11 +10811,18 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	if (n == 0)
 		return;
 
-	QueryGlyphExtents(gc->font, info, n, &extents);
-	region.extents.x1 = x + extents.overallLeft;
-	region.extents.y1 = y - extents.overallAscent;
-	region.extents.x2 = x + extents.overallRight;
-	region.extents.y2 = y + extents.overallDescent;
+	sna_glyph_extents(gc->font, info, n, &extents);
+	region.extents.x1 = x + MIN(0, extents.overallLeft);
+	region.extents.y1 = y - extents.fontAscent;
+	region.extents.x2 = x + MAX(extents.overallWidth, extents.overallRight);
+	region.extents.y2 = y + extents.fontDescent;
+
+	DBG(("%s: count=%d, extents=(left=%d, right=%d, width=%d, ascent=%d, descent=%d), box=(%d, %d), (%d, %d)\n",
+	     __FUNCTION__, n,
+	     extents.overallLeft, extents.overallRight, extents.overallWidth,
+	     extents.fontAscent, extents.fontDescent,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 
 	translate_box(&region.extents, drawable);
 	clip_box(&region.extents, gc);
@@ -10847,7 +10890,7 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	if (n == 0)
 		return;
 
-	QueryGlyphExtents(gc->font, info, n, &extents);
+	sna_glyph_extents(gc->font, info, n, &extents);
 	region.extents.x1 = x + extents.overallLeft;
 	region.extents.y1 = y - extents.overallAscent;
 	region.extents.x2 = x + extents.overallRight;
commit 13508ab5ea136caca90c846ff1026c0c1acd2ad5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 22:38:46 2012 +0000

    sna: PolyGlyph supports all of fill/tile/stipple rules
    
    The hw routines only directly supports solid fill so fallback for the
    interesting cases. An alternative would be to investigate using the
    miPolyGlyph routine to convert the weird fills into spans in order to
    fallback. Sounds cheaper to fallback, so wait for an actual use case.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index cccdd59..b446558 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -719,11 +719,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	DBG(("%s: large object thresold=%d\n",
 	     __FUNCTION__, kgem->large_object_size));
-	DBG(("%s: max object size (gpu=%d, cpu=%d, tile=%d)\n",
+	DBG(("%s: max object size (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n",
 	     __FUNCTION__,
-	     kgem->max_gpu_size,
-	     kgem->max_cpu_size,
-	     kgem->max_tile_size));
+	     kgem->max_gpu_size, kgem->max_cpu_size,
+	     kgem->max_upload_tile_size, kgem->max_copy_tile_size));
 
 	/* Convert the aperture thresholds to pages */
 	kgem->aperture_low /= PAGE_SIZE;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3115bd7..4148bdb 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9968,7 +9968,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	      int _x, int _y, unsigned int _n,
 	      CharInfoPtr *_info,
 	      RegionRec *clip,
-	      bool transparent)
+	      uint32_t fg, uint32_t bg)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
@@ -9979,10 +9979,10 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 	int16_t dx, dy;
 	uint32_t br00;
 
-	uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
+	uint8_t rop = bg == -1 ? copy_ROP[gc->alu] : ROP_S;
 
-	DBG(("%s (%d, %d) x %d, transparent? %d, alu=%d\n",
-	     __FUNCTION__, _x, _y, _n, transparent, rop));
+	DBG(("%s (%d, %d) x %d, fg=%08x, bg=%08x alu=%02x\n",
+	     __FUNCTION__, _x, _y, _n, fg, bg, rop));
 
 	if (wedged(sna)) {
 		DBG(("%s -- fallback, wedged\n", __FUNCTION__));
@@ -10025,7 +10025,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		b[0] |= BLT_DST_TILED;
 		b[1] >>= 2;
 	}
-	b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+	b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 	b[2] = extents->y1 << 16 | extents->x1;
 	b[3] = extents->y2 << 16 | extents->x2;
 	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10033,8 +10033,8 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			      I915_GEM_DOMAIN_RENDER |
 			      KGEM_RELOC_FENCED,
 			      0);
-	b[5] = gc->bgPixel;
-	b[6] = gc->fgPixel;
+	b[5] = bg;
+	b[6] = fg;
 	b[7] = 0;
 	sna->kgem.nbatch += 8;
 
@@ -10079,7 +10079,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 					b[0] |= BLT_DST_TILED;
 					b[1] >>= 2;
 				}
-				b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+				b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 				b[2] = extents->y1 << 16 | extents->x1;
 				b[3] = extents->y2 << 16 | extents->x2;
 				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10087,8 +10087,8 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 						      I915_GEM_DOMAIN_RENDER |
 						      KGEM_RELOC_FENCED,
 						      0);
-				b[5] = gc->bgPixel;
-				b[6] = gc->fgPixel;
+				b[5] = bg;
+				b[6] = fg;
 				b[7] = 0;
 				sna->kgem.nbatch += 8;
 			}
@@ -10269,6 +10269,7 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	ExtentInfoRec extents;
 	RegionRec region;
 	long unsigned i, n;
+	uint32_t fg;
 
 	if (drawable->depth < 8)
 		goto fallback;
@@ -10302,7 +10303,13 @@ sna_poly_text8(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_POLY_TEXT8)
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		return false;
+
+	if (!gc_is_solid(gc, &fg))
+		goto force_fallback;
+
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10350,6 +10357,7 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	ExtentInfoRec extents;
 	RegionRec region;
 	long unsigned i, n;
+	uint32_t fg;
 
 	if (drawable->depth < 8)
 		goto fallback;
@@ -10383,7 +10391,13 @@ sna_poly_text16(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_POLY_TEXT16)
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		return false;
+
+	if (!gc_is_solid(gc, &fg))
+		goto force_fallback;
+
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, fg, -1)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10465,7 +10479,10 @@ sna_image_text8(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_IMAGE_TEXT8)
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		goto force_fallback;
+
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, gc->fgPixel, gc->bgPixel)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10539,7 +10556,10 @@ sna_image_text16(DrawablePtr drawable, GCPtr gc,
 	if (!ACCEL_IMAGE_TEXT16)
 		goto force_fallback;
 
-	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		goto force_fallback;
+
+	if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, gc->fgPixel, gc->bgPixel)) {
 force_fallback:
 		DBG(("%s: fallback\n", __FUNCTION__));
 		gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
@@ -10580,14 +10600,14 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
 		       RegionPtr clip,
-		       bool transparent)
+		       uint32_t fg, uint32_t bg)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	const BoxRec *extents, *last_extents;
 	uint32_t *b;
 	int16_t dx, dy;
-	uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
+	uint8_t rop = bg == -1 ? copy_ROP[gc->alu] : ROP_S;
 
 	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
@@ -10619,7 +10639,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		b[0] |= BLT_DST_TILED;
 		b[1] >>= 2;
 	}
-	b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+	b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 	b[2] = extents->y1 << 16 | extents->x1;
 	b[3] = extents->y2 << 16 | extents->x2;
 	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
@@ -10627,8 +10647,8 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			      I915_GEM_DOMAIN_RENDER |
 			      KGEM_RELOC_FENCED,
 			      0);
-	b[5] = gc->bgPixel;
-	b[6] = gc->fgPixel;
+	b[5] = bg;
+	b[6] = fg;
 	b[7] = 0;
 	sna->kgem.nbatch += 8;
 
@@ -10672,7 +10692,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 					b[0] |= BLT_DST_TILED;
 					b[1] >>= 2;
 				}
-				b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
+				b[1] |= 1 << 30 | (bg == -1) << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 				b[2] = extents->y1 << 16 | extents->x1;
 				b[3] = extents->y2 << 16 | extents->x2;
 				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
@@ -10681,8 +10701,8 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 						      I915_GEM_DOMAIN_RENDER |
 						      KGEM_RELOC_FENCED,
 						      0);
-				b[5] = gc->bgPixel;
-				b[6] = gc->fgPixel;
+				b[5] = bg;
+				b[6] = fg;
 				b[7] = 0;
 				sna->kgem.nbatch += 8;
 			}
@@ -10786,9 +10806,13 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 		goto fallback;
 	}
 
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		goto fallback;
+
 	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
-				   bo, damage, &region, false))
+				   bo, damage, &region,
+				   gc->fgPixel, gc->bgPixel))
 		goto out;
 
 fallback:
@@ -10818,6 +10842,7 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	RegionRec region;
 	struct sna_damage **damage;
 	struct kgem_bo *bo;
+	uint32_t fg;
 
 	if (n == 0)
 		return;
@@ -10853,9 +10878,15 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 		goto fallback;
 	}
 
+	if (!PM_IS_SOLID(drawable, gc->planemask))
+		goto fallback;
+
+	if (!gc_is_solid(gc, &fg))
+		goto fallback;
+
 	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
-				   bo, damage, &region, true))
+				   bo, damage, &region, fg, -1))
 		goto out;
 
 fallback:
commit df4e1059a4e09998334dde6aa1c8ccfe76e442c0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 19:19:13 2012 +0000

    sna/gen6: Prefer to do fills using the BLT
    
    Using the BLT is substantially faster than the current shaders for solid
    fill. The downside is that it invokes more ring switching.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 93410b6..ec5412a 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2210,12 +2210,23 @@ gen6_composite_set_target(struct sna *sna,
 	return TRUE;
 }
 
+static bool prefer_blt_ring(struct sna *sna)
+{
+	return sna->kgem.ring != KGEM_RENDER;
+}
+
+static bool
+is_solid(PicturePtr picture)
+{
+	return sna_picture_is_solid(picture, NULL);
+}
+
 static Bool
 try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
-	if (sna->kgem.ring != KGEM_RENDER) {
+	if (prefer_blt_ring(sna)) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
 		return TRUE;
 	}
@@ -2241,15 +2252,12 @@ try_blt(struct sna *sna,
 		return TRUE;
 	}
 
-	return FALSE;
-}
+	if (sna->kgem.has_semaphores) {
+		if (is_solid(src))
+			return TRUE;
+	}
 
-static bool
-is_solid(PicturePtr picture)
-{
-	return  picture->pDrawable->width == 1 &&
-		picture->pDrawable->height == 1 &&
-		picture->repeat;
+	return FALSE;
 }
 
 static bool
@@ -3071,7 +3079,7 @@ static inline bool prefer_blt_copy(struct sna *sna,
 				   struct kgem_bo *src_bo,
 				   struct kgem_bo *dst_bo)
 {
-	return (sna->kgem.ring != KGEM_RENDER ||
+	return (prefer_blt_ring(sna) ||
 		untiled_tlb_miss(src_bo) ||
 		untiled_tlb_miss(dst_bo));
 }
@@ -3424,7 +3432,7 @@ gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op)
 static inline bool prefer_blt_fill(struct sna *sna,
 				   struct kgem_bo *bo)
 {
-	return sna->kgem.ring != KGEM_RENDER || untiled_tlb_miss(bo);
+	return sna->kgem.has_semaphores || prefer_blt_ring(sna) || untiled_tlb_miss(bo);
 }
 
 static Bool
commit 8b012de0a1af4ec97c3197af3f1efdcc67bc2118
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 20:29:58 2012 +0000

    sna/gen5: Always prefer to emit solid fills using the BLT
    
    As the BLT is far, far faster than using a shader.
    
    Improves cairo-demos/chart from 6 to 13 fps.
    
    Reported-by: Michael Larabel <Michael at phoronix.com>
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index dc1e720..03dc8c9 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -1992,12 +1992,6 @@ picture_is_cpu(PicturePtr picture)
 	if (!picture->pDrawable)
 		return FALSE;
 
-	/* If it is a solid, try to use the render paths */
-	if (picture->pDrawable->width == 1 &&
-	    picture->pDrawable->height == 1 &&
-	    picture->repeat)
-		return FALSE;
-
 	if (too_large(picture->pDrawable->width, picture->pDrawable->height))
 		return TRUE;
 
@@ -2009,7 +2003,7 @@ try_blt(struct sna *sna,
 	PicturePtr dst, PicturePtr src,
 	int width, int height)
 {
-	if (sna->kgem.mode == KGEM_BLT) {
+	if (sna->kgem.mode != KGEM_RENDER) {
 		DBG(("%s: already performing BLT\n", __FUNCTION__));
 		return TRUE;
 	}
@@ -2023,6 +2017,10 @@ try_blt(struct sna *sna,
 	if (too_large(dst->pDrawable->width, dst->pDrawable->height))
 		return TRUE;
 
+	/* The blitter is much faster for solids */
+	if (sna_picture_is_solid(src, NULL))
+		return TRUE;
+
 	/* is the source picture only in cpu memory e.g. a shm pixmap? */
 	return picture_is_cpu(src);
 }
@@ -2733,13 +2731,18 @@ gen5_copy_bind_surfaces(struct sna *sna,
 	gen5_emit_state(sna, op, offset);
 }
 
+static inline bool untiled_tlb_miss(struct kgem_bo *bo)
+{
+	return bo->tiling == I915_TILING_NONE && bo->pitch >= 4096;
+}
+
 static inline bool prefer_blt_copy(struct sna *sna,
 				   struct kgem_bo *src_bo,
 				   struct kgem_bo *dst_bo)
 {
-	return (src_bo->tiling == I915_TILING_NONE ||
-		dst_bo->tiling == I915_TILING_NONE ||
-		sna->kgem.mode == KGEM_BLT);
+	return (sna->kgem.ring != KGEM_RENDER ||
+		untiled_tlb_miss(src_bo) ||
+		untiled_tlb_miss(dst_bo));
 }
 
 static Bool
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index b6749ec..9f51028 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -682,7 +682,8 @@ sna_picture_is_solid(PicturePtr picture, uint32_t *color)
 	if (!is_solid(picture))
 		return FALSE;
 
-	*color = get_solid_color(picture, PICT_a8r8g8b8);
+	if (color)
+		*color = get_solid_color(picture, PICT_a8r8g8b8);
 	return TRUE;
 }
 
commit 0a748fc49d60dc2bc9494f95c4934592b111831a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 10:51:02 2012 +0000

    sna: Split the tiling limits between upload and copying
    
    The kernel has a bug that prevents pwriting buffers large than the
    aperture. Whilst waiting for the fix, limit the upload where possible to
    fit within that constraint.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 311bac4..cccdd59 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -692,9 +692,13 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (kgem->max_gpu_size > kgem->max_cpu_size)
 		kgem->max_gpu_size = kgem->max_cpu_size;
 
-	kgem->max_tile_size = MAX_CACHE_SIZE;
-	if (kgem->max_tile_size > kgem->max_gpu_size / 2)
-		kgem->max_tile_size = kgem->max_gpu_size / 2;
+	kgem->max_upload_tile_size = kgem->aperture_mappable / 2;
+	if (kgem->max_upload_tile_size > kgem->max_gpu_size / 2)
+		kgem->max_upload_tile_size = kgem->max_gpu_size / 2;
+
+	kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
+	if (kgem->max_copy_tile_size > kgem->max_gpu_size / 2)
+		kgem->max_copy_tile_size = kgem->max_gpu_size / 2;
 
 	totalram = total_ram_size();
 	if (totalram == 0) {
@@ -3197,9 +3201,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
-	if (alloc > kgem->max_tile_size)
+	if (alloc > MAX_CACHE_SIZE)
 		alloc = ALIGN(size, kgem->partial_buffer_size);
-	if (alloc > kgem->max_tile_size)
+	if (alloc > MAX_CACHE_SIZE)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
 	if (kgem->has_cpu_bo) {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 87dc386..974a716 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -161,7 +161,8 @@ struct kgem {
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_tile_size, max_gpu_size, max_cpu_size;
+	uint32_t max_upload_tile_size, max_copy_tile_size;
+	uint32_t max_gpu_size, max_cpu_size;
 	uint32_t large_object_size, max_object_size;
 	uint32_t partial_buffer_size;
 
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index f4278be..eb5df9d 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -61,7 +61,7 @@ box_intersect(BoxPtr a, const BoxRec *b)
 
 static inline bool upload_too_large(struct sna *sna, int width, int height)
 {
-	return width * height * 4 > sna->kgem.max_tile_size;
+	return width * height * 4 > sna->kgem.max_upload_tile_size;
 }
 
 static inline bool must_tile(struct sna *sna, int width, int height)
@@ -209,7 +209,7 @@ fallback:
 
 			step = MIN(sna->render.max_3d_size,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
-			while (step * step * 4 > sna->kgem.max_tile_size)
+			while (step * step * 4 > sna->kgem.max_upload_tile_size)
 				step /= 2;
 
 			DBG(("%s: tiling download, using %dx%d tiles\n",
@@ -595,7 +595,7 @@ fallback:
 tile:
 			step = MIN(sna->render.max_3d_size,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
-			while (step * step * 4 > sna->kgem.max_tile_size)
+			while (step * step * 4 > sna->kgem.max_upload_tile_size)
 				step /= 2;
 
 			DBG(("%s: tiling upload, using %dx%d tiles\n",
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index a3bf19d..702192a 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -144,7 +144,7 @@ sna_tiling_composite_done(struct sna *sna,
 	step = sna->render.max_3d_size;
 	if (tile->dst_x & (8*512 / tile->dst->pDrawable->bitsPerPixel - 1))
 		step /= 2;
-	while (step * step * 4 > sna->kgem.max_tile_size)
+	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
 
 	DBG(("%s -- %dx%d, count=%d, step size=%d\n", __FUNCTION__,
@@ -331,7 +331,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 	pixman_region_init_rects(&region, box, n);
 
 	step = sna->render.max_3d_size;
-	while (step * step * 4 > sna->kgem.max_tile_size)
+	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
 
 	DBG(("%s (op=%d, format=%x, color=(%04x,%04x,%04x, %04x), tile.size=%d, box=%dx[(%d, %d), (%d, %d)])\n",
@@ -444,7 +444,7 @@ Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
 	pixman_region_init_rects(&region, box, nbox);
 
 	step = sna->render.max_3d_size;
-	while (step * step * 4 > sna->kgem.max_tile_size)
+	while (step * step * 4 > sna->kgem.max_copy_tile_size)
 		step /= 2;
 
 	DBG(("%s (alu=%d), tile.size=%d, box=%dx[(%d, %d), (%d, %d)])\n",
commit 9c1f8a768ca1f762c722f63bab2747e4ff1fd773
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 10:29:02 2012 +0000

    sna: Avoid converting requested Y to X tiling for large pitches on gen4+
    
    The only strong requirement is that to utilize large pitches, the object
    must be tiled. Having it as X tiling is a pure convenience to facilitate
    use of the blitter. A DRI client may want to keep using Y tiling
    instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0b2e1d6..311bac4 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2151,7 +2151,10 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 		if (width*bpp > (MAXSHORT-512) * 8) {
 			DBG(("%s: large pitch [%d], forcing TILING_X\n",
 			     __FUNCTION__, width*bpp/8));
-			tiling = -I915_TILING_X;
+			if (tiling > 0)
+				tiling = -tiling;
+			else if (tiling == 0)
+				tiling = -I915_TILING_X;
 		} else if (tiling && (width|height) > 8192) {
 			DBG(("%s: large tiled buffer [%dx%d], forcing TILING_X\n",
 			     __FUNCTION__, width, height));
commit e872c1011fc7b67683703fd891234f07dd7acd04
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 10:21:05 2012 +0000

    sna/dri: We need to reduce tiling on older gen if we cannot fence
    
    Only apply the architectural limits to enable bo creation for DRI buffers.
    
    Reported-by: Alban Browaeys <prahal at yahoo.com>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45414
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_dri.c b/src/sna/sna_dri.c
index 2245c03..d0e19cf 100644
--- a/src/sna/sna_dri.c
+++ b/src/sna/sna_dri.c
@@ -127,19 +127,37 @@ static inline struct kgem_bo *ref(struct kgem_bo *bo)
 /* Prefer to enable TILING_Y if this buffer will never be a
  * candidate for pageflipping
  */
-static bool color_use_tiling_y(struct sna *sna, DrawablePtr drawable)
+static uint32_t color_tiling(struct sna *sna, DrawablePtr drawable)
 {
-	if (!COLOR_PREFER_TILING_Y)
-		return false;
+	uint32_t tiling;
 
-	return (drawable->width != sna->front->drawable.width ||
-		drawable->height != sna->front->drawable.height);
+	if (COLOR_PREFER_TILING_Y &&
+	    (drawable->width  != sna->front->drawable.width ||
+	     drawable->height != sna->front->drawable.height))
+		tiling = I915_TILING_Y;
+	else
+		tiling = I915_TILING_X;
+
+	return kgem_choose_tiling(&sna->kgem, -tiling,
+				  drawable->width,
+				  drawable->height,
+				  drawable->bitsPerPixel);
+}
+
+static uint32_t other_tiling(struct sna *sna, DrawablePtr drawable)
+{
+	/* XXX Can mix color X / depth Y? */
+	return kgem_choose_tiling(&sna->kgem, -I915_TILING_Y,
+				  drawable->width,
+				  drawable->height,
+				  drawable->bitsPerPixel);
 }
 
 static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 					  PixmapPtr pixmap)
 {
 	struct sna_pixmap *priv;
+	uint32_t tiling;
 
 	priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
 	if (priv == NULL)
@@ -148,9 +166,9 @@ static struct kgem_bo *sna_pixmap_set_dri(struct sna *sna,
 	if (priv->flush)
 		return ref(priv->gpu_bo);
 
-	if (priv->gpu_bo->tiling != I915_TILING_Y &&
-	    color_use_tiling_y(sna, &pixmap->drawable))
-		sna_pixmap_change_tiling(pixmap, I915_TILING_Y);
+	tiling = color_tiling(sna, &pixmap->drawable);
+	if (priv->gpu_bo->tiling != tiling)
+		sna_pixmap_change_tiling(pixmap, tiling);
 
 	/* We need to submit any modifications to and reads from this
 	 * buffer before we send any reply to the Client.
@@ -209,7 +227,7 @@ sna_dri_create_buffer(DrawablePtr drawable,
 				    drawable->width,
 				    drawable->height,
 				    drawable->bitsPerPixel,
-				    color_use_tiling_y(sna, drawable) ?  I915_TILING_Y : I915_TILING_X,
+				    color_tiling(sna, drawable),
 				    CREATE_EXACT);
 		break;
 
@@ -252,8 +270,7 @@ sna_dri_create_buffer(DrawablePtr drawable,
 		bpp = format ? format : drawable->bitsPerPixel,
 		bo = kgem_create_2d(&sna->kgem,
 				    drawable->width, drawable->height, bpp,
-				    //sna->kgem.gen >= 40 ? I915_TILING_Y : I915_TILING_X,
-				    I915_TILING_Y,
+				    other_tiling(sna, drawable),
 				    CREATE_EXACT);
 		break;
 
commit a4caf67d8da37d04f8915d96b10411ba7267937e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 00:35:42 2012 +0000

    sna: Trim tile sizes to fit into bo cache
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index ca7eafa..0b2e1d6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -692,13 +692,14 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (kgem->max_gpu_size > kgem->max_cpu_size)
 		kgem->max_gpu_size = kgem->max_cpu_size;
 
-	kgem->max_tile_size = kgem->aperture_total / 4;
+	kgem->max_tile_size = MAX_CACHE_SIZE;
 	if (kgem->max_tile_size > kgem->max_gpu_size / 2)
 		kgem->max_tile_size = kgem->max_gpu_size / 2;
 
 	totalram = total_ram_size();
 	if (totalram == 0) {
-		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n"));
+		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n",
+		     __FUNCTION__));
 		totalram = kgem->aperture_total;
 	}
 	if (kgem->max_object_size > totalram / 2)
@@ -3193,7 +3194,9 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
-	if (alloc > kgem->max_gpu_size)
+	if (alloc > kgem->max_tile_size)
+		alloc = ALIGN(size, kgem->partial_buffer_size);
+	if (alloc > kgem->max_tile_size)
 		alloc = PAGE_ALIGN(size);
 	alloc /= PAGE_SIZE;
 	if (kgem->has_cpu_bo) {
commit 3f7c1646c78d8854c88b214d3699e51839ba9711
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 31 00:09:42 2012 +0000

    sna: Check that the intermediate IO buffer can also be used for blitting
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index e35be97..3115bd7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -716,8 +716,8 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	} else {
 		struct sna_pixmap *priv;
 
-		DBG(("%s: creating GPU pixmap %dx%d, stride=%d\n",
-		     __FUNCTION__, width, height, pad));
+		DBG(("%s: creating GPU pixmap %dx%d, stride=%d, flags=%x\n",
+		     __FUNCTION__, width, height, pad, flags));
 
 		pixmap = create_pixmap(sna, screen, 0, 0, depth, usage);
 		if (pixmap == NullPixmap)
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 4f86f8d..f4278be 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -59,11 +59,16 @@ box_intersect(BoxPtr a, const BoxRec *b)
 	return a->x1 < a->x2 && a->y1 < a->y2;
 }
 
+static inline bool upload_too_large(struct sna *sna, int width, int height)
+{
+	return width * height * 4 > sna->kgem.max_tile_size;
+}
+
 static inline bool must_tile(struct sna *sna, int width, int height)
 {
 	return (width  > sna->render.max_3d_size ||
 		height > sna->render.max_3d_size ||
-		width * height * 4 > sna->kgem.max_tile_size);
+		upload_too_large(sna, width, height));
 }
 
 static void read_boxes_inplace(struct kgem *kgem,
@@ -118,6 +123,7 @@ void sna_read_boxes(struct sna *sna,
 	void *ptr;
 	int src_pitch, cpp, offset;
 	int n, cmd, br13;
+	bool can_blt;
 
 	DBG(("%s x %d, src=(handle=%d, offset=(%d,%d)), dst=(size=(%d, %d), offset=(%d,%d))\n",
 	     __FUNCTION__, nbox, src_bo->handle, src_dx, src_dy,
@@ -154,6 +160,7 @@ fallback:
 		return;
 	}
 
+	can_blt = kgem_bo_can_blt(kgem, src_bo);
 	extents = box[0];
 	for (n = 1; n < nbox; n++) {
 		if (box[n].x1 < extents.x1)
@@ -161,6 +168,9 @@ fallback:
 		if (box[n].x2 > extents.x2)
 			extents.x2 = box[n].x2;
 
+		if (can_blt)
+			can_blt = (box[n].x2 - box[n].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
+
 		if (box[n].y1 < extents.y1)
 			extents.y1 = box[n].y1;
 		if (box[n].y2 > extents.y2)
@@ -173,9 +183,8 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (kgem->ring == KGEM_RENDER ||
-	    !kgem_bo_can_blt(kgem, src_bo) ||
-	    must_tile(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
+	if (!can_blt || kgem->ring == KGEM_RENDER ||
+	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
 		tmp.drawable.width  = extents.x2 - extents.x1;
@@ -531,6 +540,7 @@ bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 	void *ptr;
 	int offset;
 	int n, cmd, br13;
+	bool can_blt;
 
 	DBG(("%s x %d\n", __FUNCTION__, nbox));
 
@@ -542,6 +552,7 @@ fallback:
 					   box, nbox);
 	}
 
+	can_blt = kgem_bo_can_blt(kgem, dst_bo);
 	extents = box[0];
 	for (n = 1; n < nbox; n++) {
 		if (box[n].x1 < extents.x1)
@@ -549,6 +560,9 @@ fallback:
 		if (box[n].x2 > extents.x2)
 			extents.x2 = box[n].x2;
 
+		if (can_blt)
+			can_blt = (box[n].x2 - box[n].x1) * dst->drawable.bitsPerPixel < 8 * (MAXSHORT - 4);
+
 		if (box[n].y1 < extents.y1)
 			extents.y1 = box[n].y1;
 		if (box[n].y2 > extents.y2)
@@ -556,9 +570,8 @@ fallback:
 	}
 
 	/* Try to avoid switching rings... */
-	if (kgem->ring == KGEM_RENDER ||
-	    !kgem_bo_can_blt(kgem, dst_bo) ||
-	    must_tile(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
+	if (!can_blt || kgem->ring == KGEM_RENDER ||
+	    upload_too_large(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
 		tmp.drawable.width  = extents.x2 - extents.x1;
@@ -579,6 +592,7 @@ fallback:
 			BoxRec tile, stack[64], *clipped, *c;
 			int step;
 
+tile:
 			step = MIN(sna->render.max_3d_size,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
 			while (step * step * 4 > sna->kgem.max_tile_size)
@@ -693,7 +707,7 @@ fallback:
 			kgem_bo_destroy(&sna->kgem, src_bo);
 
 			if (!n)
-				goto fallback;
+				goto tile;
 		}
 
 		return true;
@@ -1069,9 +1083,13 @@ indirect_replace(struct sna *sna,
 	if ((int)pixmap->devKind * pixmap->drawable.height >> 12 > kgem->half_cpu_cache_pages)
 		return false;
 
-	if (bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) {
+	if (kgem->ring == KGEM_RENDER || !kgem_bo_can_blt(kgem, bo)) {
 		BoxRec box;
 
+		assert(!must_tile(sna,
+				  pixmap->drawable.width,
+				  pixmap->drawable.height));
+
 		src_bo = kgem_create_buffer_2d(kgem,
 					       pixmap->drawable.width,
 					       pixmap->drawable.height,
commit e504fab6c5354ae9d18ccefb10bd586fa49b924c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 23:49:18 2012 +0000

    sna: Discard the cleared GPU buffer upon PutImage to the CPU buffer
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 26fd1ab..e35be97 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2533,6 +2533,26 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, false))
 		return true;
 
+	if (priv->clear) {
+		DBG(("%s: applying clear [%08x]\n",
+		     __FUNCTION__, priv->clear_color));
+
+		pixman_fill(pixmap->devPrivate.ptr,
+			    pixmap->devKind/sizeof(uint32_t),
+			    pixmap->drawable.bitsPerPixel,
+			    0, 0,
+			    pixmap->drawable.width,
+			    pixmap->drawable.height,
+			    priv->clear_color);
+
+		sna_damage_all(&priv->cpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+		sna_pixmap_free_gpu(sna, priv);
+		priv->undamaged = false;
+		priv->clear = false;
+	}
+
 	if (!DAMAGE_IS_ALL(priv->cpu_damage)) {
 		DBG(("%s: marking damage\n", __FUNCTION__));
 		if (region_subsumes_drawable(region, &pixmap->drawable)) {
commit ed1c1a7468d78e99cb4f9a4a8b8a6b00c3257a75
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 13:43:59 2012 +0000

    sna: Track large objects and limit prefer-gpu hint to small objects
    
    As the GATT is irrespective of actual RAM size, we need to be careful
    not to be too generous when allocating GPU bo and their shadows. So
    first of all we limit default render targets to those small enough to
    fit comfortably in RAM alongside others, and secondly we try to only
    keep a single copy of large objects in memory.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/configure.ac b/configure.ac
index 63beb64..e953ae5 100644
--- a/configure.ac
+++ b/configure.ac
@@ -132,6 +132,7 @@ required_pixman_version=0.24
 if test "x$SNA" != "xno"; then
 	required_xorg_xserver_version=1.10
 	AC_DEFINE(USE_SNA, 1, [Enable SNA support])
+	AC_CHECK_HEADERS([sys/sysinfo.h])
 fi
 AC_MSG_RESULT([$SNA])
 
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 5ab5c83..ca7eafa 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -44,6 +44,10 @@
 #include <memcheck.h>
 #endif
 
+#if HAVE_SYS_SYSINFO_H
+#include <sys/sysinfo.h>
+#endif
+
 static struct kgem_bo *
 search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
@@ -498,6 +502,18 @@ agp_aperture_size(struct pci_device *dev, int gen)
 }
 
 static size_t
+total_ram_size(void)
+{
+#if HAVE_SYS_SYSINFO_H
+	struct sysinfo info;
+	if (sysinfo(&info) == 0)
+		return info.totalram * info.mem_unit;
+#endif
+
+	return 0;
+}
+
+static size_t
 cpu_cache_size(void)
 {
 	FILE *file = fopen("/proc/cpuinfo", "r");
@@ -556,6 +572,7 @@ static bool semaphores_enabled(void)
 void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 {
 	struct drm_i915_gem_get_aperture aperture;
+	size_t totalram;
 	unsigned int i, j;
 
 	memset(kgem, 0, sizeof(*kgem));
@@ -679,6 +696,24 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	if (kgem->max_tile_size > kgem->max_gpu_size / 2)
 		kgem->max_tile_size = kgem->max_gpu_size / 2;
 
+	totalram = total_ram_size();
+	if (totalram == 0) {
+		DBG(("%s: total ram size unknown, assuming maximum of total aperture\n"));
+		totalram = kgem->aperture_total;
+	}
+	if (kgem->max_object_size > totalram / 2)
+		kgem->max_object_size = totalram / 2;
+	if (kgem->max_cpu_size > totalram / 2)
+		kgem->max_cpu_size = totalram / 2;
+	if (kgem->max_gpu_size > totalram / 4)
+		kgem->max_gpu_size = totalram / 4;
+
+	kgem->large_object_size = MAX_CACHE_SIZE;
+	if (kgem->large_object_size > kgem->max_gpu_size)
+		kgem->large_object_size = kgem->max_gpu_size;
+
+	DBG(("%s: large object thresold=%d\n",
+	     __FUNCTION__, kgem->large_object_size));
 	DBG(("%s: max object size (gpu=%d, cpu=%d, tile=%d)\n",
 	     __FUNCTION__,
 	     kgem->max_gpu_size,
@@ -2179,83 +2214,40 @@ done:
 	return tiling;
 }
 
-bool kgem_can_create_2d(struct kgem *kgem,
-			 int width, int height, int depth)
+unsigned kgem_can_create_2d(struct kgem *kgem,
+			    int width, int height, int depth)
 {
 	int bpp = BitsPerPixel(depth);
 	uint32_t pitch, size;
+	unsigned flags = 0;
 
 	if (depth < 8 || kgem->wedged)
-		return false;
-
-	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp,
-				 I915_TILING_X, &pitch);
-	if (size > 0 && size <= kgem->max_object_size)
-		return true;
-
-	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp,
-				 I915_TILING_NONE, &pitch);
-	if (size > 0 && size <= kgem->max_object_size)
-		return true;
-
-	return false;
-}
-
-bool kgem_can_create_cpu(struct kgem *kgem,
-			 int width, int height, int bpp)
-{
-	uint32_t pitch, size;
-
-	if (bpp < 8 || kgem->wedged)
-		return false;
+		return 0;
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
-	DBG(("%s? %d, cpu size %d, max %d\n",
-	     __FUNCTION__,
-	     size > 0 && size <= kgem->max_cpu_size,
-	     size, kgem->max_cpu_size));
-	return size > 0 && size <= kgem->max_cpu_size;
-}
-
-static bool _kgem_can_create_gpu(struct kgem *kgem,
-				 int width, int height, int bpp)
-{
-	uint32_t pitch, size;
-
-	if (bpp < 8 || kgem->wedged)
-		return false;
+	if (size > 0 && size <= kgem->max_cpu_size)
+		flags |= KGEM_CAN_CREATE_CPU | KGEM_CAN_CREATE_GPU;
+	if (size > kgem->large_object_size)
+		flags |= KGEM_CAN_CREATE_LARGE;
+	if (size > kgem->max_object_size)
+		return 0;
 
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
-				 kgem_choose_tiling(kgem,
-						    I915_TILING_X,
+				 kgem_choose_tiling(kgem, I915_TILING_X,
 						    width, height, bpp),
 				 &pitch);
-	DBG(("%s? %d, gpu size %d, max %d\n",
-	     __FUNCTION__,
-	     size > 0 && size < kgem->max_gpu_size,
-	     size, kgem->max_gpu_size));
-	return size > 0 && size < kgem->max_gpu_size;
-}
+	if (size > 0 && size <= kgem->max_gpu_size)
+		flags |= KGEM_CAN_CREATE_GPU;
+	if (size > kgem->large_object_size)
+		flags |= KGEM_CAN_CREATE_LARGE;
+	if (size > kgem->max_object_size)
+		return 0;
 
-#if DEBUG_KGEM
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
-{
-	bool ret = _kgem_can_create_gpu(kgem, width, height, bpp);
-	DBG(("%s(%dx%d, bpp=%d) = %d\n", __FUNCTION__,
-	     width, height, bpp, ret));
-	return ret;
-}
-#else
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
-{
-	return _kgem_can_create_gpu(kgem, width, height, bpp);
+	return flags;
 }
-#endif
 
 inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
 {
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index fea2d45..87dc386 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -161,7 +161,8 @@ struct kgem {
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_tile_size, max_gpu_size, max_cpu_size, max_object_size;
+	uint32_t max_tile_size, max_gpu_size, max_cpu_size;
+	uint32_t large_object_size, max_object_size;
 	uint32_t partial_buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
@@ -201,9 +202,10 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
-bool kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
-bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp);
-bool kgem_can_create_cpu(struct kgem *kgem, int width, int height, int bpp);
+unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
+#define KGEM_CAN_CREATE_GPU	0x1
+#define KGEM_CAN_CREATE_CPU	0x2
+#define KGEM_CAN_CREATE_LARGE	0x4
 
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
diff --git a/src/sna/sna.h b/src/sna/sna.h
index d9ba773..a0ea54f 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -145,7 +145,7 @@ struct sna_pixmap {
 	uint8_t flush :1;
 	uint8_t clear :1;
 	uint8_t undamaged :1;
-	uint8_t gpu :1;
+	uint8_t create :3;
 	uint8_t header :1;
 };
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5d0e042..26fd1ab 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -246,11 +246,8 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-	if ((sna->kgem.has_cpu_bo || !priv->gpu) &&
-	    kgem_can_create_cpu(&sna->kgem,
-				pixmap->drawable.width,
-				pixmap->drawable.height,
-				pixmap->drawable.bitsPerPixel)) {
+	if ((sna->kgem.has_cpu_bo || (priv->create & KGEM_CAN_CREATE_GPU) == 0) &&
+	    (priv->create & KGEM_CAN_CREATE_CPU)) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
@@ -589,15 +586,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, tiling=%d)\n", __FUNCTION__,
 	     width, height, depth, tiling));
 
-	if (depth < 8)
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
-
 	bpp = BitsPerPixel(depth);
-	if (!kgem_can_create_gpu(&sna->kgem, width, height, bpp))
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
-
 	if (tiling == I915_TILING_Y && !sna->have_render)
 		tiling = I915_TILING_X;
 
@@ -672,46 +661,47 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 {
 	struct sna *sna = to_sna_from_screen(screen);
 	PixmapPtr pixmap;
+	unsigned flags;
 	int pad;
 
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
-	if (!kgem_can_create_2d(&sna->kgem, width, height, depth)) {
+	if (!sna->have_render)
+		goto fallback;
+
+	flags = kgem_can_create_2d(&sna->kgem, width, height, depth);
+	if (flags == 0) {
 		DBG(("%s: can not use GPU, just creating shadow\n",
 		     __FUNCTION__));
-		return create_pixmap(sna, screen, width, height, depth, usage);
+		goto fallback;
 	}
 
-	if (!sna->have_render)
-		return create_pixmap(sna, screen,
-				     width, height, depth,
-				     usage);
-
 #if FAKE_CREATE_PIXMAP_USAGE_SCRATCH_HEADER
 	if (width == 0 || height == 0)
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH_HEADER);
+		goto fallback;
 #endif
 
-	if (usage == CREATE_PIXMAP_USAGE_SCRATCH)
-#if USE_BO_FOR_SCRATCH_PIXMAP
-		return sna_pixmap_create_scratch(screen,
-						 width, height, depth,
-						 I915_TILING_X);
-#else
-	return create_pixmap(sna, screen,
-			     width, height, depth,
-			     usage);
-#endif
+	if (usage == CREATE_PIXMAP_USAGE_SCRATCH) {
+		if (flags & KGEM_CAN_CREATE_GPU)
+			return sna_pixmap_create_scratch(screen,
+							 width, height, depth,
+							 I915_TILING_X);
+		else
+			goto fallback;
+	}
 
-	if (usage == SNA_CREATE_SCRATCH)
-		return sna_pixmap_create_scratch(screen,
-						 width, height, depth,
-						 I915_TILING_Y);
+	if (usage == SNA_CREATE_SCRATCH) {
+		if (flags & KGEM_CAN_CREATE_GPU)
+			return sna_pixmap_create_scratch(screen,
+							 width, height, depth,
+							 I915_TILING_Y);
+		else
+			goto fallback;
+	}
 
 	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
-		return create_pixmap(sna, screen, width, height, depth, usage);
+		goto fallback;
 
 	pad = PixmapBytePad(width, depth);
 	if (pad * height <= 4096) {
@@ -741,17 +731,17 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		priv = __sna_pixmap_attach(sna, pixmap);
 		if (priv == NULL) {
 			free(pixmap);
-			return create_pixmap(sna, screen,
-					     width, height, depth, usage);
+			goto fallback;
 		}
 
 		priv->stride = pad;
-		priv->gpu = kgem_can_create_gpu(&sna->kgem,
-						width, height,
-						pixmap->drawable.bitsPerPixel);
+		priv->create = flags;
 	}
 
 	return pixmap;
+
+fallback:
+	return create_pixmap(sna, screen, width, height, depth, usage);
 }
 
 static Bool sna_destroy_pixmap(PixmapPtr pixmap)
@@ -844,7 +834,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 		sna_damage_destroy(&priv->gpu_damage);
 		priv->clear = false;
 
-		if (priv->gpu && pixmap_inplace(sna, pixmap, priv)) {
+		if (priv->create & KGEM_CAN_CREATE_GPU &&
+		    pixmap_inplace(sna, pixmap, priv)) {
 			DBG(("%s: write inplace\n", __FUNCTION__));
 			if (priv->gpu_bo) {
 				if (kgem_bo_is_busy(priv->gpu_bo) &&
@@ -1004,7 +995,7 @@ skip_inplace_map:
 		priv->undamaged = true;
 	}
 
-	if (flags & MOVE_WRITE) {
+	if (flags & MOVE_WRITE || priv->create & KGEM_CAN_CREATE_LARGE) {
 		DBG(("%s: marking as damaged\n", __FUNCTION__));
 		sna_damage_all(&priv->cpu_damage,
 			       pixmap->drawable.width,
@@ -1179,7 +1170,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	if (priv->clear)
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 
-	if (priv->gpu_bo == NULL && !priv->gpu && flags & MOVE_WRITE)
+	if (priv->gpu_bo == NULL &&
+	    (priv->create & KGEM_CAN_CREATE_GPU) == 0 &&
+	    flags & MOVE_WRITE)
 		return _sna_pixmap_move_to_cpu(pixmap, flags);
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
@@ -1692,7 +1685,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 done:
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return true;
@@ -1733,7 +1726,7 @@ sna_drawable_use_bo(DrawablePtr drawable,
 		goto use_cpu_bo;
 
 	if (priv->gpu_bo == NULL) {
-		if (!priv->gpu) {
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0) {
 			DBG(("%s: untiled, will not force allocation\n",
 			     __FUNCTION__));
 			goto use_cpu_bo;
@@ -1832,7 +1825,7 @@ done:
 
 use_gpu_bo:
 	priv->clear = false;
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive,
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
@@ -1883,10 +1876,6 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d)\n", __FUNCTION__, width, height, depth));
 	assert(width);
 	assert(height);
-	if (!sna->have_render ||
-	    !kgem_can_create_gpu(&sna->kgem, width, height, bpp))
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
 
 	if (sna->freed_pixmap) {
 		pixmap = sna->freed_pixmap;
@@ -2000,7 +1989,7 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 		return NULL;
 
 	/* For large bo, try to keep only a single copy around */
-	if (!priv->gpu && priv->ptr) {
+	if (priv->create & KGEM_CAN_CREATE_LARGE && priv->ptr) {
 		sna_damage_all(&priv->gpu_damage,
 			       pixmap->drawable.width,
 			       pixmap->drawable.height);
@@ -2043,7 +2032,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	sna_damage_reduce(&priv->cpu_damage);
 	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
-		if (!wedged(sna) && priv->gpu)
+		if (!wedged(sna) && priv->create & KGEM_CAN_CREATE_GPU)
 			priv->gpu_bo =
 				kgem_create_2d(&sna->kgem,
 					       pixmap->drawable.width,
@@ -2128,10 +2117,13 @@ done:
 	sna_damage_reduce_all(&priv->gpu_damage,
 			      pixmap->drawable.width,
 			      pixmap->drawable.height);
-	if (DAMAGE_IS_ALL(priv->gpu_damage))
+	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		priv->undamaged = false;
+		if (priv->ptr)
+			sna_pixmap_free_cpu(sna, priv);
+	}
 active:
-	if (!priv->pinned && priv->gpu)
+	if (!priv->pinned && (priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return priv;
@@ -2984,7 +2976,7 @@ move_to_gpu(PixmapPtr pixmap, struct sna_pixmap *priv,
 	if (priv->gpu_bo)
 		return TRUE;
 
-	if (!priv->gpu)
+	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 		return FALSE;
 
 	if (priv->cpu_bo) {
@@ -3241,7 +3233,8 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 		} else {
 			dst_priv->clear = false;
-			if (!dst_priv->pinned && dst_priv->gpu)
+			if (!dst_priv->pinned &&
+			    (dst_priv->create & KGEM_CAN_CREATE_LARGE) == 0)
 				list_move(&dst_priv->inactive,
 					  &sna->active_pixmaps);
 		}
@@ -11557,7 +11550,7 @@ static void sna_accel_inactive(struct sna *sna)
 		priv = list_first_entry(&sna->inactive_clock[1],
 					struct sna_pixmap,
 					inactive);
-		assert(priv->gpu);
+		assert((priv->create & KGEM_CAN_CREATE_LARGE) == 0);
 		assert(priv->gpu_bo);
 
 		/* XXX Rather than discarding the GPU buffer here, we
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 43e8642..fc6e6df 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -332,7 +332,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
-		if (!priv->gpu)
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0)
 			goto done;
 
 		if (priv->source_count*w*h >= pixmap->drawable.width * pixmap->drawable.height &&
@@ -380,7 +380,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 			return false;
 
 		upload = true;
-		if (!priv->gpu ||
+		if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 		    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
 				       I915_TILING_X,
 				       pixmap->drawable.width,
@@ -405,7 +405,7 @@ move_to_gpu(PixmapPtr pixmap, const BoxRec *box)
 		return FALSE;
 
 	count = priv->source_count++;
-	if (!priv->gpu ||
+	if ((priv->create & KGEM_CAN_CREATE_GPU) == 0 ||
 	    kgem_choose_tiling(&to_sna_from_pixmap(pixmap)->kgem,
 			       I915_TILING_X,
 			       pixmap->drawable.width,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index 489f215..2805a01 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -102,7 +102,7 @@ too_small(DrawablePtr drawable)
 	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
 		return false;
 
-	return !priv->gpu;
+	return (priv->create & KGEM_CAN_CREATE_GPU) == 0;
 }
 
 static inline Bool
commit d53d93ffa6e133f46c39595294ecf8e2182b5a68
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 15:11:34 2012 +0000

    sna: Update the partial buffer allocation size when reusing an old mapping
    
    Whilst the old mapping is guaranteed to be larger than the requested
    allocation size, keep track of the actual size allows for better packing
    of future buffers. And the code also performs a sanity check that the
    buffer is the size we claim it to be...
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d97a6ac..5ab5c83 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3365,6 +3365,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old) {
 			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
+			alloc = num_pages(old);
 
 			memcpy(&bo->base, old, sizeof(*old));
 			if (old->rq)
commit 6f99555b6b64a0e1baad1853569f7bf521c327c3
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 11:48:59 2012 +0000

    sna: Allow the creation of render targets larger than the maximum bo cache
    
    Given that we now handle uploads to and from bo that are larger than the
    aperture and that usage of such large bo is rare and so unlikely to
    benefit from caching, allow them to be created as render targets and
    destroy as soon as they become inactive.
    
    In principle, this finally enables GPU acceleration of ocitysmap on gen4+,
    but due to the large cost of creating and destroying large bo it is
    disabled on systems that require clflushing. It is, however, a
    pre-requisite for exploiting the enhanced capabilities of IvyBridge.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 30c46fb..d97a6ac 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -661,7 +661,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->max_object_size = kgem->aperture_total / 2;
 	kgem->max_cpu_size = kgem->aperture_total / 2;
-	kgem->max_gpu_size = MAX_CACHE_SIZE;
+	kgem->max_gpu_size = kgem->aperture_total / 2;
+	if (!kgem->has_llc)
+		kgem->max_gpu_size = MAX_CACHE_SIZE;
 	if (gen < 40) {
 		/* If we have to use fences for blitting, we have to make
 		 * sure we can fit them into the aperture.
@@ -672,8 +674,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	}
 	if (kgem->max_gpu_size > kgem->max_cpu_size)
 		kgem->max_gpu_size = kgem->max_cpu_size;
+
 	kgem->max_tile_size = kgem->aperture_total / 4;
-	if (kgem->max_tile_size < kgem->max_gpu_size / 2)
+	if (kgem->max_tile_size > kgem->max_gpu_size / 2)
 		kgem->max_tile_size = kgem->max_gpu_size / 2;
 
 	DBG(("%s: max object size (gpu=%d, cpu=%d, tile=%d)\n",
commit c65ec096e79aa6bda7b2b3ef235e3fd9698b4da7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 11:41:07 2012 +0000

    sna: Decrease tiling step size in case we need to enlarge the box later
    
    We can juggle rendering into large bo on gen4 by redirecting the
    rendering through a proxy that is tile aligned, and so the render target
    may be slightly larger than the tiling step size. As that is then larger
    than the maximum 3D pipeline, the trick fails and we need to resort to a
    temporary render target with copies in and out. In this case, check that
    the tile is aligned to the most pessimistic tiling width and reduce the
    step size to accommodate the enlargement.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 9a98990..43e8642 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1565,6 +1565,9 @@ sna_render_composite_redirect(struct sna *sna,
 		BoxRec box;
 		int w, h;
 
+		DBG(("%s: dst pitch (%d) fits within render pipeline (%d)\n",
+		     __FUNCTION__, op->dst.bo->pitch, sna->render.max_3d_pitch));
+
 		kgem_get_tile_size(&sna->kgem, op->dst.bo->tiling,
 				   &tile_width, &tile_height, &tile_size);
 
@@ -1615,10 +1618,11 @@ sna_render_composite_redirect(struct sna *sna,
 				return FALSE;
 			}
 
+			assert(op->dst.bo != t->real_bo);
 			op->dst.bo->pitch = t->real_bo->pitch;
 
-			op->dst.x += -box.x1;
-			op->dst.y += -box.y1;
+			op->dst.x -= box.x1;
+			op->dst.y -= box.y1;
 			op->dst.width  = w;
 			op->dst.height = h;
 			return TRUE;
@@ -1675,6 +1679,8 @@ sna_render_composite_redirect_done(struct sna *sna,
 	const struct sna_composite_redirect *t = &op->redirect;
 
 	if (t->real_bo) {
+		assert(op->dst.bo != t->real_bo);
+
 		if (t->box.x2 > t->box.x1) {
 			DBG(("%s: copying temporary to dst\n", __FUNCTION__));
 			sna_blt_copy_boxes(sna, GXcopy,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 0bc4539..a3bf19d 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -140,12 +140,15 @@ sna_tiling_composite_done(struct sna *sna,
 	struct sna_composite_op tmp;
 	int x, y, n, step;
 
+	/* Use a small step to accommodate enlargement through tile alignment */
 	step = sna->render.max_3d_size;
+	if (tile->dst_x & (8*512 / tile->dst->pDrawable->bitsPerPixel - 1))
+		step /= 2;
 	while (step * step * 4 > sna->kgem.max_tile_size)
 		step /= 2;
 
-	DBG(("%s -- %dx%d, count=%d\n", __FUNCTION__,
-	     tile->width, tile->height, tile->rect_count));
+	DBG(("%s -- %dx%d, count=%d, step size=%d\n", __FUNCTION__,
+	     tile->width, tile->height, tile->rect_count, step));
 
 	if (tile->rect_count == 0)
 		goto done;
commit 95f3734dd69b82e007095a599cc21f4c63d6ec00
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 11:40:02 2012 +0000

    sna: Allow creation of proxies to proxies
    
    Just update the offset of the new bo by the offset of the existing
    proxy.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 64f729b..30c46fb 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3105,20 +3105,25 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 
 	DBG(("%s: target handle=%d, offset=%d, length=%d, io=%d\n",
 	     __FUNCTION__, target->handle, offset, length, target->io));
-	assert(target->proxy == NULL);
 
 	bo = __kgem_bo_alloc(target->handle, length);
 	if (bo == NULL)
 		return NULL;
 
+	bo->reusable = false;
 	bo->size.bytes = length;
+
 	bo->io = target->io;
 	bo->dirty = target->dirty;
 	bo->tiling = target->tiling;
 	bo->pitch = target->pitch;
+
+	if (target->proxy) {
+		offset += target->delta;
+		target = target->proxy;
+	}
 	bo->proxy = kgem_bo_reference(target);
 	bo->delta = offset;
-	bo->reusable = false;
 	return bo;
 }
 
commit 488937edb67a60389380b405f8f8a548f51e64c7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Mon Jan 30 11:38:36 2012 +0000

    sna: Base prefer-gpu hint on default tiling choice
    
    As on gen4+ tiling increases the maximum usable pitch, we can
    accommodate wider pixmaps on the GPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 7019638..64f729b 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2211,6 +2211,10 @@ bool kgem_can_create_cpu(struct kgem *kgem,
 	size = kgem_surface_size(kgem, false, false,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
+	DBG(("%s? %d, cpu size %d, max %d\n",
+	     __FUNCTION__,
+	     size > 0 && size <= kgem->max_cpu_size,
+	     size, kgem->max_cpu_size));
 	return size > 0 && size <= kgem->max_cpu_size;
 }
 
@@ -2223,8 +2227,15 @@ static bool _kgem_can_create_gpu(struct kgem *kgem,
 		return false;
 
 	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp, I915_TILING_NONE,
+				 width, height, bpp,
+				 kgem_choose_tiling(kgem,
+						    I915_TILING_X,
+						    width, height, bpp),
 				 &pitch);
+	DBG(("%s? %d, gpu size %d, max %d\n",
+	     __FUNCTION__,
+	     size > 0 && size < kgem->max_gpu_size,
+	     size, kgem->max_gpu_size));
 	return size > 0 && size < kgem->max_gpu_size;
 }
 
commit ca252e5b51d7b2f5a7b2c2e0d8fdb024b08096db
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 19:36:55 2012 +0000

    sna: Detect batch overflow and fall back rather than risk an ENOSPC
    
    Having noticed that eog was failing to perform an 8k x 8k copy with
    compiz running on a 965gm, it was time the checks for batch overflow
    were implemented.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 398988a..7250d66 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1824,12 +1824,20 @@ gen2_render_composite(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
+				   NULL))
+			goto cleanup_mask;
+	}
 
 	gen2_emit_composite_state(sna, tmp);
 	return TRUE;
 
+cleanup_mask:
+	if (tmp->mask.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
 cleanup_src:
 	if (tmp->src.bo)
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
@@ -2235,12 +2243,20 @@ gen2_render_composite_spans(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
+	}
 
 	gen2_emit_composite_spans_state(sna, tmp);
 	return TRUE;
 
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
@@ -2435,8 +2451,10 @@ gen2_render_fill_boxes(struct sna *sna,
 	tmp.floats_per_vertex = 2;
 	tmp.floats_per_rect = 6;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen2_emit_fill_composite_state(sna, &tmp, pixel);
 
@@ -2675,6 +2693,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 		if (gen2_render_fill_one_try_blt(sna, dst, bo, color,
 						 x1, y1, x2, y2, alu))
 			return TRUE;
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
 	}
 
 	tmp.op = alu;
@@ -2835,14 +2854,19 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    src_bo->pitch > MAX_3D_PITCH ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH)
+	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
+fallback:
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
 						   dst, dst_bo, dst_dx, dst_dy,
 						   box, n);
+	}
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.op = alu;
@@ -2960,6 +2984,7 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 	    too_large(dst->drawable.width, dst->drawable.height) ||
 	    src_bo->pitch > MAX_3D_PITCH ||
 	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
+fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
 
@@ -2982,8 +3007,11 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 	tmp->base.floats_per_vertex = 4;
 	tmp->base.floats_per_rect = 12;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	tmp->blt  = gen2_render_copy_blt;
 	tmp->done = gen2_render_copy_done;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index da90d82..784d399 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2836,8 +2836,13 @@ gen3_render_composite(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
+				   NULL))
+			goto cleanup_mask;
+	}
 
 	gen3_emit_composite_state(sna, tmp);
 	gen3_align_vertex(sna, tmp);
@@ -3267,13 +3272,21 @@ gen3_render_composite_spans(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
+	}
 
 	gen3_emit_composite_state(sna, &tmp->base);
 	gen3_align_vertex(sna, &tmp->base);
 	return TRUE;
 
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
@@ -3830,14 +3843,19 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 	    src_bo->pitch > MAX_3D_PITCH ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    dst_bo->pitch > MAX_3D_PITCH ||
-	    too_large(dst->drawable.width, dst->drawable.height))
+	    too_large(dst->drawable.width, dst->drawable.height)) {
+fallback:
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
 						   dst, dst_bo, dst_dx, dst_dy,
 						   box, n);
+	}
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	memset(&tmp, 0, sizeof(tmp));
 	tmp.op = alu == GXcopy ? PictOpSrc : PictOpClear;
@@ -3961,6 +3979,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
 	    src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch > MAX_3D_PITCH) {
+fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
 
@@ -3984,8 +4003,11 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	tmp->base.mask.bo = NULL;
 	tmp->base.mask.u.gen3.type = SHADER_NONE;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	tmp->blt  = gen3_render_copy_blt;
 	tmp->done = gen3_render_copy_done;
@@ -4139,8 +4161,10 @@ gen3_render_fill_boxes(struct sna *sna,
 	tmp.mask.u.gen3.type = SHADER_NONE;
 	tmp.u.gen3.num_constants = 0;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen3_emit_composite_state(sna, &tmp);
 	gen3_align_vertex(sna, &tmp);
@@ -4293,8 +4317,10 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 	tmp->base.mask.u.gen3.type = SHADER_NONE;
 	tmp->base.u.gen3.num_constants = 0;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	tmp->blt   = gen3_render_fill_op_blt;
 	tmp->box   = gen3_render_fill_op_box;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index d9542ea..ffdcbb7 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -556,7 +556,7 @@ static Bool gen4_check_dst_format(PictFormat format)
 	case PICT_x4r4g4b4:
 		return TRUE;
 	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
+		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
 		return FALSE;
 	}
 }
@@ -1726,8 +1726,10 @@ gen4_render_video(struct sna *sna,
 	tmp.floats_per_vertex = 3;
 	tmp.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
+	}
 
 	gen4_video_bind_surfaces(sna, &tmp, frame);
 	gen4_align_vertex(sna, &tmp);
@@ -2319,13 +2321,21 @@ gen4_render_composite(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
-			   NULL))
+			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				     tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
+				     NULL))
+			goto cleanup_mask;
+	}
 
 	gen4_bind_surfaces(sna, tmp);
 	gen4_align_vertex(sna, tmp);
 	return TRUE;
 
+cleanup_mask:
+	if (tmp->mask.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
 cleanup_src:
 	if (tmp->src.bo)
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
@@ -2400,6 +2410,8 @@ gen4_render_copy_boxes(struct sna *sna, uint8_t alu,
 {
 	struct sna_composite_op tmp;
 
+	DBG(("%s x %d\n", __FUNCTION__, n));
+
 #if NO_COPY_BOXES
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 		return FALSE;
@@ -2472,8 +2484,11 @@ fallback:
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	gen4_copy_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
@@ -2512,6 +2527,12 @@ gen4_render_copy(struct sna *sna, uint8_t alu,
 		 PixmapPtr dst, struct kgem_bo *dst_bo,
 		 struct sna_copy_op *op)
 {
+	DBG(("%s: src=%ld, dst=%ld, alu=%d\n",
+	     __FUNCTION__,
+	     src->drawable.serialNumber,
+	     dst->drawable.serialNumber,
+	     alu));
+
 #if NO_COPY
 	if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 		return FALSE;
@@ -2575,8 +2596,11 @@ fallback:
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
 	op->base.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	gen4_copy_bind_surfaces(sna, &op->base);
 	gen4_align_vertex(sna, &op->base);
@@ -2731,8 +2755,10 @@ gen4_render_fill_boxes(struct sna *sna,
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen4_fill_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
@@ -2844,8 +2870,10 @@ gen4_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen4.wm_kernel = WM_KERNEL;
 	op->base.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))  {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen4_fill_bind_surfaces(sna, &op->base);
 	gen4_align_vertex(sna, &op->base);
@@ -2884,6 +2912,8 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 {
 	struct sna_composite_op tmp;
 
+	DBG(("%s: color=%08x\n", __FUNCTION__, color));
+
 #if NO_FILL_ONE
 	return gen4_render_fill_one_try_blt(sna, dst, bo, color,
 					    x1, y1, x2, y2, alu);
@@ -2929,8 +2959,10 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.u.gen4.wm_kernel = WM_KERNEL;
 	tmp.u.gen4.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen4_fill_bind_surfaces(sna, &tmp);
 	gen4_align_vertex(sna, &tmp);
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 3465121..dc1e720 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -557,7 +557,7 @@ static Bool gen5_check_dst_format(PictFormat format)
 	case PICT_x4r4g4b4:
 		return TRUE;
 	default:
-		DBG(("%s: unhandled format: %x\n", __FUNCTION__, format));
+		DBG(("%s: unhandled format: %x\n", __FUNCTION__, (int)format));
 		return FALSE;
 	}
 }
@@ -1759,8 +1759,10 @@ gen5_render_video(struct sna *sna,
 	tmp.floats_per_vertex = 3;
 	tmp.floats_per_rect = 9;
 
-	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
+	}
 
 	gen5_video_bind_surfaces(sna, &tmp, frame);
 	gen5_align_vertex(sna, &tmp);
@@ -2352,8 +2354,12 @@ gen5_render_composite(struct sna *sna,
 	tmp->done  = gen5_render_composite_done;
 
 	if (!kgem_check_bo(&sna->kgem,
-			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL))
+			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo, NULL))
+			goto cleanup_mask;
+	}
 
 	if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
 		if (mask == NULL &&
@@ -2372,6 +2378,9 @@ gen5_render_composite(struct sna *sna,
 	gen5_align_vertex(sna, tmp);
 	return TRUE;
 
+cleanup_mask:
+	if (tmp->mask.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
 cleanup_src:
 	if (tmp->src.bo)
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
@@ -2671,13 +2680,21 @@ gen5_render_composite_spans(struct sna *sna,
 
 	if (!kgem_check_bo(&sna->kgem,
 			   tmp->base.dst.bo, tmp->base.src.bo,
-			   NULL))
+			   NULL))  {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
+	}
 
 	gen5_bind_surfaces(sna, &tmp->base);
 	gen5_align_vertex(sna, &tmp->base);
 	return TRUE;
 
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
@@ -2796,8 +2813,11 @@ fallback:
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	if (kgem_bo_is_dirty(src_bo)) {
 		if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
@@ -2946,8 +2966,11 @@ fallback:
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
 	op->base.u.gen5.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))  {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
+	}
 
 	if (kgem_bo_is_dirty(src_bo)) {
 		if (sna_blt_compare_depth(&src->drawable, &dst->drawable) &&
@@ -3093,8 +3116,10 @@ gen5_render_fill_boxes(struct sna *sna,
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen5_fill_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
@@ -3280,8 +3305,10 @@ gen5_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen5.wm_kernel = WM_KERNEL;
 	op->base.u.gen5.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen5_fill_bind_surfaces(sna, &op->base);
 	gen5_align_vertex(sna, &op->base);
@@ -3369,8 +3396,10 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.u.gen5.wm_kernel = WM_KERNEL;
 	tmp.u.gen5.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen5_fill_bind_surfaces(sna, &tmp);
 	gen5_align_vertex(sna, &tmp);
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 0d244f1..93410b6 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1971,6 +1971,7 @@ gen6_render_video(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -2596,6 +2597,10 @@ gen6_render_composite(struct sna *sna,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
 			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
+				   NULL))
+			goto cleanup_mask;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -2603,6 +2608,9 @@ gen6_render_composite(struct sna *sna,
 	gen6_align_vertex(sna, tmp);
 	return TRUE;
 
+cleanup_mask:
+	if (tmp->mask.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
 cleanup_src:
 	if (tmp->src.bo)
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
@@ -3000,6 +3008,10 @@ gen6_render_composite_spans(struct sna *sna,
 			   tmp->base.dst.bo, tmp->base.src.bo,
 			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3007,6 +3019,9 @@ gen6_render_composite_spans(struct sna *sna,
 	gen6_align_vertex(sna, &tmp->base);
 	return TRUE;
 
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
@@ -3198,6 +3213,8 @@ fallback:
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3358,6 +3375,8 @@ fallback:
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3508,8 +3527,10 @@ gen6_render_fill_boxes(struct sna *sna,
 	tmp.u.gen6.nr_inputs = 1;
 	tmp.u.gen6.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen6_emit_fill_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
@@ -3705,8 +3726,10 @@ gen6_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen6.nr_inputs = 1;
 	op->base.u.gen6.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen6_emit_fill_state(sna, &op->base);
 	gen6_align_vertex(sna, &op->base);
@@ -3796,8 +3819,10 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.u.gen6.nr_inputs = 1;
 	tmp.u.gen6.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen6_emit_fill_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
@@ -3893,8 +3918,10 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.u.gen6.nr_inputs = 1;
 	tmp.u.gen6.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen6_emit_fill_state(sna, &tmp);
 	gen6_align_vertex(sna, &tmp);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ff04631..e2486c6 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2036,6 +2036,7 @@ gen7_render_video(struct sna *sna,
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL));
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -2662,6 +2663,10 @@ gen7_render_composite(struct sna *sna,
 			   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
 			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->dst.bo, tmp->src.bo, tmp->mask.bo,
+				   NULL))
+			goto cleanup_mask;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -2669,6 +2674,9 @@ gen7_render_composite(struct sna *sna,
 	gen7_align_vertex(sna, tmp);
 	return TRUE;
 
+cleanup_mask:
+	if (tmp->mask.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->mask.bo);
 cleanup_src:
 	if (tmp->src.bo)
 		kgem_bo_destroy(&sna->kgem, tmp->src.bo);
@@ -3065,6 +3073,10 @@ gen7_render_composite_spans(struct sna *sna,
 			   tmp->base.dst.bo, tmp->base.src.bo,
 			   NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem,
+				   tmp->base.dst.bo, tmp->base.src.bo,
+				   NULL))
+			goto cleanup_src;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3072,6 +3084,9 @@ gen7_render_composite_spans(struct sna *sna,
 	gen7_align_vertex(sna, &tmp->base);
 	return TRUE;
 
+cleanup_src:
+	if (tmp->base.src.bo)
+		kgem_bo_destroy(&sna->kgem, tmp->base.src.bo);
 cleanup_dst:
 	if (tmp->base.redirect.real_bo)
 		kgem_bo_destroy(&sna->kgem, tmp->base.dst.bo);
@@ -3252,6 +3267,8 @@ fallback:
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3412,6 +3429,8 @@ fallback:
 	kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		if (!kgem_check_bo(&sna->kgem, dst_bo, src_bo, NULL))
+			goto fallback;
 		_kgem_set_mode(&sna->kgem, KGEM_RENDER);
 	}
 
@@ -3564,8 +3583,10 @@ gen7_render_fill_boxes(struct sna *sna,
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen7_emit_fill_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
@@ -3761,8 +3782,10 @@ gen7_render_fill(struct sna *sna, uint8_t alu,
 	op->base.u.gen7.nr_inputs = 1;
 	op->base.u.gen7.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, dst_bo, NULL)) {
 		kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, dst_bo, NULL));
+	}
 
 	gen7_emit_fill_state(sna, &op->base);
 	gen7_align_vertex(sna, &op->base);
@@ -3852,8 +3875,10 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen7_emit_fill_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
@@ -3949,8 +3974,10 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
 	tmp.u.gen7.nr_inputs = 1;
 	tmp.u.gen7.ve_id = 1;
 
-	if (!kgem_check_bo(&sna->kgem, bo, NULL))
+	if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo(&sna->kgem, bo, NULL));
+	}
 
 	gen7_emit_fill_state(sna, &tmp);
 	gen7_align_vertex(sna, &tmp);
commit 3aee521bf236994628c4d103a2b8f391a4be2aa7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 19:21:54 2012 +0000

    sna: Add a tiled fallback for large BLT copies
    
    If we are attempting to copy between two large bo, larger than we can
    fit into the aperture, break the copy into smaller steps and use an
    intermediary.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 7efbcf9..b6749ec 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -153,6 +153,7 @@ static bool sna_blt_fill_init(struct sna *sna,
 	if (!kgem_check_bo_fenced(kgem, bo, NULL) ||
 	    !kgem_check_batch(kgem, 12)) {
 		_kgem_submit(kgem);
+		assert(kgem_check_bo_fenced(kgem, bo, NULL));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -290,6 +291,8 @@ static Bool sna_blt_copy_init(struct sna *sna,
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
+		if (!kgem_check_bo_fenced(kgem, src, dst, NULL))
+			return FALSE;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -337,6 +340,8 @@ static Bool sna_blt_alpha_fixup_init(struct sna *sna,
 	kgem_set_mode(kgem, KGEM_BLT);
 	if (!kgem_check_bo_fenced(kgem, src, dst, NULL)) {
 		_kgem_submit(kgem);
+		if (!kgem_check_bo_fenced(kgem, src, dst, NULL))
+			return FALSE;
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -1109,8 +1114,11 @@ prepare_blt_copy(struct sna *sna,
 	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo))
 		return FALSE;
 
-	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL)) {
+	if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo, priv->gpu_bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		if (!kgem_check_bo_fenced(&sna->kgem,
+					  op->dst.bo, priv->gpu_bo, NULL))
+			return FALSE;
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
@@ -1594,6 +1602,7 @@ sna_blt_composite(struct sna *sna,
 
 	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL)) {
 		_kgem_submit(&sna->kgem);
+		assert(kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL));
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 
@@ -1891,6 +1900,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
 	    !kgem_check_reloc(kgem, 1) ||
 	    !kgem_check_bo_fenced(kgem, bo, NULL)) {
 		_kgem_submit(kgem);
+		assert(kgem_check_bo_fenced(&sna->kgem, bo, NULL));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -1964,6 +1974,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	if (!kgem_check_bo_fenced(kgem, bo, NULL) ||
 	    !kgem_check_batch(kgem, 12)) {
 		_kgem_submit(kgem);
+		assert(kgem_check_bo_fenced(&sna->kgem, bo, NULL));
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
@@ -2127,6 +2138,11 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	    !kgem_check_reloc(kgem, 2) ||
 	    !kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
 		_kgem_submit(kgem);
+		if (!kgem_check_bo_fenced(kgem, dst_bo, src_bo, NULL))
+			return sna_tiling_copy_boxes(sna, alu,
+						     src_bo, src_dx, src_dy,
+						     dst_bo, dst_dx, dst_dy,
+						     bpp, box, nbox);
 		_kgem_set_mode(kgem, KGEM_BLT);
 	}
 
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 5df53dc..94c2744 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -507,6 +507,10 @@ Bool sna_tiling_fill_boxes(struct sna *sna,
 			   const xRenderColor *color,
 			   PixmapPtr dst, struct kgem_bo *dst_bo,
 			   const BoxRec *box, int n);
+Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
+			   struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			   struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			   int bpp, const BoxRec *box, int nbox);
 
 Bool sna_blt_composite(struct sna *sna,
 		       uint32_t op,
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index eac664e..0bc4539 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -417,3 +417,99 @@ done:
 	pixman_region_fini(&region);
 	return ret;
 }
+
+Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu,
+			   struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
+			   struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
+			   int bpp, const BoxRec *box, int nbox)
+{
+	RegionRec region, tile, this;
+	struct kgem_bo *bo;
+	int step;
+	Bool ret = FALSE;
+
+	if (!kgem_bo_can_blt(&sna->kgem, src_bo) ||
+	    !kgem_bo_can_blt(&sna->kgem, dst_bo)) {
+		/* XXX */
+		DBG(("%s: tiling blt fail: src?=%d, dst?=%d\n",
+		     __FUNCTION__,
+		     kgem_bo_can_blt(&sna->kgem, src_bo),
+		     kgem_bo_can_blt(&sna->kgem, dst_bo)));
+		return FALSE;
+	}
+
+	pixman_region_init_rects(&region, box, nbox);
+
+	step = sna->render.max_3d_size;
+	while (step * step * 4 > sna->kgem.max_tile_size)
+		step /= 2;
+
+	DBG(("%s (alu=%d), tile.size=%d, box=%dx[(%d, %d), (%d, %d)])\n",
+	     __FUNCTION__, alu, step, nbox,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	for (tile.extents.y1 = tile.extents.y2 = region.extents.y1;
+	     tile.extents.y2 < region.extents.y2;
+	     tile.extents.y1 = tile.extents.y2) {
+		tile.extents.y2 = tile.extents.y1 + step;
+		if (tile.extents.y2 > region.extents.y2)
+			tile.extents.y2 = region.extents.y2;
+
+		for (tile.extents.x1 = tile.extents.x2 = region.extents.x1;
+		     tile.extents.x2 < region.extents.x2;
+		     tile.extents.x1 = tile.extents.x2) {
+			int w, h;
+
+			tile.extents.x2 = tile.extents.x1 + step;
+			if (tile.extents.x2 > region.extents.x2)
+				tile.extents.x2 = region.extents.x2;
+
+			tile.data = NULL;
+
+			RegionNull(&this);
+			RegionIntersect(&this, &region, &tile);
+			if (!RegionNotEmpty(&this))
+				continue;
+
+			w = this.extents.x2 - this.extents.x1;
+			h = this.extents.y2 - this.extents.y1;
+			bo = kgem_create_2d(&sna->kgem, w, h, bpp,
+					    kgem_choose_tiling(&sna->kgem,
+							       I915_TILING_X,
+							       w, h, bpp),
+					    0);
+			if (bo) {
+				int16_t dx = this.extents.x1;
+				int16_t dy = this.extents.y1;
+
+				assert(bo->pitch <= 8192);
+				assert(bo->tiling != I915_TILING_Y);
+
+				if (!sna_blt_copy_boxes(sna, alu,
+							src_bo, src_dx, src_dy,
+							bo, -dx, -dy,
+							bpp, REGION_RECTS(&this), REGION_NUM_RECTS(&this)))
+					goto err;
+
+				if (!sna_blt_copy_boxes(sna, alu,
+							bo, -dx, -dy,
+							dst_bo, dst_dx, dst_dy,
+							bpp, REGION_RECTS(&this), REGION_NUM_RECTS(&this)))
+					goto err;
+
+				kgem_bo_destroy(&sna->kgem, bo);
+			}
+			RegionUninit(&this);
+		}
+	}
+
+	ret = TRUE;
+	goto done;
+err:
+	kgem_bo_destroy(&sna->kgem, bo);
+	RegionUninit(&this);
+done:
+	pixman_region_fini(&region);
+	return ret;
+}
commit df148c962108a7f3efead0b80ab4fe77f3f79c8b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 18:07:14 2012 +0000

    sna: Limit the tile size for uploading into large pixmaps
    
    As we may have a constrained aperture, we need to be careful not to
    exceed our resources limits when uploading the pixel data. (For example,
    fitting two of the maximum bo into a single batch may fail due to
    fragmentation of the GATT.) So be cautious and use more tiles to reduce
    the size of each individual batch.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index fff2d19..7019638 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -672,9 +672,15 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	}
 	if (kgem->max_gpu_size > kgem->max_cpu_size)
 		kgem->max_gpu_size = kgem->max_cpu_size;
-
-	DBG(("%s: max object size (tiled=%d, linear=%d)\n",
-	     __FUNCTION__, kgem->max_gpu_size, kgem->max_cpu_size));
+	kgem->max_tile_size = kgem->aperture_total / 4;
+	if (kgem->max_tile_size < kgem->max_gpu_size / 2)
+		kgem->max_tile_size = kgem->max_gpu_size / 2;
+
+	DBG(("%s: max object size (gpu=%d, cpu=%d, tile=%d)\n",
+	     __FUNCTION__,
+	     kgem->max_gpu_size,
+	     kgem->max_cpu_size,
+	     kgem->max_tile_size));
 
 	/* Convert the aperture thresholds to pages */
 	kgem->aperture_low /= PAGE_SIZE;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index f386967..fea2d45 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -161,7 +161,7 @@ struct kgem {
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_gpu_size, max_cpu_size, max_object_size;
+	uint32_t max_tile_size, max_gpu_size, max_cpu_size, max_object_size;
 	uint32_t partial_buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 14a7901..4f86f8d 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -62,7 +62,8 @@ box_intersect(BoxPtr a, const BoxRec *b)
 static inline bool must_tile(struct sna *sna, int width, int height)
 {
 	return (width  > sna->render.max_3d_size ||
-		height > sna->render.max_3d_size);
+		height > sna->render.max_3d_size ||
+		width * height * 4 > sna->kgem.max_tile_size);
 }
 
 static void read_boxes_inplace(struct kgem *kgem,
@@ -199,6 +200,9 @@ fallback:
 
 			step = MIN(sna->render.max_3d_size,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
+			while (step * step * 4 > sna->kgem.max_tile_size)
+				step /= 2;
+
 			DBG(("%s: tiling download, using %dx%d tiles\n",
 			     __FUNCTION__, step, step));
 
@@ -577,6 +581,9 @@ fallback:
 
 			step = MIN(sna->render.max_3d_size,
 				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
+			while (step * step * 4 > sna->kgem.max_tile_size)
+				step /= 2;
+
 			DBG(("%s: tiling upload, using %dx%d tiles\n",
 			     __FUNCTION__, step, step));
 
diff --git a/src/sna/sna_tiling.c b/src/sna/sna_tiling.c
index 52572bc..eac664e 100644
--- a/src/sna/sna_tiling.c
+++ b/src/sna/sna_tiling.c
@@ -138,7 +138,11 @@ sna_tiling_composite_done(struct sna *sna,
 {
 	struct sna_tile_state *tile = op->u.priv;
 	struct sna_composite_op tmp;
-	int x, y, n, step = sna->render.max_3d_size;
+	int x, y, n, step;
+
+	step = sna->render.max_3d_size;
+	while (step * step * 4 > sna->kgem.max_tile_size)
+		step /= 2;
 
 	DBG(("%s -- %dx%d, count=%d\n", __FUNCTION__,
 	     tile->width, tile->height, tile->rect_count));
@@ -318,21 +322,26 @@ sna_tiling_fill_boxes(struct sna *sna,
 {
 	RegionRec region, tile, this;
 	struct kgem_bo *bo;
+	int step;
 	Bool ret = FALSE;
 
 	pixman_region_init_rects(&region, box, n);
 
+	step = sna->render.max_3d_size;
+	while (step * step * 4 > sna->kgem.max_tile_size)
+		step /= 2;
+
 	DBG(("%s (op=%d, format=%x, color=(%04x,%04x,%04x, %04x), tile.size=%d, box=%dx[(%d, %d), (%d, %d)])\n",
 	     __FUNCTION__, op, (int)format,
 	     color->red, color->green, color->blue, color->alpha,
-	     sna->render.max_3d_size, n,
+	     step, n,
 	     region.extents.x1, region.extents.y1,
 	     region.extents.x2, region.extents.y2));
 
 	for (tile.extents.y1 = tile.extents.y2 = region.extents.y1;
 	     tile.extents.y2 < region.extents.y2;
 	     tile.extents.y1 = tile.extents.y2) {
-		tile.extents.y2 = tile.extents.y1 + sna->render.max_3d_size;
+		tile.extents.y2 = tile.extents.y1 + step;
 		if (tile.extents.y2 > region.extents.y2)
 			tile.extents.y2 = region.extents.y2;
 
@@ -341,7 +350,7 @@ sna_tiling_fill_boxes(struct sna *sna,
 		     tile.extents.x1 = tile.extents.x2) {
 			PixmapRec tmp;
 
-			tile.extents.x2 = tile.extents.x1 + sna->render.max_3d_size;
+			tile.extents.x2 = tile.extents.x1 + step;
 			if (tile.extents.x2 > region.extents.x2)
 				tile.extents.x2 = region.extents.x2;
 
commit e1e67e8f394480eb4fef1238ccfd49cc36e4b6f2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 15:43:42 2012 +0000

    sna: Fix the "trivial" fix to improve error handling
    
    The logic was just backwards and we tried to upload a shadowless GPU
    pixmap.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1d7b8e9..fff2d19 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3490,6 +3490,12 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 	DBG(("%s : (%d, %d), (%d, %d), stride=%d, bpp=%d\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2, stride, bpp));
 
+	assert(data);
+	assert(width > 0);
+	assert(height > 0);
+	assert(stride);
+	assert(bpp);
+
 	bo = kgem_create_buffer_2d(kgem,
 				   width, height, bpp,
 				   KGEM_BUFFER_WRITE_INPLACE, &dst);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index cbaaafd..9a98990 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1044,7 +1044,7 @@ sna_render_picture_extract(struct sna *sna,
 			return 0;
 	} else {
 		bool upload = true;
-		if (!texture_is_cpu(pixmap, &box) &&
+		if (!texture_is_cpu(pixmap, &box) ||
 		    move_to_gpu(pixmap, &box)) {
 			struct sna_pixmap *priv;
 
commit d3fb1e1e89ccf5cefe6add66de4f960ef07cac60
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 14:20:33 2012 +0000

    sna: Handle GPU creation failure when uploading subtexture
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index abc6325..cbaaafd 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1043,20 +1043,23 @@ sna_render_picture_extract(struct sna *sna,
 		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
 			return 0;
 	} else {
-		if (texture_is_cpu(pixmap, &box) &&
-		    !move_to_gpu(pixmap, &box)) {
-			bo = kgem_upload_source_image(&sna->kgem,
-						      pixmap->devPrivate.ptr,
-						      &box,
-						      pixmap->devKind,
-						      pixmap->drawable.bitsPerPixel);
-		} else {
+		bool upload = true;
+		if (!texture_is_cpu(pixmap, &box) &&
+		    move_to_gpu(pixmap, &box)) {
 			struct sna_pixmap *priv;
 
 			priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
-			if (priv)
+			if (priv) {
 				src_bo = priv->gpu_bo;
+				upload = false;
+			}
 		}
+		if (upload)
+			bo = kgem_upload_source_image(&sna->kgem,
+						      pixmap->devPrivate.ptr,
+						      &box,
+						      pixmap->devKind,
+						      pixmap->drawable.bitsPerPixel);
 	}
 	if (src_bo) {
 		bo = kgem_create_2d(&sna->kgem, w, h,
diff --git a/src/sna/sna_trapezoids.c b/src/sna/sna_trapezoids.c
index 2fd1eaf..eb6c968 100644
--- a/src/sna/sna_trapezoids.c
+++ b/src/sna/sna_trapezoids.c
@@ -3973,6 +3973,8 @@ trap_mask_converter(PicturePtr picture,
 
 	pixmap = get_drawable_pixmap(picture->pDrawable);
 	priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE);
+	if (priv == NULL)
+		return false;
 
 	/* XXX strict adherence to the Render specification */
 	if (picture->polyMode == PolyModePrecise) {
commit 518a99ea34b26aa094f29a4cc1ea5419f63a0e56
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 14:09:46 2012 +0000

    sna: Always create a GPU bo for copying from an existent source GPU bo
    
    Make sure we prevent the readback of an active source GPU bo by always
    preferring to do the copy on the GPU if the data is already resident.
    This fixes the second regression from e583af9cc, (sna: Experiment with
    creating large objects as CPU bo).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ce35113..5d0e042 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3211,7 +3211,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	}
 
 	/* Try to maintain the data on the GPU */
-	if (dst_priv->gpu_bo == NULL && dst_priv->gpu &&
+	if (dst_priv->gpu_bo == NULL &&
 	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
 	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
 		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap);
commit 624d9843abda9ca6bd1b004d70a6fdc082ba9653
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 13:55:20 2012 +0000

    sna: Ignore map status and pick the first inactive bo for reuse
    
    This fixes the performance regression introduced with e583af9cca,
    (sna: Experiment with creating large objects as CPU bo), as we ended up
    creating fresh bo and incurring setup and thrashing overhead, when we
    already had plenty cached.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d062a1d..1d7b8e9 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2501,9 +2501,6 @@ search_inactive:
 			continue;
 		}
 
-		if ((flags & CREATE_CPU_MAP) == 0 && IS_CPU_MAP(bo->map))
-			continue;
-
 		if (bo->tiling != tiling ||
 		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
 			if (tiling != gem_set_tiling(kgem->fd,
commit 5c6255ba2f12f04938fd586ca02562ee3cae05af
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sun Jan 29 11:02:38 2012 +0000

    sna: Determine whether to use a partial proxy based on the pitch
    
    On gen4+ devices the maximum render pitch is much larger than is simply
    required for the maximum coordinates. This makes it possible to use
    proxy textures as a subimage into the oversized texture without having
    to blit into a temporary copy for virtually every single bo we use.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index 2a97cea..398988a 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -55,6 +55,7 @@
 #define PREFER_BLT_COPY 1
 
 #define MAX_3D_SIZE 2048
+#define MAX_3D_PITCH 8192
 
 #define BATCH(v) batch_emit(sna, v)
 #define BATCH_F(v) batch_emit_float(sna, v)
@@ -547,7 +548,7 @@ gen2_get_batch(struct sna *sna)
 
 static void gen2_emit_target(struct sna *sna, const struct sna_composite_op *op)
 {
-	assert(op->dst.bo->pitch >= 8 && op->dst.bo->pitch <= 8192);
+	assert(op->dst.bo->pitch >= 8 && op->dst.bo->pitch <= MAX_3D_PITCH);
 	assert(sna->render_state.gen2.vertex_offset == 0);
 
 	if (sna->render_state.gen2.target == op->dst.bo->unique_id) {
@@ -1736,7 +1737,7 @@ gen2_render_composite(struct sna *sna,
 
 	tmp->op = op;
 	if (too_large(tmp->dst.width, tmp->dst.height) ||
-	    tmp->dst.bo->pitch > 8192) {
+	    tmp->dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, tmp,
 						   dst_x, dst_y, width, height))
 			return FALSE;
@@ -2192,7 +2193,7 @@ gen2_render_composite_spans(struct sna *sna,
 
 	tmp->base.op = op;
 	if (too_large(tmp->base.dst.width, tmp->base.dst.height) ||
-	    tmp->base.dst.bo->pitch > 8192) {
+	    tmp->base.dst.bo->pitch > MAX_3D_PITCH) {
 		if (!sna_render_composite_redirect(sna, &tmp->base,
 						   dst_x, dst_y, width, height))
 			return FALSE;
@@ -2388,7 +2389,7 @@ gen2_render_fill_boxes(struct sna *sna,
 	     color->red, color->green, color->blue, color->alpha));
 
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch < 8 || dst_bo->pitch > 8192 ||
+	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH ||
 	    !gen2_check_dst_format(format)) {
 		DBG(("%s: try blt, too large or incompatible destination\n",
 		     __FUNCTION__));
@@ -2589,7 +2590,7 @@ gen2_render_fill(struct sna *sna, uint8_t alu,
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch < 8 || dst_bo->pitch > 8192)
+	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH)
 		return sna_blt_fill(sna, alu,
 				    dst_bo, dst->drawable.bitsPerPixel,
 				    color,
@@ -2665,7 +2666,7 @@ gen2_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
-	    bo->pitch < 8 || bo->pitch > 8192)
+	    bo->pitch < 8 || bo->pitch > MAX_3D_PITCH)
 		return gen2_render_fill_one_try_blt(sna, dst, bo, color,
 						    x1, y1, x2, y2, alu);
 
@@ -2832,9 +2833,9 @@ gen2_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	if (src_bo == dst_bo || /* XXX handle overlap using 3D ? */
 	    too_large(src->drawable.width, src->drawable.height) ||
-	    src_bo->pitch > 8192 ||
+	    src_bo->pitch > MAX_3D_PITCH ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch < 8 || dst_bo->pitch > 8192)
+	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH)
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
 						   dst, dst_bo, dst_dx, dst_dy,
@@ -2957,7 +2958,8 @@ gen2_render_copy(struct sna *sna, uint8_t alu,
 	/* Must use the BLT if we can't RENDER... */
 	if (too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    src_bo->pitch > 8192 || dst_bo->pitch < 8 || dst_bo->pitch > 8192) {
+	    src_bo->pitch > MAX_3D_PITCH ||
+	    dst_bo->pitch < 8 || dst_bo->pitch > MAX_3D_PITCH) {
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
 
@@ -3045,5 +3047,6 @@ Bool gen2_render_init(struct sna *sna)
 	render->flush = gen2_render_flush;
 
 	render->max_3d_size = MAX_3D_SIZE;
+	render->max_3d_pitch = MAX_3D_PITCH;
 	return TRUE;
 }
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 931142d..da90d82 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -65,6 +65,7 @@ enum {
 };
 
 #define MAX_3D_SIZE 2048
+#define MAX_3D_PITCH 8192
 
 #define OUT_BATCH(v) batch_emit(sna, v)
 #define OUT_BATCH_F(v) batch_emit_float(sna, v)
@@ -143,7 +144,7 @@ static inline uint32_t gen3_buf_tiling(uint32_t tiling)
 static inline Bool
 gen3_check_pitch_3d(struct kgem_bo *bo)
 {
-	return bo->pitch <= 8192;
+	return bo->pitch <= MAX_3D_PITCH;
 }
 
 static uint32_t gen3_get_blend_cntl(int op,
@@ -3826,9 +3827,9 @@ gen3_render_copy_boxes(struct sna *sna, uint8_t alu,
 
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    src_bo == dst_bo || /* XXX handle overlap using 3D ? */
-	    src_bo->pitch > 8192 ||
+	    src_bo->pitch > MAX_3D_PITCH ||
 	    too_large(src->drawable.width, src->drawable.height) ||
-	    dst_bo->pitch > 8192 ||
+	    dst_bo->pitch > MAX_3D_PITCH ||
 	    too_large(dst->drawable.width, dst->drawable.height))
 		return sna_blt_copy_boxes_fallback(sna, alu,
 						   src, src_bo, src_dx, src_dy,
@@ -3959,7 +3960,7 @@ gen3_render_copy(struct sna *sna, uint8_t alu,
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(src->drawable.width, src->drawable.height) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    src_bo->pitch > 8192 || dst_bo->pitch > 8192) {
+	    src_bo->pitch > MAX_3D_PITCH || dst_bo->pitch > MAX_3D_PITCH) {
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return FALSE;
 
@@ -4083,7 +4084,7 @@ gen3_render_fill_boxes(struct sna *sna,
 	     color->red, color->green, color->blue, color->alpha));
 
 	if (too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch > 8192 ||
+	    dst_bo->pitch > MAX_3D_PITCH ||
 	    !gen3_check_dst_format(format)) {
 		DBG(("%s: try blt, too large or incompatible destination\n",
 		     __FUNCTION__));
@@ -4265,7 +4266,7 @@ gen3_render_fill(struct sna *sna, uint8_t alu,
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    dst_bo->pitch > 8192)
+	    dst_bo->pitch > MAX_3D_PITCH)
 		return sna_blt_fill(sna, alu,
 				    dst_bo, dst->drawable.bitsPerPixel,
 				    color,
@@ -4346,7 +4347,7 @@ gen3_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
 	/* Must use the BLT if we can't RENDER... */
 	if (!(alu == GXcopy || alu == GXclear) ||
 	    too_large(dst->drawable.width, dst->drawable.height) ||
-	    bo->pitch > 8192)
+	    bo->pitch > MAX_3D_PITCH)
 		return gen3_render_fill_one_try_blt(sna, dst, bo, color,
 						    x1, y1, x2, y2, alu);
 
@@ -4424,5 +4425,6 @@ Bool gen3_render_init(struct sna *sna)
 	render->fini = gen3_render_fini;
 
 	render->max_3d_size = MAX_3D_SIZE;
+	render->max_3d_pitch = MAX_3D_PITCH;
 	return TRUE;
 }
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 91d5f49..d9542ea 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -3220,5 +3220,6 @@ Bool gen4_render_init(struct sna *sna)
 	sna->render.fini = gen4_render_fini;
 
 	sna->render.max_3d_size = 8192;
+	sna->render.max_3d_pitch = 1 << 18;
 	return TRUE;
 }
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 2c6d020..3465121 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -3702,5 +3702,6 @@ Bool gen5_render_init(struct sna *sna)
 	sna->render.fini = gen5_render_fini;
 
 	sna->render.max_3d_size = MAX_3D_SIZE;
+	sna->render.max_3d_pitch = 1 << 18;
 	return TRUE;
 }
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 41e05d0..0d244f1 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -4044,5 +4044,6 @@ Bool gen6_render_init(struct sna *sna)
 	sna->render.fini = gen6_render_fini;
 
 	sna->render.max_3d_size = GEN6_MAX_SIZE;
+	sna->render.max_3d_pitch = 1 << 18;
 	return TRUE;
 }
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 282b724..ff04631 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -4097,5 +4097,6 @@ Bool gen7_render_init(struct sna *sna)
 	sna->render.fini = gen7_render_fini;
 
 	sna->render.max_3d_size = GEN7_MAX_SIZE;
+	sna->render.max_3d_pitch = 1 << 18;
 	return TRUE;
 }
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 208c8f2..d062a1d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -704,6 +704,39 @@ static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 	return ALIGN(width, scanout ? 64 : kgem->min_alignment);
 }
 
+void kgem_get_tile_size(struct kgem *kgem, int tiling,
+			int *tile_width, int *tile_height, int *tile_size)
+{
+	if (kgem->gen < 30) {
+		if (tiling) {
+			*tile_width = 512;
+			*tile_height = 16;
+			*tile_size = 2048;
+		} else {
+			*tile_width = 1;
+			*tile_height = 1;
+			*tile_size = 1;
+		}
+	} else switch (tiling) {
+	default:
+	case I915_TILING_NONE:
+		*tile_width = 1;
+		*tile_height = 1;
+		*tile_size = 1;
+		break;
+	case I915_TILING_X:
+		*tile_width = 512;
+		*tile_height = 8;
+		*tile_size = 4096;
+		break;
+	case I915_TILING_Y:
+		*tile_width = kgem->gen <= 30 ? 512 : 128;
+		*tile_height = 32;
+		*tile_size = 4096;
+		break;
+	}
+}
+
 static uint32_t kgem_surface_size(struct kgem *kgem,
 				  bool relaxed_fencing,
 				  bool scanout,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 2631e81..f386967 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -361,6 +361,8 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 		   const void *data, int length);
 
 int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_get_tile_size(struct kgem *kgem, int tiling,
+			int *tile_width, int *tile_height, int *tile_size);
 
 static inline int kgem_bo_size(struct kgem_bo *bo)
 {
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 7077f36..abc6325 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -816,10 +816,27 @@ sna_render_picture_partial(struct sna *sna,
 	struct kgem_bo *bo = NULL;
 	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
 	BoxRec box;
+	int tile_width, tile_height, tile_size;
+	int offset;
 
 	DBG(("%s (%d, %d)x(%d, %d) [dst=(%d, %d)]\n",
 	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
 
+	if (use_cpu_bo(sna, pixmap, &box)) {
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
+			return 0;
+
+		bo = sna_pixmap(pixmap)->cpu_bo;
+	} else {
+		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+			return 0;
+
+		bo = sna_pixmap(pixmap)->gpu_bo;
+	}
+
+	if (bo->pitch > sna->render.max_3d_pitch)
+		return 0;
+
 	box.x1 = x;
 	box.y1 = y;
 	box.x2 = x + w;
@@ -855,51 +872,65 @@ sna_render_picture_partial(struct sna *sna,
 		}
 	}
 
-	/* Presume worst case tile-row alignment for Y-tiling */
-	box.y1 = box.y1 & (64 - 1);
-	box.y2 = ALIGN(box.y2, 64);
+	kgem_get_tile_size(&sna->kgem, bo->tiling,
+			   &tile_width, &tile_height, &tile_size);
+
+	/* Ensure we align to an even tile row */
+	box.y1 = box.y1 & ~(2*tile_height - 1);
+	box.y2 = ALIGN(box.y2, 2*tile_height);
+	if (box.y2 > pixmap->drawable.height)
+		box.y2 = pixmap->drawable.height;
+
+	box.x1 = box.x1 & ~(tile_width * 8 / pixmap->drawable.bitsPerPixel - 1);
+	box.x2 = ALIGN(box.x2, tile_width * 8 / pixmap->drawable.bitsPerPixel);
+	if (box.x2 > pixmap->drawable.width)
+		box.x2 = pixmap->drawable.width;
+
 	w = box.x2 - box.x1;
 	h = box.y2 - box.y1;
 	DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
 	     box.x1, box.y1, box.x2, box.y2, w, h,
 	     pixmap->drawable.width, pixmap->drawable.height));
-	if (w <= 0 || h <= 0 || h > sna->render.max_3d_size)
+	if (w <= 0 || h <= 0 ||
+	    w > sna->render.max_3d_size ||
+	    h > sna->render.max_3d_size)
 		return 0;
 
-	memset(&channel->embedded_transform,
-	       0,
-	       sizeof(channel->embedded_transform));
-	channel->embedded_transform.matrix[0][0] = 1 << 16;
-	channel->embedded_transform.matrix[0][2] = 0;
-	channel->embedded_transform.matrix[1][1] = 1 << 16;
-	channel->embedded_transform.matrix[1][2] = -box.y1 << 16;
-	channel->embedded_transform.matrix[2][2] = 1 << 16;
-	if (channel->transform)
+	/* How many tiles across are we? */
+	offset = box.x1 * pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+	channel->bo = kgem_create_proxy(bo,
+					box.y1 * bo->pitch + offset,
+					h * bo->pitch);
+	if (channel->bo == NULL)
+		return 0;
+
+	channel->bo->pitch = bo->pitch;
+
+	if (channel->transform) {
+		memset(&channel->embedded_transform,
+		       0,
+		       sizeof(channel->embedded_transform));
+		channel->embedded_transform.matrix[0][0] = 1 << 16;
+		channel->embedded_transform.matrix[0][2] = -box.x1 << 16;
+		channel->embedded_transform.matrix[1][1] = 1 << 16;
+		channel->embedded_transform.matrix[1][2] = -box.y1 << 16;
+		channel->embedded_transform.matrix[2][2] = 1 << 16;
 		pixman_transform_multiply(&channel->embedded_transform,
 					  &channel->embedded_transform,
 					  channel->transform);
-	channel->transform = &channel->embedded_transform;
-
-	if (use_cpu_bo(sna, pixmap, &box)) {
-		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
-			return 0;
-
-		bo = sna_pixmap(pixmap)->cpu_bo;
+		channel->transform = &channel->embedded_transform;
 	} else {
-		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
-			return 0;
-
-		bo = sna_pixmap(pixmap)->gpu_bo;
+		x -= box.x1;
+		y -= box.y1;
 	}
 
 	channel->offset[0] = x - dst_x;
 	channel->offset[1] = y - dst_y;
-	channel->scale[0] = 1.f/pixmap->drawable.width;
+	channel->scale[0] = 1.f/w;
 	channel->scale[1] = 1.f/h;
-	channel->width  = pixmap->drawable.width;
+	channel->width  = w;
 	channel->height = h;
-	channel->bo = kgem_create_proxy(bo, box.y1 * bo->pitch, h * bo->pitch);
-	return channel->bo != NULL;
+	return 1;
 }
 
 int
@@ -927,8 +958,7 @@ sna_render_picture_extract(struct sna *sna,
 		return -1;
 	}
 
-	if (pixmap->drawable.width < sna->render.max_3d_size &&
-	    sna_render_picture_partial(sna, picture, channel,
+	if (sna_render_picture_partial(sna, picture, channel,
 				       x, y, w, h,
 				       dst_x, dst_y))
 		return 1;
@@ -1527,30 +1557,67 @@ sna_render_composite_redirect(struct sna *sna,
 		return FALSE;
 	}
 
-	if (op->dst.pixmap->drawable.width <= sna->render.max_3d_size) {
-		int y1, y2;
+	if (op->dst.bo->pitch <= sna->render.max_3d_pitch) {
+		int tile_width, tile_height, tile_size;
+		BoxRec box;
+		int w, h;
 
-		assert(op->dst.pixmap->drawable.height > sna->render.max_3d_size);
-		y1 =  y + op->dst.y;
-		y2 =  y1 + height;
-		y1 &= y1 & (64 - 1);
-		y2 = ALIGN(y2, 64);
+		kgem_get_tile_size(&sna->kgem, op->dst.bo->tiling,
+				   &tile_width, &tile_height, &tile_size);
+
+		box.x1 = x;
+		box.x2 = x + width;
+		box.y1 = y;
+		box.y2 = y + height;
+
+		/* Ensure we align to an even tile row */
+		box.y1 = box.y1 & ~(2*tile_height - 1);
+		box.y2 = ALIGN(box.y2, 2*tile_height);
+		if (box.y2 > op->dst.pixmap->drawable.height)
+			box.y2 = op->dst.pixmap->drawable.height;
+
+		box.x1 = box.x1 & ~(tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel - 1);
+		box.x2 = ALIGN(box.x2, tile_width * 8 / op->dst.pixmap->drawable.bitsPerPixel);
+		if (box.x2 > op->dst.pixmap->drawable.width)
+			box.x2 = op->dst.pixmap->drawable.width;
+
+		w = box.x2 - box.x1;
+		h = box.y2 - box.y1;
+		DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
+		     box.x1, box.y1, box.x2, box.y2, w, h,
+		     op->dst.pixmap->drawable.width,
+		     op->dst.pixmap->drawable.height));
+		if (w <= sna->render.max_3d_size &&
+		    h <= sna->render.max_3d_size) {
+			int offset;
 
-		if (y2 - y1 <= sna->render.max_3d_size) {
 			t->box.x2 = t->box.x1 = op->dst.x;
 			t->box.y2 = t->box.y1 = op->dst.y;
-			t->real_bo = priv->gpu_bo;
+			t->real_bo = op->dst.bo;
 			t->real_damage = op->damage;
 			if (op->damage) {
 				t->damage = sna_damage_create();
 				op->damage = &t->damage;
 			}
 
-			op->dst.bo = kgem_create_proxy(priv->gpu_bo,
-						       y1 * priv->gpu_bo->pitch,
-						       (y2 - y1) * priv->gpu_bo->pitch);
-			op->dst.y += -y1;
-			op->dst.height = y2 - y1;
+			/* How many tiles across are we? */
+			offset = box.x1 * op->dst.pixmap->drawable.bitsPerPixel / 8 / tile_width * tile_size;
+			op->dst.bo = kgem_create_proxy(op->dst.bo,
+						       box.y1 * op->dst.bo->pitch + offset,
+						       h * op->dst.bo->pitch);
+			if (!op->dst.bo) {
+				t->real_bo = NULL;
+				if (t->damage)
+					__sna_damage_destroy(t->damage);
+				return FALSE;
+			}
+
+			op->dst.bo->pitch = t->real_bo->pitch;
+
+			op->dst.x += -box.x1;
+			op->dst.y += -box.y1;
+			op->dst.width  = w;
+			op->dst.height = h;
 			return TRUE;
 		}
 	}
@@ -1583,7 +1650,7 @@ sna_render_composite_redirect(struct sna *sna,
 		return FALSE;
 	}
 
-	t->real_bo = priv->gpu_bo;
+	t->real_bo = op->dst.bo;
 	t->real_damage = op->damage;
 	if (op->damage) {
 		t->damage = sna_damage_create();
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index b23a8a7..5df53dc 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -186,6 +186,7 @@ struct sna_copy_op {
 
 struct sna_render {
 	int max_3d_size;
+	int max_3d_pitch;
 
 	Bool (*composite)(struct sna *sna, uint8_t op,
 			  PicturePtr dst, PicturePtr src, PicturePtr mask,
commit 65466f86263b3788b438fe021a12ade371190b01
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Sat Jan 28 01:54:47 2012 +0000

    sna: Allow ridiculously large bo, up to half the total GATT
    
    Such large bos place extreme stress on the system; for example, trying
    to mmap a 1GiB bo into the CPU domain currently fails due to a kernel
    bug. :( So if you can avoid the swap thrashing during the upload, the
    ddx can now handle 16k x 16k images on gen4+ on the GPU. That is fine
    until you want two such images...
    
    The real complication comes in uploading to (and downloading from) such
    large textures, as they are too large for a single operation with
    automatic detiling via either the BLT or the RENDER ring. We could do
    manual tiling/switching or, as this patch does, tile the transfer in
    chunks small enough to fit into either pipeline.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/blt.c b/src/sna/blt.c
index fb3dd35..65d586c 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -154,6 +154,8 @@ memcpy_blt(const void *src, void *dst, int bpp,
 	uint8_t *dst_bytes;
 	int byte_width;
 
+	assert(src);
+	assert(dst);
 	assert(width && height);
 	assert(bpp >= 8);
 
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index d813d95..41e05d0 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -1193,6 +1193,7 @@ gen6_bind_bo(struct sna *sna,
 			       bo, domains, 0);
 	ss[2] = ((width - 1)  << GEN6_SURFACE_WIDTH_SHIFT |
 		 (height - 1) << GEN6_SURFACE_HEIGHT_SHIFT);
+	assert(bo->pitch <= (1 << 18));
 	ss[3] = (gen6_tiling_bits(bo->tiling) |
 		 (bo->pitch - 1) << GEN6_SURFACE_PITCH_SHIFT);
 	ss[4] = 0;
@@ -3136,10 +3137,19 @@ fallback:
 		if (!sna_blt_compare_depth(&src->drawable, &dst->drawable))
 			return false;
 
-		return sna_blt_copy_boxes_fallback(sna, alu,
-						   src, src_bo, src_dx, src_dy,
-						   dst, dst_bo, dst_dx, dst_dy,
-						   box, n);
+		if (sna_blt_copy_boxes_fallback(sna, alu,
+						 src, src_bo, src_dx, src_dy,
+						 dst, dst_bo, dst_dx, dst_dy,
+						 box, n))
+			return true;
+
+		return false;
+#if 0
+		return sna_tiling_copy_boxes(sna,
+					     src, src_bo, src_dx, src_dy,
+					     dst, dst_bo, dst_dx, dst_dy,
+					     box, n);
+#endif
 	}
 
 	if (dst->drawable.depth == src->drawable.depth) {
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 86a4372..208c8f2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -45,7 +45,7 @@
 #endif
 
 static struct kgem_bo *
-search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags);
+search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags);
 
 static inline void _list_del(struct list *list)
 {
@@ -99,7 +99,6 @@ static inline void list_replace(struct list *old,
 #define DBG(x) ErrorF x
 #endif
 
-#define PAGE_SIZE 4096
 #define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE)
 #define MAX_GTT_VMA_CACHE 512
 #define MAX_CPU_VMA_CACHE INT16_MAX
@@ -120,6 +119,14 @@ struct kgem_partial_bo {
 static struct kgem_bo *__kgem_freed_bo;
 static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
 
+static inline int bytes(struct kgem_bo *bo)
+{
+	return kgem_bo_size(bo);
+}
+
+#define bucket(B) (B)->size.pages.bucket
+#define num_pages(B) (B)->size.pages.count
+
 #ifndef NDEBUG
 static bool validate_partials(struct kgem *kgem)
 {
@@ -128,10 +135,10 @@ static bool validate_partials(struct kgem *kgem)
 	list_for_each_entry_safe(bo, next, &kgem->partial, base.list) {
 		if (bo->base.list.next == &kgem->partial)
 			return true;
-		if (bo->base.size - bo->used < next->base.size - next->used) {
+		if (bytes(&bo->base) - bo->used < bytes(&next->base) - next->used) {
 			ErrorF("this rem: %d, next rem: %d\n",
-			       bo->base.size - bo->used,
-			       next->base.size - next->used);
+			       bytes(&bo->base) - bo->used,
+			       bytes(&next->base) - next->used);
 			goto err;
 		}
 	}
@@ -140,7 +147,7 @@ static bool validate_partials(struct kgem *kgem)
 err:
 	list_for_each_entry(bo, &kgem->partial, base.list)
 		ErrorF("bo: used=%d / %d, rem=%d\n",
-		       bo->used, bo->base.size, bo->base.size - bo->used);
+		       bo->used, bytes(&bo->base), bytes(&bo->base) - bo->used);
 	return false;
 }
 #else
@@ -312,7 +319,7 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 	assert(!bo->purged);
 	assert(!kgem_busy(kgem, bo->handle));
 
-	assert(length <= bo->size);
+	assert(length <= bytes(bo));
 	if (gem_write(kgem->fd, bo->handle, 0, length, data))
 		return FALSE;
 
@@ -322,17 +329,13 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 	return TRUE;
 }
 
-static uint32_t gem_create(int fd, int size)
+static uint32_t gem_create(int fd, int num_pages)
 {
 	struct drm_i915_gem_create create;
 
-#if DEBUG_KGEM
-	assert((size & (PAGE_SIZE-1)) == 0);
-#endif
-
 	VG_CLEAR(create);
 	create.handle = 0;
-	create.size = size;
+	create.size = PAGE_SIZE * num_pages;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 
 	return create.handle;
@@ -415,7 +418,7 @@ static void gem_close(int fd, uint32_t handle)
 	(void)drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close);
 }
 
-static inline unsigned long __fls(unsigned long word)
+constant inline static unsigned long __fls(unsigned long word)
 {
 	asm("bsr %1,%0"
 	    : "=r" (word)
@@ -423,24 +426,21 @@ static inline unsigned long __fls(unsigned long word)
 	return word;
 }
 
-constant inline static int cache_bucket(int size)
+constant inline static int cache_bucket(int num_pages)
 {
-	uint32_t order = __fls(size / PAGE_SIZE);
-	assert(order < NUM_CACHE_BUCKETS);
-	return order;
+	return __fls(num_pages);
 }
 
 static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
-				      int handle, int size)
+				      int handle, int num_pages)
 {
-	assert(size);
+	assert(num_pages);
 	memset(bo, 0, sizeof(*bo));
 
 	bo->refcnt = 1;
 	bo->handle = handle;
-	bo->size = size;
-	bo->bucket = cache_bucket(size);
-	assert(bo->size < 1 << (12 + bo->bucket + 1));
+	num_pages(bo) = num_pages;
+	bucket(bo) = cache_bucket(num_pages);
 	bo->reusable = true;
 	bo->domain = DOMAIN_CPU;
 	list_init(&bo->request);
@@ -450,7 +450,7 @@ static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo,
 	return bo;
 }
 
-static struct kgem_bo *__kgem_bo_alloc(int handle, int size)
+static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages)
 {
 	struct kgem_bo *bo;
 
@@ -463,7 +463,7 @@ static struct kgem_bo *__kgem_bo_alloc(int handle, int size)
 			return NULL;
 	}
 
-	return __kgem_bo_init(bo, handle, size);
+	return __kgem_bo_init(bo, handle, num_pages);
 }
 
 static struct kgem_request _kgem_static_request;
@@ -481,14 +481,14 @@ static struct kgem_request *__kgem_request_alloc(void)
 	return rq;
 }
 
-static struct list *inactive(struct kgem *kgem, int size)
+static struct list *inactive(struct kgem *kgem, int num_pages)
 {
-	return &kgem->inactive[cache_bucket(size)];
+	return &kgem->inactive[cache_bucket(num_pages)];
 }
 
-static struct list *active(struct kgem *kgem, int size, int tiling)
+static struct list *active(struct kgem *kgem, int num_pages, int tiling)
 {
-	return &kgem->active[cache_bucket(size)][tiling];
+	return &kgem->active[cache_bucket(num_pages)][tiling];
 }
 
 static size_t
@@ -575,6 +575,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	list_init(&kgem->partial);
 	list_init(&kgem->requests);
 	list_init(&kgem->flushing);
+	list_init(&kgem->large);
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++)
 		list_init(&kgem->inactive[i]);
 	for (i = 0; i < ARRAY_SIZE(kgem->active); i++) {
@@ -658,11 +659,9 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		 * disable dual-stream mode */
 		kgem->min_alignment = 64;
 
+	kgem->max_object_size = kgem->aperture_total / 2;
 	kgem->max_cpu_size = kgem->aperture_total / 2;
-	if (kgem->max_cpu_size > MAX_OBJECT_SIZE)
-		kgem->max_cpu_size = MAX_OBJECT_SIZE;
-
-	kgem->max_gpu_size = -1;
+	kgem->max_gpu_size = MAX_CACHE_SIZE;
 	if (gen < 40) {
 		/* If we have to use fences for blitting, we have to make
 		 * sure we can fit them into the aperture.
@@ -677,6 +676,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	DBG(("%s: max object size (tiled=%d, linear=%d)\n",
 	     __FUNCTION__, kgem->max_gpu_size, kgem->max_cpu_size));
 
+	/* Convert the aperture thresholds to pages */
+	kgem->aperture_low /= PAGE_SIZE;
+	kgem->aperture_high /= PAGE_SIZE;
+
 	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
 	if ((int)kgem->fence_max < 0)
 		kgem->fence_max = 5; /* minimum safe value for all hw */
@@ -811,7 +814,7 @@ kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo)
 	exec->handle = bo->handle;
 	exec->offset = bo->presumed_offset;
 
-	kgem->aperture += bo->size;
+	kgem->aperture += num_pages(bo);
 
 	return exec;
 }
@@ -875,7 +878,7 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 	     bo->handle, kgem->vma[type].count));
 
 	VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-	munmap(CPU_MAP(bo->map), bo->size);
+	munmap(CPU_MAP(bo->map), bytes(bo));
 	bo->map = NULL;
 
 	if (!list_is_empty(&bo->vma)) {
@@ -917,16 +920,22 @@ inline static void kgem_bo_move_to_inactive(struct kgem *kgem,
 	assert(bo->rq == NULL);
 	assert(bo->domain != DOMAIN_GPU);
 
-	list_move(&bo->list, &kgem->inactive[bo->bucket]);
+	if (bucket(bo) >= NUM_CACHE_BUCKETS) {
+		kgem_bo_free(kgem, bo);
+		return;
+	}
+
+	list_move(&bo->list, &kgem->inactive[bucket(bo)]);
 	if (bo->map) {
 		int type = IS_CPU_MAP(bo->map);
-		if (!type && !kgem_bo_is_mappable(kgem, bo)) {
+		if (bucket(bo) >= NUM_CACHE_BUCKETS ||
+		    (!type && !kgem_bo_is_mappable(kgem, bo))) {
 			list_del(&bo->vma);
-			munmap(CPU_MAP(bo->map), bo->size);
+			munmap(CPU_MAP(bo->map), bytes(bo));
 			bo->map = NULL;
 		}
 		if (bo->map) {
-			list_move(&bo->vma, &kgem->vma[type].inactive[bo->bucket]);
+			list_move(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]);
 			kgem->vma[type].count++;
 		}
 	}
@@ -1002,8 +1011,14 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	bo->scanout = bo->flush = false;
 	if (bo->rq) {
+		struct list *cache;
+
 		DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle));
-		list_add(&bo->list, &kgem->active[bo->bucket][bo->tiling]);
+		if (bucket(bo) < NUM_CACHE_BUCKETS)
+			cache = &kgem->active[bucket(bo)][bo->tiling];
+		else
+			cache = &kgem->large;
+		list_add(&bo->list, cache);
 		return;
 	}
 
@@ -1012,10 +1027,17 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 
 	if (bo->needs_flush) {
 		if ((bo->needs_flush = kgem_busy(kgem, bo->handle))) {
+			struct list *cache;
+
 			DBG(("%s: handle=%d -> flushing\n",
 			     __FUNCTION__, bo->handle));
+
 			list_add(&bo->request, &kgem->flushing);
-			list_add(&bo->list, &kgem->active[bo->bucket][bo->tiling]);
+			if (bucket(bo) < NUM_CACHE_BUCKETS)
+				cache = &kgem->active[bucket(bo)][bo->tiling];
+			else
+				cache = &kgem->large;
+			list_add(&bo->list, cache);
 			bo->rq = &_kgem_static_request;
 			return;
 		}
@@ -1231,7 +1253,7 @@ static void kgem_close_inactive(struct kgem *kgem)
 
 static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
 {
-	int remain = bo->base.size - bo->used;
+	int remain = bytes(&bo->base) - bo->used;
 
 	while (bo->base.list.prev != &kgem->partial) {
 		struct kgem_partial_bo *p;
@@ -1239,7 +1261,7 @@ static void bubble_sort_partial(struct kgem *kgem, struct kgem_partial_bo *bo)
 		p = list_entry(bo->base.list.prev,
 			       struct kgem_partial_bo,
 			       base.list);
-		if (remain <= p->base.size - p->used)
+		if (remain <= bytes(&p->base) - p->used)
 			break;
 
 		assert(p->base.list.next == &bo->base.list);
@@ -1282,7 +1304,7 @@ static void kgem_finish_partials(struct kgem *kgem)
 		assert(bo->base.rq == kgem->next_request);
 		if (bo->used && bo->need_io) {
 			if (bo->base.refcnt == 1 &&
-			    bo->used < bo->base.size / 2) {
+			    bo->used < bytes(&bo->base) / 2) {
 				struct kgem_bo *shrink;
 
 				shrink = search_linear_cache(kgem,
@@ -1293,10 +1315,10 @@ static void kgem_finish_partials(struct kgem *kgem)
 
 					DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
 					     __FUNCTION__,
-					     bo->used, bo->base.size, shrink->size,
+					     bo->used, bytes(&bo->base), bytes(shrink),
 					     bo->base.handle, shrink->handle));
 
-					assert(bo->used <= shrink->size);
+					assert(bo->used <= bytes(shrink));
 					gem_write(kgem->fd, shrink->handle,
 						  0, bo->used, bo->mem);
 
@@ -1330,9 +1352,9 @@ static void kgem_finish_partials(struct kgem *kgem)
 			}
 
 			DBG(("%s: handle=%d, uploading %d/%d\n",
-			     __FUNCTION__, bo->base.handle, bo->used, bo->base.size));
+			     __FUNCTION__, bo->base.handle, bo->used, bytes(&bo->base)));
 			assert(!kgem_busy(kgem, bo->base.handle));
-			assert(bo->used <= bo->base.size);
+			assert(bo->used <= bytes(&bo->base));
 			gem_write(kgem->fd, bo->base.handle,
 				  0, bo->used, bo->mem);
 			bo->need_io = 0;
@@ -1616,7 +1638,7 @@ void _kgem_submit(struct kgem *kgem)
 					       i,
 					       kgem->exec[i].handle,
 					       (int)kgem->exec[i].offset,
-					       found ? found->size : -1,
+					       found ? bytes(found) : -1,
 					       found ? found->tiling : -1,
 					       (int)(kgem->exec[i].flags & EXEC_OBJECT_NEEDS_FENCE),
 					       found ? found->purged : -1);
@@ -1690,7 +1712,7 @@ static void kgem_expire_partial(struct kgem *kgem)
 			continue;
 
 		DBG(("%s: discarding unused partial buffer: %d/%d, write? %d\n",
-		     __FUNCTION__, bo->used, bo->base.size, bo->write));
+		     __FUNCTION__, bo->used, bytes(&bo->base), bo->write));
 		list_del(&bo->base.list);
 		kgem_bo_unref(kgem, &bo->base);
 	}
@@ -1773,7 +1795,7 @@ bool kgem_expire_cache(struct kgem *kgem)
 				list_move_tail(&bo->list, &preserve);
 			} else {
 				count++;
-				size += bo->size;
+				size += bytes(bo);
 				kgem_bo_free(kgem, bo);
 				DBG(("%s: expiring %d\n",
 				     __FUNCTION__, bo->handle));
@@ -1834,28 +1856,31 @@ void kgem_cleanup_cache(struct kgem *kgem)
 }
 
 static struct kgem_bo *
-search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
+search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags)
 {
 	struct kgem_bo *bo, *first = NULL;
 	bool use_active = (flags & CREATE_INACTIVE) == 0;
 	struct list *cache;
 
+	if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE)
+		return NULL;
+
 	if (!use_active &&
-	    list_is_empty(inactive(kgem, size)) &&
-	    !list_is_empty(active(kgem, size, I915_TILING_NONE)) &&
+	    list_is_empty(inactive(kgem, num_pages)) &&
+	    !list_is_empty(active(kgem, num_pages, I915_TILING_NONE)) &&
 	    !kgem_retire(kgem))
 		return NULL;
 
 	if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
-		cache = &kgem->vma[for_cpu].inactive[cache_bucket(size)];
+		cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)];
 		list_for_each_entry(bo, cache, vma) {
 			assert(IS_CPU_MAP(bo->map) == for_cpu);
-			assert(bo->bucket == cache_bucket(size));
+			assert(bucket(bo) == cache_bucket(num_pages));
 
-			if (size > bo->size) {
+			if (num_pages > num_pages(bo)) {
 				DBG(("inactive too small: %d < %d\n",
-				     bo->size, size));
+				     num_pages(bo), num_pages));
 				continue;
 			}
 
@@ -1874,8 +1899,8 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
 			bo->tiling = I915_TILING_NONE;
 			bo->pitch = 0;
 			bo->delta = 0;
-			DBG(("  %s: found handle=%d (size=%d) in linear vma cache\n",
-			     __FUNCTION__, bo->handle, bo->size));
+			DBG(("  %s: found handle=%d (num_pages=%d) in linear vma cache\n",
+			     __FUNCTION__, bo->handle, num_pages(bo)));
 			assert(use_active || bo->domain != DOMAIN_GPU);
 			assert(!bo->needs_flush);
 			//assert(!kgem_busy(kgem, bo->handle));
@@ -1883,13 +1908,13 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
 		}
 	}
 
-	cache = use_active ? active(kgem, size, I915_TILING_NONE) : inactive(kgem, size);
+	cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages);
 	list_for_each_entry(bo, cache, list) {
 		assert(bo->refcnt == 0);
 		assert(bo->reusable);
 		assert(!!bo->rq == !!use_active);
 
-		if (size > bo->size)
+		if (num_pages > num_pages(bo))
 			continue;
 
 		if (use_active && bo->tiling != I915_TILING_NONE)
@@ -1946,8 +1971,8 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
 		assert(bo->tiling == I915_TILING_NONE);
 		bo->pitch = 0;
 		bo->delta = 0;
-		DBG(("  %s: found handle=%d (size=%d) in linear %s cache\n",
-		     __FUNCTION__, bo->handle, bo->size,
+		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
+		     __FUNCTION__, bo->handle, num_pages(bo),
 		     use_active ? "active" : "inactive"));
 		assert(use_active || bo->domain != DOMAIN_GPU);
 		assert(!bo->needs_flush || use_active);
@@ -1965,8 +1990,8 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
 
 		first->pitch = 0;
 		first->delta = 0;
-		DBG(("  %s: found handle=%d (size=%d) in linear %s cache\n",
-		     __FUNCTION__, first->handle, first->size,
+		DBG(("  %s: found handle=%d (num_pages=%d) in linear %s cache\n",
+		     __FUNCTION__, first->handle, num_pages(first),
 		     use_active ? "active" : "inactive"));
 		assert(use_active || first->domain != DOMAIN_GPU);
 		assert(!first->needs_flush || use_active);
@@ -1990,7 +2015,7 @@ struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name)
 		return NULL;
 
 	DBG(("%s: new handle=%d\n", __FUNCTION__, open_arg.handle));
-	bo = __kgem_bo_alloc(open_arg.handle, open_arg.size);
+	bo = __kgem_bo_alloc(open_arg.handle, open_arg.size / PAGE_SIZE);
 	if (bo == NULL) {
 		gem_close(kgem->fd, open_arg.handle);
 		return NULL;
@@ -2007,7 +2032,7 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
 
 	DBG(("%s(%d)\n", __FUNCTION__, size));
 
-	size = PAGE_ALIGN(size);
+	size = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 	bo = search_linear_cache(kgem, size, CREATE_INACTIVE);
 	if (bo)
 		return kgem_bo_reference(bo);
@@ -2019,7 +2044,7 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
 	DBG(("%s: new handle=%d\n", __FUNCTION__, handle));
 	bo = __kgem_bo_alloc(handle, size);
 	if (bo == NULL) {
-		gem_close(kgem->fd, size);
+		gem_close(kgem->fd, handle);
 		return NULL;
 	}
 
@@ -2028,8 +2053,6 @@ struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size)
 
 int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int bpp)
 {
-	uint32_t pitch;
-
 	if (DBG_NO_TILING)
 		return tiling < 0 ? tiling : I915_TILING_NONE;
 
@@ -2058,17 +2081,6 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 		}
 	}
 
-	/* First check that we can fence the whole object */
-	if (tiling &&
-	    kgem_surface_size(kgem, false, false,
-			      width, height, bpp, tiling,
-			      &pitch) > kgem->max_gpu_size) {
-		DBG(("%s: too large (%dx%d) to be fenced, discarding tiling\n",
-		     __FUNCTION__, width, height));
-		tiling = I915_TILING_NONE;
-		goto done;
-	}
-
 	if (tiling < 0)
 		return tiling;
 
@@ -2125,18 +2137,42 @@ done:
 	return tiling;
 }
 
-bool kgem_can_create_cpu(struct kgem *kgem,
+bool kgem_can_create_2d(struct kgem *kgem,
 			 int width, int height, int depth)
 {
+	int bpp = BitsPerPixel(depth);
 	uint32_t pitch, size;
 
 	if (depth < 8 || kgem->wedged)
 		return false;
 
 	size = kgem_surface_size(kgem, false, false,
-				 width, height, BitsPerPixel(depth),
+				 width, height, bpp,
+				 I915_TILING_X, &pitch);
+	if (size > 0 && size <= kgem->max_object_size)
+		return true;
+
+	size = kgem_surface_size(kgem, false, false,
+				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
-	return size > 0 && size < kgem->max_cpu_size;
+	if (size > 0 && size <= kgem->max_object_size)
+		return true;
+
+	return false;
+}
+
+bool kgem_can_create_cpu(struct kgem *kgem,
+			 int width, int height, int bpp)
+{
+	uint32_t pitch, size;
+
+	if (bpp < 8 || kgem->wedged)
+		return false;
+
+	size = kgem_surface_size(kgem, false, false,
+				 width, height, bpp,
+				 I915_TILING_NONE, &pitch);
+	return size > 0 && size <= kgem->max_cpu_size;
 }
 
 static bool _kgem_can_create_gpu(struct kgem *kgem,
@@ -2179,7 +2215,7 @@ inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo)
 		size = 512 * 1024;
 	else
 		size = 1024 * 1024;
-	while (size < bo->size)
+	while (size < bytes(bo))
 		size *= 2;
 
 	return size;
@@ -2213,10 +2249,52 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				 kgem->has_relaxed_fencing,
 				 flags & CREATE_SCANOUT,
 				 width, height, bpp, tiling, &pitch);
-	assert(size && size < kgem->max_cpu_size);
-	assert(tiling == I915_TILING_NONE || size < kgem->max_gpu_size);
+	assert(size && size <= kgem->max_object_size);
+	size /= PAGE_SIZE;
 	bucket = cache_bucket(size);
 
+	if (bucket >= NUM_CACHE_BUCKETS) {
+		DBG(("%s: large bo num pages=%d, bucket=%d\n",
+		     __FUNCTION__, size, bucket));
+
+		if (flags & CREATE_INACTIVE)
+			goto create;
+
+		tiled_height = kgem_aligned_height(kgem, height, I915_TILING_Y);
+		untiled_pitch = kgem_untiled_pitch(kgem,
+						   width, bpp,
+						   flags & CREATE_SCANOUT);
+
+		list_for_each_entry(bo, &kgem->large, list) {
+			assert(!bo->purged);
+			assert(bo->refcnt == 0);
+			assert(bo->reusable);
+
+			if (bo->tiling) {
+				if (bo->pitch < pitch) {
+					DBG(("tiled and pitch too small: tiling=%d, (want %d), pitch=%d, need %d\n",
+					     bo->tiling, tiling,
+					     bo->pitch, pitch));
+					continue;
+				}
+			} else
+				bo->pitch = untiled_pitch;
+
+			if (bo->pitch * tiled_height > bytes(bo))
+				continue;
+
+			kgem_bo_remove_from_active(kgem, bo);
+
+			bo->unique_id = kgem_get_unique_id(kgem);
+			bo->delta = 0;
+			DBG(("  1:from active: pitch=%d, tiling=%d, handle=%d, id=%d\n",
+			     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
+			return kgem_bo_reference(bo);
+		}
+
+		goto create;
+	}
+
 	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
 		if (kgem->has_llc && tiling == I915_TILING_NONE)
@@ -2227,16 +2305,16 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		cache = &kgem->vma[for_cpu].inactive[bucket];
 		do {
 			list_for_each_entry(bo, cache, vma) {
-				assert(bo->bucket == bucket);
+				assert(bucket(bo) == bucket);
 				assert(bo->refcnt == 0);
 				assert(bo->map);
 				assert(IS_CPU_MAP(bo->map) == for_cpu);
 				assert(bo->rq == NULL);
 				assert(list_is_empty(&bo->request));
 
-				if (size > bo->size) {
+				if (size > num_pages(bo)) {
 					DBG(("inactive too small: %d < %d\n",
-					     bo->size, size));
+					     num_pages(bo), size));
 					continue;
 				}
 
@@ -2275,13 +2353,14 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	if (retry > 3)
 		retry = 3;
 search_again:
+	assert(bucket < NUM_CACHE_BUCKETS);
 	cache = &kgem->active[bucket][tiling];
 	if (tiling) {
 		tiled_height = kgem_aligned_height(kgem, height, tiling);
 		list_for_each_entry(bo, cache, list) {
 			assert(!bo->purged);
 			assert(bo->refcnt == 0);
-			assert(bo->bucket == bucket);
+			assert(bucket(bo) == bucket);
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
 
@@ -2292,7 +2371,7 @@ search_again:
 				continue;
 			}
 
-			if (bo->pitch * tiled_height > bo->size)
+			if (bo->pitch * tiled_height > bytes(bo))
 				continue;
 
 			kgem_bo_remove_from_active(kgem, bo);
@@ -2305,13 +2384,13 @@ search_again:
 		}
 	} else {
 		list_for_each_entry(bo, cache, list) {
-			assert(bo->bucket == bucket);
+			assert(bucket(bo) == bucket);
 			assert(!bo->purged);
 			assert(bo->refcnt == 0);
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
 
-			if (bo->size < size)
+			if (num_pages(bo) < size)
 				continue;
 
 			kgem_bo_remove_from_active(kgem, bo);
@@ -2340,7 +2419,7 @@ search_again:
 							 kgem->has_relaxed_fencing,
 							 flags & CREATE_SCANOUT,
 							 width, height, bpp, tiling, &pitch);
-			cache = active(kgem, tiled_height, i);
+			cache = active(kgem, tiled_height / PAGE_SIZE, i);
 			tiled_height = kgem_aligned_height(kgem, height, i);
 			list_for_each_entry(bo, cache, list) {
 				assert(!bo->purged);
@@ -2357,7 +2436,7 @@ search_again:
 				} else
 					bo->pitch = untiled_pitch;
 
-				if (bo->pitch * tiled_height > bo->size)
+				if (bo->pitch * tiled_height > bytes(bo))
 					continue;
 
 				kgem_bo_remove_from_active(kgem, bo);
@@ -2378,13 +2457,14 @@ skip_active_search:
 		retry = 3;
 search_inactive:
 	/* Now just look for a close match and prefer any currently active */
+	assert(bucket < NUM_CACHE_BUCKETS);
 	cache = &kgem->inactive[bucket];
 	list_for_each_entry_safe(bo, next, cache, list) {
-		assert(bo->bucket == bucket);
+		assert(bucket(bo) == bucket);
 
-		if (size > bo->size) {
+		if (size > num_pages(bo)) {
 			DBG(("inactive too small: %d < %d\n",
-			     bo->size, size));
+			     num_pages(bo), size));
 			continue;
 		}
 
@@ -2439,6 +2519,7 @@ search_inactive:
 		goto search_inactive;
 	}
 
+create:
 	handle = gem_create(kgem->fd, size);
 	if (handle == 0)
 		return NULL;
@@ -2455,7 +2536,7 @@ search_inactive:
 	if (tiling != I915_TILING_NONE)
 		bo->tiling = gem_set_tiling(kgem->fd, handle, tiling, pitch);
 
-	assert(bo->size >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
+	assert(bytes(bo) >= bo->pitch * kgem_aligned_height(kgem, height, bo->tiling));
 
 	DBG(("  new pitch=%d, tiling=%d, handle=%d, id=%d\n",
 	     bo->pitch, bo->tiling, bo->handle, bo->unique_id));
@@ -2470,9 +2551,9 @@ static void _kgem_bo_delete_partial(struct kgem *kgem, struct kgem_bo *bo)
 		return;
 
 	DBG(("%s: size=%d, offset=%d, parent used=%d\n",
-	     __FUNCTION__, bo->size, bo->delta, io->used));
+	     __FUNCTION__, bo->size.bytes, bo->delta, io->used));
 
-	if (bo->delta + bo->size == io->used) {
+	if (bo->delta + bo->size.bytes == io->used) {
 		io->used = bo->delta;
 		bubble_sort_partial(kgem, io);
 	}
@@ -2508,25 +2589,30 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	va_list ap;
 	struct kgem_bo *bo;
 	int num_exec = 0;
-	int size = 0;
+	int num_pages = 0;
 
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		if (bo->exec)
 			continue;
 
-		size += bo->size;
+		if (bo->proxy) {
+			bo = bo->proxy;
+			if (bo->exec)
+				continue;
+		}
+		num_pages += num_pages(bo);
 		num_exec++;
 	}
 	va_end(ap);
 
-	if (!size)
+	if (!num_pages)
 		return true;
 
 	if (kgem->aperture > kgem->aperture_low)
 		return false;
 
-	if (size + kgem->aperture > kgem->aperture_high)
+	if (num_pages + kgem->aperture > kgem->aperture_high)
 		return false;
 
 	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
@@ -2541,11 +2627,13 @@ bool kgem_check_bo_fenced(struct kgem *kgem, ...)
 	struct kgem_bo *bo;
 	int num_fence = 0;
 	int num_exec = 0;
-	int size = 0;
+	int num_pages = 0;
 	int fenced_size = 0;
 
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
+		if (bo->proxy)
+			bo = bo->proxy;
 		if (bo->exec) {
 			if (kgem->gen >= 40 || bo->tiling == I915_TILING_NONE)
 				continue;
@@ -2558,7 +2646,7 @@ bool kgem_check_bo_fenced(struct kgem *kgem, ...)
 			continue;
 		}
 
-		size += bo->size;
+		num_pages += num_pages(bo);
 		num_exec++;
 		if (kgem->gen < 40 && bo->tiling) {
 			fenced_size += kgem_bo_fenced_size(kgem, bo);
@@ -2573,13 +2661,13 @@ bool kgem_check_bo_fenced(struct kgem *kgem, ...)
 	if (kgem->nfence + num_fence > kgem->fence_max)
 		return false;
 
-	if (!size)
+	if (!num_pages)
 		return true;
 
 	if (kgem->aperture > kgem->aperture_low)
 		return false;
 
-	if (size + kgem->aperture > kgem->aperture_high)
+	if (num_pages + kgem->aperture > kgem->aperture_high)
 		return false;
 
 	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
@@ -2698,7 +2786,7 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		assert(bo->rq == NULL);
 
 		VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0));
-		munmap(CPU_MAP(bo->map), bo->size);
+		munmap(CPU_MAP(bo->map), bytes(bo));
 		bo->map = NULL;
 		list_del(&bo->vma);
 		kgem->vma[type].count--;
@@ -2736,11 +2824,11 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 	ptr = bo->map;
 	if (ptr == NULL) {
-		assert(bo->size <= kgem->aperture_mappable / 4);
+		assert(bytes(bo) <= kgem->aperture_mappable / 4);
 
-		kgem_trim_vma_cache(kgem, MAP_GTT, bo->bucket);
+		kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
 
-		ptr = gem_mmap(kgem->fd, bo->handle, bo->size,
+		ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo),
 			       PROT_READ | PROT_WRITE);
 		if (ptr == NULL)
 			return NULL;
@@ -2780,8 +2868,8 @@ void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->map)
 		return bo->map;
 
-	kgem_trim_vma_cache(kgem, MAP_GTT, bo->bucket);
-	return bo->map = gem_mmap(kgem->fd, bo->handle, bo->size,
+	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
+	return bo->map = gem_mmap(kgem->fd, bo->handle, bytes(bo),
 				  PROT_READ | PROT_WRITE);
 }
 
@@ -2789,7 +2877,7 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 {
 	struct drm_i915_gem_mmap mmap_arg;
 
-	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bo->size));
+	DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bytes(bo)));
 	assert(!bo->purged);
 	assert(list_is_empty(&bo->list));
 
@@ -2799,18 +2887,19 @@ void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo)
 	if (bo->map)
 		kgem_bo_release_map(kgem, bo);
 
-	kgem_trim_vma_cache(kgem, MAP_CPU, bo->bucket);
+	kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo));
 
 	VG_CLEAR(mmap_arg);
 	mmap_arg.handle = bo->handle;
 	mmap_arg.offset = 0;
-	mmap_arg.size = bo->size;
+	mmap_arg.size = bytes(bo);
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) {
-		assert(0);
+		ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain\n",
+		       __FUNCTION__, bo->handle, bytes(bo));
 		return NULL;
 	}
 
-	VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, bo->size, 0, 1));
+	VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, bytes(bo), 0, 1));
 
 	DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle));
 	bo->map = MAKE_CPU_MAP(mmap_arg.addr_ptr);
@@ -2876,6 +2965,9 @@ struct kgem_bo *kgem_create_map(struct kgem *kgem,
 	if (!kgem->has_vmap)
 		return NULL;
 
+	if (size >= MAX_CACHE_SIZE)
+		return NULL;
+
 	handle = gem_vmap(kgem->fd, ptr, size, read_only);
 	if (handle == 0)
 		return NULL;
@@ -2972,6 +3064,7 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	if (bo == NULL)
 		return NULL;
 
+	bo->size.bytes = length;
 	bo->io = target->io;
 	bo->dirty = target->dirty;
 	bo->tiling = target->tiling;
@@ -2982,11 +3075,11 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	return bo;
 }
 
-static struct kgem_partial_bo *partial_bo_alloc(int size)
+static struct kgem_partial_bo *partial_bo_alloc(int num_pages)
 {
 	struct kgem_partial_bo *bo;
 
-	bo = malloc(sizeof(*bo) + 128 + size);
+	bo = malloc(sizeof(*bo) + 128 + num_pages * PAGE_SIZE);
 	if (bo) {
 		bo->mem = (void *)ALIGN((uintptr_t)bo + sizeof(*bo), 64);
 		bo->mmapped = false;
@@ -3010,20 +3103,20 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
 	/* we should never be asked to create anything TOO large */
-	assert(size < kgem->max_cpu_size);
+	assert(size <= kgem->max_cpu_size);
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
 		if (flags == KGEM_BUFFER_LAST && bo->write) {
 			/* We can reuse any write buffer which we can fit */
-			if (size <= bo->base.size) {
+			if (size <= bytes(&bo->base)) {
 				if (bo->base.refcnt == 1 && bo->base.exec) {
 					DBG(("%s: reusing write buffer for read of %d bytes? used=%d, total=%d\n",
-					     __FUNCTION__, size, bo->used, bo->base.size));
+					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 					offset = 0;
 					goto done;
-				} else if (bo->used + size <= bo->base.size) {
+				} else if (bo->used + size <= bytes(&bo->base)) {
 					DBG(("%s: reusing unfinished write buffer for read of %d bytes? used=%d, total=%d\n",
-					     __FUNCTION__, size, bo->used, bo->base.size));
+					     __FUNCTION__, size, bo->used, bytes(&bo->base)));
 					offset = bo->used;
 					goto done;
 				}
@@ -3037,24 +3130,25 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			continue;
 		}
 
-		if (bo->used + size <= bo->base.size) {
+		if (bo->used + size <= bytes(&bo->base)) {
 			DBG(("%s: reusing partial buffer? used=%d + size=%d, total=%d\n",
-			     __FUNCTION__, bo->used, size, bo->base.size));
+			     __FUNCTION__, bo->used, size, bytes(&bo->base)));
 			offset = bo->used;
 			bo->used += size;
 			goto done;
 		}
 
 		DBG(("%s: too small (%d < %d)\n",
-		     __FUNCTION__, bo->base.size - bo->used, size));
+		     __FUNCTION__, bytes(&bo->base) - bo->used, size));
 		break;
 	}
 
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
-	if (alloc >= kgem->max_cpu_size)
+	if (alloc > kgem->max_gpu_size)
 		alloc = PAGE_ALIGN(size);
+	alloc /= PAGE_SIZE;
 	if (kgem->has_cpu_bo) {
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
@@ -3098,7 +3192,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->base.io = true;
 			bo->mmapped = true;
 
-			alloc = bo->base.size;
+			alloc = num_pages(&bo->base);
 			goto init;
 		} else {
 			bo->base.refcnt = 0; /* for valgrind */
@@ -3107,7 +3201,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 
-	if (alloc > kgem->aperture_mappable / 4)
+	if (PAGE_SIZE * alloc > kgem->aperture_mappable / 4)
 		flags &= ~KGEM_BUFFER_INPLACE;
 
 	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
@@ -3164,7 +3258,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				bo->mmapped = true;
 				bo->base.refcnt = 1;
 
-				alloc = bo->base.size;
+				alloc = num_pages(&bo->base);
 				goto init;
 			} else {
 				kgem_bo_free(kgem, &bo->base);
@@ -3173,11 +3267,11 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 	}
 #else
-	alloc = ALIGN(size, 64*1024);
+	alloc = ALIGN(size, 64*1024) / PAGE_SIZE;
 #endif
 	/* Be more parsimonious with pwrite/pread buffers */
 	if ((flags & KGEM_BUFFER_INPLACE) == 0)
-		alloc = PAGE_ALIGN(size);
+		alloc = PAGE_ALIGN(size) / PAGE_SIZE;
 	flags &= ~KGEM_BUFFER_INPLACE;
 
 	old = NULL;
@@ -3188,7 +3282,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	if (old) {
 		DBG(("%s: reusing ordinary handle %d for io\n",
 		     __FUNCTION__, old->handle));
-		alloc = old->size;
+		alloc = num_pages(old);
 		bo = partial_bo_alloc(alloc);
 		if (bo == NULL)
 			return NULL;
@@ -3240,21 +3334,40 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		}
 
 		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-		if (bo->mem == NULL) {
-			kgem_bo_free(kgem, &bo->base);
+		if (bo->mem != NULL) {
+			if (flags & KGEM_BUFFER_WRITE)
+				kgem_bo_sync__cpu(kgem, &bo->base);
+
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->mmapped = true;
+			goto init;
+		}
+
+		DBG(("%s: falling back to new pwrite buffer\n", __FUNCTION__));
+		old = &bo->base;
+		bo = partial_bo_alloc(alloc);
+		if (bo == NULL) {
+			free(old);
 			return NULL;
 		}
 
-		if (flags & KGEM_BUFFER_WRITE)
-			kgem_bo_sync__cpu(kgem, &bo->base);
+		memcpy(&bo->base, old, sizeof(*old));
+		free(old);
+
+		assert(bo->mem);
+		assert(!bo->mmapped);
 
-		bo->need_io = false;
+		list_init(&bo->base.request);
+		list_init(&bo->base.vma);
+		list_init(&bo->base.list);
+		bo->base.refcnt = 1;
+		bo->need_io = flags & KGEM_BUFFER_WRITE;
 		bo->base.io = true;
-		bo->mmapped = true;
 	}
 init:
 	bo->base.reusable = false;
-	assert(bo->base.size == alloc);
+	assert(num_pages(&bo->base) == alloc);
 	assert(!bo->need_io || !bo->base.needs_flush);
 	assert(!bo->need_io || bo->base.domain != DOMAIN_GPU);
 
@@ -3263,12 +3376,12 @@ init:
 	offset = 0;
 
 	list_add(&bo->base.list, &kgem->partial);
-	DBG(("%s(size=%d) new handle=%d\n",
+	DBG(("%s(pages=%d) new handle=%d\n",
 	     __FUNCTION__, alloc, bo->base.handle));
 
 done:
 	/* adjust the position within the list to maintain decreasing order */
-	alloc = bo->base.size - bo->used;
+	alloc = bytes(&bo->base) - bo->used;
 	{
 		struct kgem_partial_bo *p, *first;
 
@@ -3276,9 +3389,9 @@ done:
 					     struct kgem_partial_bo,
 					     base.list);
 		while (&p->base.list != &kgem->partial &&
-		       alloc < p->base.size - p->used) {
+		       alloc < bytes(&p->base) - p->used) {
 			DBG(("%s: this=%d, right=%d\n",
-			     __FUNCTION__, alloc, p->base.size -p->used));
+			     __FUNCTION__, alloc, bytes(&p->base) -p->used));
 			p = list_first_entry(&p->base.list,
 					     struct kgem_partial_bo,
 					     base.list);
@@ -3287,6 +3400,7 @@ done:
 			list_move_tail(&bo->base.list, &p->base.list);
 		assert(validate_partials(kgem));
 	}
+	assert(bo->mem);
 	*ret = (char *)bo->mem + offset;
 	return kgem_create_proxy(&bo->base, offset, size);
 }
@@ -3300,6 +3414,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	int stride;
 
 	assert(width > 0 && height > 0);
+	assert(ret != NULL);
 	stride = ALIGN(width, 2) * bpp >> 3;
 	stride = ALIGN(stride, kgem->min_alignment);
 
@@ -3307,8 +3422,12 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 	     __FUNCTION__, width, height, bpp, stride));
 
 	bo = kgem_create_buffer(kgem, stride * ALIGN(height, 2), flags, ret);
-	if (bo == NULL)
+	if (bo == NULL) {
+		DBG(("%s: allocation failure for upload buffer\n",
+		     __FUNCTION__));
 		return NULL;
+	}
+	assert(*ret != NULL);
 
 	if (height & 1) {
 		struct kgem_partial_bo *io = (struct kgem_partial_bo *)bo->proxy;
@@ -3319,7 +3438,7 @@ struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
 		 */
 		if (io->used)
 			io->used -= stride;
-		bo->size -= stride;
+		bo->size.bytes -= stride;
 		bubble_sort_partial(kgem, io);
 	}
 
@@ -3357,8 +3476,9 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
 	struct kgem_partial_bo *bo;
-	uint32_t offset = _bo->delta, length = _bo->size;
+	uint32_t offset = _bo->delta, length = _bo->size.bytes;
 
+	assert(_bo->io);
 	assert(_bo->exec == NULL);
 	if (_bo->proxy)
 		_bo = _bo->proxy;
@@ -3461,7 +3581,7 @@ kgem_replace_bo(struct kgem *kgem,
 	assert(src->tiling == I915_TILING_NONE);
 
 	size = height * pitch;
-	size = PAGE_ALIGN(size);
+	size = PAGE_ALIGN(size) / PAGE_SIZE;
 
 	dst = search_linear_cache(kgem, size, 0);
 	if (dst == NULL)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0dc67da..2631e81 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -66,10 +66,16 @@ struct kgem_bo {
 	uint32_t handle;
 	uint32_t presumed_offset;
 	uint32_t delta;
-	uint32_t size:28;
-	uint32_t bucket:4;
-#define MAX_OBJECT_SIZE (1 << 28)
-
+	union {
+		struct {
+			uint32_t count:27;
+#define PAGE_SIZE 4096
+			uint32_t bucket:5;
+#define NUM_CACHE_BUCKETS 16
+#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
+		} pages;
+		uint32_t bytes;
+	} size;
 	uint32_t pitch : 18; /* max 128k */
 	uint32_t tiling : 2;
 	uint32_t reusable : 1;
@@ -100,8 +106,6 @@ enum {
 	NUM_MAP_TYPES,
 };
 
-#define NUM_CACHE_BUCKETS 16
-
 struct kgem {
 	int fd;
 	int wedged;
@@ -117,7 +121,10 @@ struct kgem {
 		KGEM_BLT,
 	} mode, ring;
 
-	struct list flushing, active[NUM_CACHE_BUCKETS][3], inactive[NUM_CACHE_BUCKETS];
+	struct list flushing;
+	struct list large;
+	struct list active[NUM_CACHE_BUCKETS][3];
+	struct list inactive[NUM_CACHE_BUCKETS];
 	struct list partial;
 	struct list requests;
 	struct kgem_request *next_request;
@@ -154,7 +161,7 @@ struct kgem {
 	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
 	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_gpu_size, max_cpu_size;
+	uint32_t max_gpu_size, max_cpu_size, max_object_size;
 	uint32_t partial_buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
@@ -194,8 +201,9 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
+bool kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
 bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp);
-bool kgem_can_create_cpu(struct kgem *kgem, int width, int height, int depth);
+bool kgem_can_create_cpu(struct kgem *kgem, int width, int height, int bpp);
 
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
@@ -354,11 +362,46 @@ Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
 
 int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
 
+static inline int kgem_bo_size(struct kgem_bo *bo)
+{
+	assert(!(bo->proxy && bo->io));
+	return PAGE_SIZE * bo->size.pages.count;
+}
+
+static inline int kgem_buffer_size(struct kgem_bo *bo)
+{
+	assert(bo->proxy && bo->io);
+	return bo->size.bytes;
+}
+
+static inline bool kgem_bo_can_blt(struct kgem *kgem,
+				   struct kgem_bo *bo)
+{
+	int pitch;
+
+	if (bo->tiling == I915_TILING_Y) {
+		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
+		     __FUNCTION__, bo->handle));
+		return false;
+	}
+
+	pitch = bo->pitch;
+	if (kgem->gen >= 40 && bo->tiling)
+		pitch /= 4;
+	if (pitch > MAXSHORT) {
+		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
+		     __FUNCTION__, bo->handle, pitch));
+		return false;
+	}
+
+	return true;
+}
+
 static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 				       struct kgem_bo *bo)
 {
 	DBG_HDR(("%s: domain=%d, offset: %d size: %d\n",
-		 __FUNCTION__, bo->domain, bo->presumed_offset, bo->size));
+		 __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
 
 	if (bo->domain == DOMAIN_GTT)
 		return true;
@@ -371,9 +414,9 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 		return false;
 
 	if (!bo->presumed_offset)
-		return bo->size <= kgem->aperture_mappable / 4;
+		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
 
-	return bo->presumed_offset + bo->size <= kgem->aperture_mappable;
+	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
 }
 
 static inline bool kgem_bo_mapped(struct kgem_bo *bo)
diff --git a/src/sna/kgem_debug_gen5.c b/src/sna/kgem_debug_gen5.c
index f21220f..9e7360a 100644
--- a/src/sna/kgem_debug_gen5.c
+++ b/src/sna/kgem_debug_gen5.c
@@ -79,7 +79,7 @@ static void gen5_update_vertex_buffer(struct kgem *kgem, const uint32_t *data)
 	} else {
 		bo = kgem_debug_get_bo_for_reloc_entry(kgem, reloc);
 		base = kgem_bo_map__debug(kgem, bo);
-		size = bo->size;
+		size = kgem_bo_size(bo);
 	}
 	ptr = (char *)base + reloc->delta;
 
diff --git a/src/sna/sna.h b/src/sna/sna.h
index 5910daf..d9ba773 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -92,7 +92,6 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 #define DEBUG_NO_RENDER 0
 #define DEBUG_NO_BLT 0
-#define DEBUG_NO_IO 0
 
 #define DEBUG_FLUSH_BATCH 0
 #define DEBUG_FLUSH_SYNC 0
@@ -647,7 +646,7 @@ void sna_read_boxes(struct sna *sna,
 		    struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
 		    PixmapPtr dst, int16_t dst_dx, int16_t dst_dy,
 		    const BoxRec *box, int n);
-void sna_write_boxes(struct sna *sna, PixmapPtr dst,
+bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 		     struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 		     const void *src, int stride, int16_t src_dx, int16_t src_dy,
 		     const BoxRec *box, int n);
@@ -657,10 +656,10 @@ void sna_write_boxes__xor(struct sna *sna, PixmapPtr dst,
 			  const BoxRec *box, int nbox,
 			  uint32_t and, uint32_t or);
 
-struct kgem_bo *sna_replace(struct sna *sna,
-			    PixmapPtr pixmap,
-			    struct kgem_bo *bo,
-			    const void *src, int stride);
+bool sna_replace(struct sna *sna,
+		 PixmapPtr pixmap,
+		 struct kgem_bo **bo,
+		 const void *src, int stride);
 struct kgem_bo *sna_replace__xor(struct sna *sna,
 				 PixmapPtr pixmap,
 				 struct kgem_bo *bo,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f2997d0..ce35113 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -243,9 +243,14 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 	if (priv->ptr)
 		goto done;
 
+	DBG(("%s: pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
 	assert(priv->stride);
 
-	if (sna->kgem.has_cpu_bo || !priv->gpu) {
+	if ((sna->kgem.has_cpu_bo || !priv->gpu) &&
+	    kgem_can_create_cpu(&sna->kgem,
+				pixmap->drawable.width,
+				pixmap->drawable.height,
+				pixmap->drawable.bitsPerPixel)) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
@@ -270,8 +275,11 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 		}
 	}
 
-	if (priv->ptr == NULL)
+	if (priv->ptr == NULL) {
+		DBG(("%s: allocating ordinary memory for shadow pixels [%d bytes]\n",
+		     __FUNCTION__, priv->stride * pixmap->drawable.height));
 		priv->ptr = malloc(priv->stride * pixmap->drawable.height);
+	}
 
 	assert(priv->ptr);
 done:
@@ -289,7 +297,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
 
 	if (priv->cpu_bo) {
 		DBG(("%s: discarding CPU buffer, handle=%d, size=%d\n",
-		     __FUNCTION__, priv->cpu_bo->handle, priv->cpu_bo->size));
+		     __FUNCTION__, priv->cpu_bo->handle, kgem_bo_size(priv->cpu_bo)));
 
 		kgem_bo_destroy(&sna->kgem, priv->cpu_bo);
 		priv->cpu_bo = NULL;
@@ -515,10 +523,10 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 		break;
 
 	default:
-		if (!kgem_can_create_gpu(&sna->kgem,
-					 pixmap->drawable.width,
-					 pixmap->drawable.height,
-					 pixmap->drawable.bitsPerPixel))
+		if (!kgem_can_create_2d(&sna->kgem,
+					pixmap->drawable.width,
+					pixmap->drawable.height,
+					pixmap->drawable.depth))
 			return NULL;
 		break;
 	}
@@ -669,8 +677,11 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
-	if (!kgem_can_create_cpu(&sna->kgem, width, height, depth))
+	if (!kgem_can_create_2d(&sna->kgem, width, height, depth)) {
+		DBG(("%s: can not use GPU, just creating shadow\n",
+		     __FUNCTION__));
 		return create_pixmap(sna, screen, width, height, depth, usage);
+	}
 
 	if (!sna->have_render)
 		return create_pixmap(sna, screen,
@@ -704,6 +715,8 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 
 	pad = PixmapBytePad(width, depth);
 	if (pad * height <= 4096) {
+		DBG(("%s: small buffer [%d], attaching to shadow pixmap\n",
+		     __FUNCTION__, pad * height));
 		pixmap = create_pixmap(sna, screen,
 				       width, height, depth, usage);
 		if (pixmap == NullPixmap)
@@ -713,6 +726,9 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	} else {
 		struct sna_pixmap *priv;
 
+		DBG(("%s: creating GPU pixmap %dx%d, stride=%d\n",
+		     __FUNCTION__, width, height, pad));
+
 		pixmap = create_pixmap(sna, screen, 0, 0, depth, usage);
 		if (pixmap == NullPixmap)
 			return NullPixmap;
@@ -1609,19 +1625,20 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 				    box->x1 <= 0 && box->y1 <= 0 &&
 				    box->x2 >= pixmap->drawable.width &&
 				    box->y2 >= pixmap->drawable.height) {
-					priv->gpu_bo =
-						sna_replace(sna, pixmap,
-							    priv->gpu_bo,
-							    pixmap->devPrivate.ptr,
-							    pixmap->devKind);
+					ok = sna_replace(sna, pixmap,
+							 &priv->gpu_bo,
+							 pixmap->devPrivate.ptr,
+							 pixmap->devKind);
 				} else {
-					sna_write_boxes(sna, pixmap,
-							priv->gpu_bo, 0, 0,
-							pixmap->devPrivate.ptr,
-							pixmap->devKind,
-							0, 0,
-							box, n);
+					ok = sna_write_boxes(sna, pixmap,
+							     priv->gpu_bo, 0, 0,
+							     pixmap->devPrivate.ptr,
+							     pixmap->devKind,
+							     0, 0,
+							     box, n);
 				}
+				if (!ok)
+					return false;
 			}
 		}
 
@@ -1637,12 +1654,14 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, 1);
 		if (!ok)
-			sna_write_boxes(sna, pixmap,
-					priv->gpu_bo, 0, 0,
-					pixmap->devPrivate.ptr,
-					pixmap->devKind,
-					0, 0,
-					box, 1);
+			ok = sna_write_boxes(sna, pixmap,
+					     priv->gpu_bo, 0, 0,
+					     pixmap->devPrivate.ptr,
+					     pixmap->devKind,
+					     0, 0,
+					     box, 1);
+		if (!ok)
+			return false;
 
 		sna_damage_subtract(&priv->cpu_damage, &r);
 		priv->undamaged = true;
@@ -1658,12 +1677,14 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 						    pixmap, priv->gpu_bo, 0, 0,
 						    box, n);
 		if (!ok)
-			sna_write_boxes(sna, pixmap,
-					priv->gpu_bo, 0, 0,
-					pixmap->devPrivate.ptr,
-					pixmap->devKind,
-					0, 0,
-					box, n);
+			ok = sna_write_boxes(sna, pixmap,
+					     priv->gpu_bo, 0, 0,
+					     pixmap->devPrivate.ptr,
+					     pixmap->devKind,
+					     0, 0,
+					     box, n);
+		if (!ok)
+			return false;
 
 		sna_damage_subtract(&priv->cpu_damage, &r);
 		priv->undamaged = true;
@@ -1671,7 +1692,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, BoxPtr box, unsigned int flags)
 	}
 
 done:
-	if (!priv->pinned)
+	if (!priv->pinned && priv->gpu)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return true;
@@ -1811,7 +1832,7 @@ done:
 
 use_gpu_bo:
 	priv->clear = false;
-	if (!priv->pinned)
+	if (!priv->pinned && priv->gpu)
 		list_move(&priv->inactive,
 			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
 	*damage = NULL;
@@ -1978,6 +1999,17 @@ sna_pixmap_force_to_gpu(PixmapPtr pixmap, unsigned flags)
 	if (!sna_pixmap_move_to_gpu(pixmap, flags))
 		return NULL;
 
+	/* For large bo, try to keep only a single copy around */
+	if (!priv->gpu && priv->ptr) {
+		sna_damage_all(&priv->gpu_damage,
+			       pixmap->drawable.width,
+			       pixmap->drawable.height);
+		sna_damage_destroy(&priv->cpu_damage);
+		priv->undamaged = false;
+		list_del(&priv->list);
+		sna_pixmap_free_cpu(to_sna_from_pixmap(pixmap), priv);
+	}
+
 	return priv;
 }
 
@@ -2070,19 +2102,20 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 			if (n == 1 && !priv->pinned &&
 			    (box->x2 - box->x1) >= pixmap->drawable.width &&
 			    (box->y2 - box->y1) >= pixmap->drawable.height) {
-				priv->gpu_bo =
-					sna_replace(sna, pixmap,
-						    priv->gpu_bo,
-						    pixmap->devPrivate.ptr,
-						    pixmap->devKind);
+				ok = sna_replace(sna, pixmap,
+						 &priv->gpu_bo,
+						 pixmap->devPrivate.ptr,
+						 pixmap->devKind);
 			} else {
-				sna_write_boxes(sna, pixmap,
+				ok = sna_write_boxes(sna, pixmap,
 						priv->gpu_bo, 0, 0,
 						pixmap->devPrivate.ptr,
 						pixmap->devKind,
 						0, 0,
 						box, n);
 			}
+			if (!ok)
+				return NULL;
 		}
 	}
 
@@ -2098,7 +2131,7 @@ done:
 	if (DAMAGE_IS_ALL(priv->gpu_damage))
 		priv->undamaged = false;
 active:
-	if (!priv->pinned)
+	if (!priv->pinned && priv->gpu)
 		list_move(&priv->inactive, &sna->active_pixmaps);
 	priv->clear = false;
 	return priv;
@@ -2321,11 +2354,8 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	    !priv->pinned && nbox == 1 &&
 	    box->x1 <= 0 && box->y1 <= 0 &&
 	    box->x2 >= pixmap->drawable.width &&
-	    box->y2 >= pixmap->drawable.height) {
-		priv->gpu_bo =
-			sna_replace(sna, pixmap, priv->gpu_bo, bits, stride);
-		return TRUE;
-	}
+	    box->y2 >= pixmap->drawable.height)
+		return sna_replace(sna, pixmap, &priv->gpu_bo, bits, stride);
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
 	x += dx + drawable->x;
@@ -2341,15 +2371,13 @@ sna_put_image_upload_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		kgem_bo_destroy(&sna->kgem, src_bo);
 	}
 
-	if (!ok && gc->alu == GXcopy) {
-		sna_write_boxes(sna, pixmap,
-				priv->gpu_bo, 0, 0,
-				bits,
-				stride,
-				-x, -y,
-				box, nbox);
-		ok = TRUE;
-	}
+	if (!ok && gc->alu == GXcopy)
+		ok = sna_write_boxes(sna, pixmap,
+				     priv->gpu_bo, 0, 0,
+				     bits,
+				     stride,
+				     -x, -y,
+				     box, nbox);
 
 	return ok;
 }
@@ -3213,7 +3241,7 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			}
 		} else {
 			dst_priv->clear = false;
-			if (!dst_priv->pinned)
+			if (!dst_priv->pinned && dst_priv->gpu)
 				list_move(&dst_priv->inactive,
 					  &sna->active_pixmaps);
 		}
@@ -3400,10 +3428,10 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 				assert(src_dy + box->y1 + dst_pixmap->drawable.height <= src_pixmap->drawable.height);
 				assert(src_dx + box->x1 + dst_pixmap->drawable.width <= src_pixmap->drawable.width);
 
-				dst_priv->gpu_bo =
-					sna_replace(sna, dst_pixmap,
-						    dst_priv->gpu_bo,
-						    bits, stride);
+				if (!sna_replace(sna, dst_pixmap,
+						 &dst_priv->gpu_bo,
+						 bits, stride))
+					goto fallback;
 
 				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 					sna_damage_destroy(&dst_priv->cpu_damage);
@@ -3416,12 +3444,13 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			} else {
 				DBG(("%s: dst is on the GPU, src is on the CPU, uploading into dst\n",
 				     __FUNCTION__));
-				sna_write_boxes(sna, dst_pixmap,
-						dst_priv->gpu_bo, dst_dx, dst_dy,
-						src_pixmap->devPrivate.ptr,
-						src_pixmap->devKind,
-						src_dx, src_dy,
-						box, n);
+				if (!sna_write_boxes(sna, dst_pixmap,
+						     dst_priv->gpu_bo, dst_dx, dst_dy,
+						     src_pixmap->devPrivate.ptr,
+						     src_pixmap->devKind,
+						     src_dx, src_dy,
+						     box, n))
+					goto fallback;
 
 				if (!DAMAGE_IS_ALL(dst_priv->gpu_damage)) {
 					RegionTranslate(&region, dst_dx, dst_dy);
@@ -11502,7 +11531,7 @@ static void sna_accel_inactive(struct sna *sna)
 		count = bytes = 0;
 		list_for_each_entry(priv, &sna->inactive_clock[1], inactive)
 			if (!priv->pinned)
-				count++, bytes += priv->gpu_bo->size;
+				count++, bytes += kgem_bo_size(priv->gpu_bo);
 
 		DBG(("%s: trimming %d inactive GPU buffers, %d bytes\n",
 		    __FUNCTION__, count, bytes));
@@ -11528,6 +11557,9 @@ static void sna_accel_inactive(struct sna *sna)
 		priv = list_first_entry(&sna->inactive_clock[1],
 					struct sna_pixmap,
 					inactive);
+		assert(priv->gpu);
+		assert(priv->gpu_bo);
+
 		/* XXX Rather than discarding the GPU buffer here, we
 		 * could mark it purgeable and allow the shrinker to
 		 * reap its storage only under memory pressure.
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 535628c..7efbcf9 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -235,7 +235,7 @@ inline static void sna_blt_fill_one(struct sna *sna,
 
 	assert(x >= 0);
 	assert(y >= 0);
-	assert((y+height) * blt->bo[0]->pitch <= blt->bo[0]->size);
+	assert((y+height) * blt->bo[0]->pitch <= kgem_bo_size(blt->bo[0]));
 
 	if (!kgem_check_batch(kgem, 3))
 		sna_blt_fill_begin(sna, blt);
@@ -358,10 +358,10 @@ static void sna_blt_alpha_fixup_one(struct sna *sna,
 
 	assert(src_x >= 0);
 	assert(src_y >= 0);
-	assert((src_y + height) * blt->bo[0]->pitch <= blt->bo[0]->size);
+	assert((src_y + height) * blt->bo[0]->pitch <= kgem_bo_size(blt->bo[0]));
 	assert(dst_x >= 0);
 	assert(dst_y >= 0);
-	assert((dst_y + height) * blt->bo[1]->pitch <= blt->bo[1]->size);
+	assert((dst_y + height) * blt->bo[1]->pitch <= kgem_bo_size(blt->bo[1]));
 	assert(width > 0);
 	assert(height > 0);
 
@@ -409,10 +409,10 @@ static void sna_blt_copy_one(struct sna *sna,
 
 	assert(src_x >= 0);
 	assert(src_y >= 0);
-	assert((src_y + height) * blt->bo[0]->pitch <= blt->bo[0]->size);
+	assert((src_y + height) * blt->bo[0]->pitch <= kgem_bo_size(blt->bo[0]));
 	assert(dst_x >= 0);
 	assert(dst_y >= 0);
-	assert((dst_y + height) * blt->bo[1]->pitch <= blt->bo[1]->size);
+	assert((dst_y + height) * blt->bo[1]->pitch <= kgem_bo_size(blt->bo[1]));
 	assert(width > 0);
 	assert(height > 0);
 
@@ -787,7 +787,7 @@ inline static void _sna_blt_fill_box(struct sna *sna,
 
 	assert(box->x1 >= 0);
 	assert(box->y1 >= 0);
-	assert(box->y2 * blt->bo[0]->pitch <= blt->bo[0]->size);
+	assert(box->y2 * blt->bo[0]->pitch <= kgem_bo_size(blt->bo[0]));
 
 	if (!kgem_check_batch(kgem, 3))
 		sna_blt_fill_begin(sna, blt);
@@ -1106,7 +1106,7 @@ prepare_blt_copy(struct sna *sna,
 	PixmapPtr src = op->u.blt.src_pixmap;
 	struct sna_pixmap *priv = sna_pixmap(src);
 
-	if (priv->gpu_bo->tiling == I915_TILING_Y)
+	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo))
 		return FALSE;
 
 	if (!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL)) {
@@ -1176,9 +1176,8 @@ blt_put_composite(struct sna *sna,
 		data += (src_x - dst_x) * bpp / 8;
 		data += (src_y - dst_y) * pitch;
 
-		dst_priv->gpu_bo =
-			sna_replace(sna, op->dst.pixmap, dst_priv->gpu_bo,
-				    data, pitch);
+		sna_replace(sna, op->dst.pixmap, &dst_priv->gpu_bo,
+			    data, pitch);
 	} else {
 		BoxRec box;
 
@@ -1215,9 +1214,8 @@ fastcall static void blt_put_composite_box(struct sna *sna,
 		data += (box->y1 + op->u.blt.sy) * pitch;
 		data += (box->x1 + op->u.blt.sx) * bpp;
 
-		dst_priv->gpu_bo =
-			sna_replace(sna, op->dst.pixmap, op->dst.bo,
-				    data, pitch);
+		sna_replace(sna, op->dst.pixmap, &dst_priv->gpu_bo,
+			    data, pitch);
 	} else {
 		sna_write_boxes(sna, op->dst.pixmap,
 				op->dst.bo, op->dst.x, op->dst.y,
@@ -1250,9 +1248,8 @@ static void blt_put_composite_boxes(struct sna *sna,
 		data += (box->y1 + op->u.blt.sy) * pitch;
 		data += (box->x1 + op->u.blt.sx) * bpp;
 
-		dst_priv->gpu_bo =
-			sna_replace(sna, op->dst.pixmap, op->dst.bo,
-				    data, pitch);
+		sna_replace(sna, op->dst.pixmap, &dst_priv->gpu_bo,
+			    data, pitch);
 	} else {
 		sna_write_boxes(sna, op->dst.pixmap,
 				op->dst.bo, op->dst.x, op->dst.y,
@@ -1573,9 +1570,13 @@ sna_blt_composite(struct sna *sna,
 
 	tmp->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
 	priv = sna_pixmap_move_to_gpu(tmp->dst.pixmap, MOVE_WRITE | MOVE_READ);
-	if (priv == NULL || priv->gpu_bo->tiling == I915_TILING_Y) {
-		DBG(("%s: dst not on the gpu or using Y-tiling\n",
-		     __FUNCTION__));
+	if (priv == NULL) {
+		DBG(("%s: dst not attached\n", __FUNCTION__));
+		return FALSE;
+	}
+	if (!kgem_bo_can_blt(&sna->kgem, priv->gpu_bo)) {
+		DBG(("%s: can not blit to dst, tiling? %d, pitch? %d\n",
+		     __FUNCTION__, priv->gpu_bo->tiling, priv->gpu_bo->pitch));
 		return FALSE;
 	}
 
@@ -1747,7 +1748,7 @@ bool sna_blt_fill(struct sna *sna, uint8_t alu,
 
 	DBG(("%s(alu=%d, pixel=%x, bpp=%d)\n", __FUNCTION__, alu, pixel, bpp));
 
-	if (bo->tiling == I915_TILING_Y) {
+	if (!kgem_bo_can_blt(&sna->kgem, bo)) {
 		DBG(("%s: rejected due to incompatible Y-tiling\n",
 		     __FUNCTION__));
 		return FALSE;
@@ -1797,10 +1798,10 @@ bool sna_blt_copy(struct sna *sna, uint8_t alu,
 	return FALSE;
 #endif
 
-	if (src->tiling == I915_TILING_Y)
+	if (!kgem_bo_can_blt(&sna->kgem, src))
 		return FALSE;
 
-	if (dst->tiling == I915_TILING_Y)
+	if (!kgem_bo_can_blt(&sna->kgem, dst))
 		return FALSE;
 
 	if (!sna_blt_copy_init(sna, &op->base.u.blt,
@@ -1926,7 +1927,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 	DBG(("%s (%d, %08x, %d) x %d\n",
 	     __FUNCTION__, bpp, pixel, alu, nbox));
 
-	if (bo->tiling == I915_TILING_Y) {
+	if (!kgem_bo_can_blt(kgem, bo)) {
 		DBG(("%s: fallback -- dst uses Y-tiling\n", __FUNCTION__));
 		return FALSE;
 	}
@@ -2020,7 +2021,7 @@ Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
 
 			assert(box->x1 >= 0);
 			assert(box->y1 >= 0);
-			assert(box->y2 * bo->pitch <= bo->size);
+			assert(box->y2 * bo->pitch <= kgem_bo_size(bo));
 
 			b = kgem->batch + kgem->nbatch;
 			kgem->nbatch += 3;
@@ -2075,8 +2076,13 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 	    src_bo->tiling, dst_bo->tiling,
 	    src_bo->pitch, dst_bo->pitch));
 
-	if (src_bo->tiling == I915_TILING_Y || dst_bo->tiling == I915_TILING_Y)
+	if (!kgem_bo_can_blt(kgem, src_bo) || !kgem_bo_can_blt(kgem, dst_bo)) {
+		DBG(("%s: cannot blt to src? %d or dst? %d\n",
+		     __FUNCTION__,
+		     kgem_bo_can_blt(kgem, src_bo),
+		     kgem_bo_can_blt(kgem, dst_bo)));
 		return FALSE;
+	}
 
 	cmd = XY_SRC_COPY_BLT_CMD;
 	if (bpp == 32)
@@ -2087,7 +2093,7 @@ Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
 		cmd |= BLT_SRC_TILED;
 		src_pitch >>= 2;
 	}
-	assert(src_pitch < MAXSHORT);
+	assert(src_pitch <= MAXSHORT);
 
 	br13 = dst_bo->pitch;
 	if (kgem->gen >= 40 && dst_bo->tiling) {
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index f3ca212..14a7901 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -44,6 +44,27 @@
 
 /* XXX Need to avoid using GTT fenced access for I915_TILING_Y on 855GM */
 
+static Bool
+box_intersect(BoxPtr a, const BoxRec *b)
+{
+	if (a->x1 < b->x1)
+		a->x1 = b->x1;
+	if (a->x2 > b->x2)
+		a->x2 = b->x2;
+	if (a->y1 < b->y1)
+		a->y1 = b->y1;
+	if (a->y2 > b->y2)
+		a->y2 = b->y2;
+
+	return a->x1 < a->x2 && a->y1 < a->y2;
+}
+
+static inline bool must_tile(struct sna *sna, int width, int height)
+{
+	return (width  > sna->render.max_3d_size ||
+		height > sna->render.max_3d_size);
+}
+
 static void read_boxes_inplace(struct kgem *kgem,
 			       struct kgem_bo *bo, int16_t src_dx, int16_t src_dy,
 			       PixmapPtr pixmap, int16_t dst_dx, int16_t dst_dy,
@@ -105,13 +126,13 @@ void sna_read_boxes(struct sna *sna,
 	for (n = 0; n < nbox; n++) {
 		if (box[n].x1 + src_dx < 0 || box[n].y1 + src_dy < 0 ||
 		    (box[n].x2 + src_dx) * dst->drawable.bitsPerPixel/8 > src_bo->pitch ||
-		    (box[n].y2 + src_dy) * src_bo->pitch > src_bo->size)
+		    (box[n].y2 + src_dy) * src_bo->pitch > kgem_bo_size(src_bo))
 		{
 			FatalError("source out-of-bounds box[%d]=(%d, %d), (%d, %d) + (%d, %d), pitch=%d, size=%d\n", n,
 				   box[n].x1, box[n].y1,
 				   box[n].x2, box[n].y2,
 				   src_dx, src_dy,
-				   src_bo->pitch, src_bo->size);
+				   src_bo->pitch, kgem_bo_size(src_bo));
 		}
 	}
 #endif
@@ -132,7 +153,6 @@ fallback:
 		return;
 	}
 
-	/* Is it worth detiling? */
 	extents = box[0];
 	for (n = 1; n < nbox; n++) {
 		if (box[n].x1 < extents.x1)
@@ -145,11 +165,16 @@ fallback:
 		if (box[n].y2 > extents.y2)
 			extents.y2 = box[n].y2;
 	}
-	if ((extents.y2 - extents.y1) * src_bo->pitch < 4096)
-		goto fallback;
+	if (kgem_bo_is_mappable(kgem, src_bo)) {
+		/* Is it worth detiling? */
+		if ((extents.y2 - extents.y1) * src_bo->pitch < 4096)
+			goto fallback;
+	}
 
 	/* Try to avoid switching rings... */
-	if (src_bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) {
+	if (kgem->ring == KGEM_RENDER ||
+	    !kgem_bo_can_blt(kgem, src_bo) ||
+	    must_tile(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
 		PixmapRec tmp;
 
 		tmp.drawable.width  = extents.x2 - extents.x1;
@@ -161,38 +186,124 @@ fallback:
 		assert(tmp.drawable.width);
 		assert(tmp.drawable.height);
 
-		dst_bo = kgem_create_buffer_2d(kgem,
-					       tmp.drawable.width,
-					       tmp.drawable.height,
-					       tmp.drawable.bitsPerPixel,
-					       KGEM_BUFFER_LAST,
-					       &ptr);
-		if (!dst_bo)
-			goto fallback;
+		if (must_tile(sna, tmp.drawable.width, tmp.drawable.height)) {
+			BoxRec tile, stack[64], *clipped, *c;
+			int step;
+
+			if (n > ARRAY_SIZE(stack)) {
+				clipped = malloc(sizeof(BoxRec) * n);
+				if (clipped == NULL)
+					goto fallback;
+			} else
+				clipped = stack;
+
+			step = MIN(sna->render.max_3d_size,
+				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
+			DBG(("%s: tiling download, using %dx%d tiles\n",
+			     __FUNCTION__, step, step));
+
+			for (tile.y1 = extents.y1; tile.y1 < extents.y2; tile.y1 = tile.y2) {
+				tile.y2 = tile.y1 + step;
+				if (tile.y2 > extents.y2)
+					tile.y2 = extents.y2;
+
+				for (tile.x1 = extents.x1; tile.x1 < extents.x2; tile.x1 = tile.x2) {
+					tile.x2 = tile.x1 + step;
+					if (tile.x2 > extents.x2)
+						tile.x2 = extents.x2;
+
+					tmp.drawable.width  = tile.x2 - tile.x1;
+					tmp.drawable.height = tile.y2 - tile.y1;
+
+					c = clipped;
+					for (n = 0; n < nbox; n++) {
+						*c = box[n];
+						if (!box_intersect(c, &tile))
+							continue;
+
+						DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+						     __FUNCTION__,
+						     c->x1, c->y1,
+						     c->x2, c->y2,
+						     src_dx, src_dy,
+						     c->x1 - tile.x1,
+						     c->y1 - tile.y1));
+						c++;
+					}
+					if (c == clipped)
+						continue;
+
+					dst_bo = kgem_create_buffer_2d(kgem,
+								       tmp.drawable.width,
+								       tmp.drawable.height,
+								       tmp.drawable.bitsPerPixel,
+								       KGEM_BUFFER_LAST,
+								       &ptr);
+					if (!dst_bo)
+						goto fallback;
+
+					if (!sna->render.copy_boxes(sna, GXcopy,
+								    dst, src_bo, src_dx, src_dy,
+								    &tmp, dst_bo, -tile.x1, -tile.y1,
+								    clipped, c-clipped)) {
+						kgem_bo_destroy(&sna->kgem, dst_bo);
+						goto fallback;
+					}
+
+					kgem_bo_submit(&sna->kgem, dst_bo);
+					kgem_buffer_read_sync(kgem, dst_bo);
+
+					while (c-- != clipped) {
+						memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
+							   dst_bo->pitch, dst->devKind,
+							   c->x1 - tile.x1,
+							   c->y1 - tile.y1,
+							   c->x1 + dst_dx,
+							   c->y1 + dst_dy,
+							   c->x2 - c->x1,
+							   c->y2 - c->y1);
+					}
+
+					kgem_bo_destroy(&sna->kgem, dst_bo);
+				}
+			}
 
-		if (!sna->render.copy_boxes(sna, GXcopy,
-					    dst, src_bo, src_dx, src_dy,
-					    &tmp, dst_bo, -extents.x1, -extents.y1,
-					    box, nbox)) {
-			kgem_bo_destroy(&sna->kgem, dst_bo);
-			goto fallback;
-		}
+			if (clipped != stack)
+				free(clipped);
+		} else {
+			dst_bo = kgem_create_buffer_2d(kgem,
+						       tmp.drawable.width,
+						       tmp.drawable.height,
+						       tmp.drawable.bitsPerPixel,
+						       KGEM_BUFFER_LAST,
+						       &ptr);
+			if (!dst_bo)
+				goto fallback;
+
+			if (!sna->render.copy_boxes(sna, GXcopy,
+						    dst, src_bo, src_dx, src_dy,
+						    &tmp, dst_bo, -extents.x1, -extents.y1,
+						    box, nbox)) {
+				kgem_bo_destroy(&sna->kgem, dst_bo);
+				goto fallback;
+			}
 
-		kgem_bo_submit(&sna->kgem, dst_bo);
-		kgem_buffer_read_sync(kgem, dst_bo);
+			kgem_bo_submit(&sna->kgem, dst_bo);
+			kgem_buffer_read_sync(kgem, dst_bo);
+
+			for (n = 0; n < nbox; n++) {
+				memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
+					   dst_bo->pitch, dst->devKind,
+					   box[n].x1 - extents.x1,
+					   box[n].y1 - extents.y1,
+					   box[n].x1 + dst_dx,
+					   box[n].y1 + dst_dy,
+					   box[n].x2 - box[n].x1,
+					   box[n].y2 - box[n].y1);
+			}
 
-		for (n = 0; n < nbox; n++) {
-			memcpy_blt(ptr, dst->devPrivate.ptr, tmp.drawable.bitsPerPixel,
-				   dst_bo->pitch, dst->devKind,
-				   box[n].x1 - extents.x1,
-				   box[n].y1 - extents.y1,
-				   box[n].x1 + dst_dx,
-				   box[n].y1 + dst_dy,
-				   box[n].x2 - box[n].x1,
-				   box[n].y2 - box[n].y1);
+			kgem_bo_destroy(&sna->kgem, dst_bo);
 		}
-
-		kgem_bo_destroy(&sna->kgem, dst_bo);
 		return;
 	}
 
@@ -270,7 +381,7 @@ fallback:
 			assert(tmp_box[n].x1 + src_dx >= 0);
 			assert((tmp_box[n].x2 + src_dx) * dst->drawable.bitsPerPixel/8 <= src_bo->pitch);
 			assert(tmp_box[n].y1 + src_dy >= 0);
-			assert((tmp_box[n].y2 + src_dy) * src_bo->pitch <= src_bo->size);
+			assert((tmp_box[n].y2 + src_dy) * src_bo->pitch <= kgem_bo_size(src_bo));
 
 			b[0] = cmd;
 			b[1] = br13 | pitch;
@@ -299,7 +410,7 @@ fallback:
 		_kgem_set_mode(kgem, KGEM_BLT);
 		tmp_box += nbox_this_time;
 	} while (1);
-	assert(offset == dst_bo->size);
+	assert(offset == kgem_buffer_size(dst_bo));
 
 	kgem_buffer_read_sync(kgem, dst_bo);
 
@@ -331,12 +442,12 @@ fallback:
 
 		src += pitch * height;
 	} while (--nbox);
-	assert(src - (char *)ptr == dst_bo->size);
+	assert(src - (char *)ptr == kgem_buffer_size(dst_bo));
 	kgem_bo_destroy(kgem, dst_bo);
 	sna->blt_state.fill_bo = 0;
 }
 
-static void write_boxes_inplace(struct kgem *kgem,
+static bool write_boxes_inplace(struct kgem *kgem,
 				const void *src, int stride, int bpp, int16_t src_dx, int16_t src_dy,
 				struct kgem_bo *bo, int16_t dst_dx, int16_t dst_dy,
 				const BoxRec *box, int n)
@@ -346,11 +457,14 @@ static void write_boxes_inplace(struct kgem *kgem,
 	DBG(("%s x %d, handle=%d, tiling=%d\n",
 	     __FUNCTION__, n, bo->handle, bo->tiling));
 
+	if (!kgem_bo_is_mappable(kgem, bo))
+		return false;
+
 	kgem_bo_submit(kgem, bo);
 
 	dst = kgem_bo_map(kgem, bo);
 	if (dst == NULL)
-		return;
+		return false;
 
 	assert(dst != src);
 
@@ -364,7 +478,7 @@ static void write_boxes_inplace(struct kgem *kgem,
 		assert(box->x1 + dst_dx >= 0);
 		assert((box->x2 + dst_dx)*bpp <= 8*bo->pitch);
 		assert(box->y1 + dst_dy >= 0);
-		assert((box->y2 + dst_dy)*bo->pitch <= bo->size);
+		assert((box->y2 + dst_dy)*bo->pitch <= kgem_bo_size(bo));
 
 		assert(box->x1 + src_dx >= 0);
 		assert((box->x2 + src_dx)*bpp <= 8*stride);
@@ -377,6 +491,7 @@ static void write_boxes_inplace(struct kgem *kgem,
 			   box->x2 - box->x1, box->y2 - box->y1);
 		box++;
 	} while (--n);
+	return true;
 }
 
 static bool upload_inplace(struct kgem *kgem,
@@ -384,9 +499,6 @@ static bool upload_inplace(struct kgem *kgem,
 			   const BoxRec *box,
 			   int n, int bpp)
 {
-	if (DEBUG_NO_IO)
-		return kgem_bo_is_mappable(kgem, bo);
-
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
 	 * operation.
@@ -404,13 +516,14 @@ static bool upload_inplace(struct kgem *kgem,
 	return !kgem_bo_map_will_stall(kgem, bo);
 }
 
-void sna_write_boxes(struct sna *sna, PixmapPtr dst,
+bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
 		     struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
 		     const void *src, int stride, int16_t src_dx, int16_t src_dy,
 		     const BoxRec *box, int nbox)
 {
 	struct kgem *kgem = &sna->kgem;
 	struct kgem_bo *src_bo;
+	BoxRec extents;
 	void *ptr;
 	int offset;
 	int n, cmd, br13;
@@ -419,30 +532,30 @@ void sna_write_boxes(struct sna *sna, PixmapPtr dst,
 
 	if (upload_inplace(kgem, dst_bo, box, nbox, dst->drawable.bitsPerPixel)) {
 fallback:
-		write_boxes_inplace(kgem,
-				    src, stride, dst->drawable.bitsPerPixel, src_dx, src_dy,
-				    dst_bo, dst_dx, dst_dy,
-				    box, nbox);
-		return;
+		return write_boxes_inplace(kgem,
+					   src, stride, dst->drawable.bitsPerPixel, src_dx, src_dy,
+					   dst_bo, dst_dx, dst_dy,
+					   box, nbox);
 	}
 
-	/* Try to avoid switching rings... */
-	if (dst_bo->tiling == I915_TILING_Y || kgem->ring == KGEM_RENDER) {
-		PixmapRec tmp;
-		BoxRec extents;
+	extents = box[0];
+	for (n = 1; n < nbox; n++) {
+		if (box[n].x1 < extents.x1)
+			extents.x1 = box[n].x1;
+		if (box[n].x2 > extents.x2)
+			extents.x2 = box[n].x2;
 
-		extents = box[0];
-		for (n = 1; n < nbox; n++) {
-			if (box[n].x1 < extents.x1)
-				extents.x1 = box[n].x1;
-			if (box[n].x2 > extents.x2)
-				extents.x2 = box[n].x2;
+		if (box[n].y1 < extents.y1)
+			extents.y1 = box[n].y1;
+		if (box[n].y2 > extents.y2)
+			extents.y2 = box[n].y2;
+	}
 
-			if (box[n].y1 < extents.y1)
-				extents.y1 = box[n].y1;
-			if (box[n].y2 > extents.y2)
-				extents.y2 = box[n].y2;
-		}
+	/* Try to avoid switching rings... */
+	if (kgem->ring == KGEM_RENDER ||
+	    !kgem_bo_can_blt(kgem, dst_bo) ||
+	    must_tile(sna, extents.x2 - extents.x1, extents.y2 - extents.y1)) {
+		PixmapRec tmp;
 
 		tmp.drawable.width  = extents.x2 - extents.x1;
 		tmp.drawable.height = extents.y2 - extents.y1;
@@ -453,37 +566,130 @@ fallback:
 		assert(tmp.drawable.width);
 		assert(tmp.drawable.height);
 
-		src_bo = kgem_create_buffer_2d(kgem,
-					       tmp.drawable.width,
-					       tmp.drawable.height,
-					       tmp.drawable.bitsPerPixel,
-					       KGEM_BUFFER_WRITE_INPLACE,
-					       &ptr);
-		if (!src_bo)
-			goto fallback;
+		DBG(("%s: upload (%d, %d)x(%d, %d), max %dx%d\n",
+		     __FUNCTION__,
+		     extents.x1, extents.y1,
+		     tmp.drawable.width, tmp.drawable.height,
+		     sna->render.max_3d_size, sna->render.max_3d_size));
+		if (must_tile(sna, tmp.drawable.width, tmp.drawable.height)) {
+			BoxRec tile, stack[64], *clipped, *c;
+			int step;
+
+			step = MIN(sna->render.max_3d_size,
+				   8*(MAXSHORT&~63) / dst->drawable.bitsPerPixel);
+			DBG(("%s: tiling upload, using %dx%d tiles\n",
+			     __FUNCTION__, step, step));
+
+			if (n > ARRAY_SIZE(stack)) {
+				clipped = malloc(sizeof(BoxRec) * n);
+				if (clipped == NULL)
+					goto fallback;
+			} else
+				clipped = stack;
+
+			for (tile.y1 = extents.y1; tile.y1 < extents.y2; tile.y1 = tile.y2) {
+				tile.y2 = tile.y1 + step;
+				if (tile.y2 > extents.y2)
+					tile.y2 = extents.y2;
+
+				for (tile.x1 = extents.x1; tile.x1 < extents.x2; tile.x1 = tile.x2) {
+					tile.x2 = tile.x1 + step;
+					if (tile.x2 > extents.x2)
+						tile.x2 = extents.x2;
+
+					tmp.drawable.width  = tile.x2 - tile.x1;
+					tmp.drawable.height = tile.y2 - tile.y1;
+
+					src_bo = kgem_create_buffer_2d(kgem,
+								       tmp.drawable.width,
+								       tmp.drawable.height,
+								       tmp.drawable.bitsPerPixel,
+								       KGEM_BUFFER_WRITE_INPLACE,
+								       &ptr);
+					if (!src_bo)
+						goto fallback;
+
+					c = clipped;
+					for (n = 0; n < nbox; n++) {
+						*c = box[n];
+						if (!box_intersect(c, &tile))
+							continue;
+
+						DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+						     __FUNCTION__,
+						     c->x1, c->y1,
+						     c->x2, c->y2,
+						     src_dx, src_dy,
+						     c->x1 - tile.x1,
+						     c->y1 - tile.y1));
+						memcpy_blt(src, ptr, tmp.drawable.bitsPerPixel,
+							   stride, src_bo->pitch,
+							   c->x1 + src_dx,
+							   c->y1 + src_dy,
+							   c->x1 - tile.x1,
+							   c->y1 - tile.y1,
+							   c->x2 - c->x1,
+							   c->y2 - c->y1);
+						c++;
+					}
+
+					if (c != clipped)
+						n = sna->render.copy_boxes(sna, GXcopy,
+									   &tmp, src_bo, -tile.x1, -tile.y1,
+									   dst, dst_bo, dst_dx, dst_dy,
+									   clipped, c - clipped);
+					else
+						n = 1;
+
+					kgem_bo_destroy(&sna->kgem, src_bo);
+
+					if (!n)
+						goto fallback;
+				}
+			}
 
-		for (n = 0; n < nbox; n++) {
-			memcpy_blt(src, ptr, tmp.drawable.bitsPerPixel,
-				   stride, src_bo->pitch,
-				   box[n].x1 + src_dx,
-				   box[n].y1 + src_dy,
-				   box[n].x1 - extents.x1,
-				   box[n].y1 - extents.y1,
-				   box[n].x2 - box[n].x1,
-				   box[n].y2 - box[n].y1);
-		}
+			if (clipped != stack)
+				free(clipped);
+		} else {
+			src_bo = kgem_create_buffer_2d(kgem,
+						       tmp.drawable.width,
+						       tmp.drawable.height,
+						       tmp.drawable.bitsPerPixel,
+						       KGEM_BUFFER_WRITE_INPLACE,
+						       &ptr);
+			if (!src_bo)
+				goto fallback;
+
+			for (n = 0; n < nbox; n++) {
+				DBG(("%s: box(%d, %d), (%d, %d), src=(%d, %d), dst=(%d, %d)\n",
+				     __FUNCTION__,
+				     box[n].x1, box[n].y1,
+				     box[n].x2, box[n].y2,
+				     src_dx, src_dy,
+				     box[n].x1 - extents.x1,
+				     box[n].y1 - extents.y1));
+				memcpy_blt(src, ptr, tmp.drawable.bitsPerPixel,
+					   stride, src_bo->pitch,
+					   box[n].x1 + src_dx,
+					   box[n].y1 + src_dy,
+					   box[n].x1 - extents.x1,
+					   box[n].y1 - extents.y1,
+					   box[n].x2 - box[n].x1,
+					   box[n].y2 - box[n].y1);
+			}
 
-		n = sna->render.copy_boxes(sna, GXcopy,
-					   &tmp, src_bo, -extents.x1, -extents.y1,
-					   dst, dst_bo, dst_dx, dst_dy,
-					   box, nbox);
+			n = sna->render.copy_boxes(sna, GXcopy,
+						   &tmp, src_bo, -extents.x1, -extents.y1,
+						   dst, dst_bo, dst_dx, dst_dy,
+						   box, nbox);
 
-		kgem_bo_destroy(&sna->kgem, src_bo);
+			kgem_bo_destroy(&sna->kgem, src_bo);
 
-		if (!n)
-			goto fallback;
+			if (!n)
+				goto fallback;
+		}
 
-		return;
+		return true;
 	}
 
 	cmd = XY_SRC_COPY_BLT_CMD;
@@ -586,7 +792,7 @@ fallback:
 			box++;
 			offset += pitch * height;
 		} while (--nbox_this_time);
-		assert(offset == src_bo->size);
+		assert(offset == kgem_buffer_size(src_bo));
 
 		if (nbox) {
 			_kgem_submit(kgem);
@@ -597,6 +803,7 @@ fallback:
 	} while (nbox);
 
 	sna->blt_state.fill_bo = 0;
+	return true;
 }
 
 static void
@@ -823,7 +1030,7 @@ fallback:
 			box++;
 			offset += pitch * height;
 		} while (--nbox_this_time);
-		assert(offset == src_bo->size);
+		assert(offset == kgem_buffer_size(src_bo));
 
 		if (nbox) {
 			_kgem_submit(kgem);
@@ -951,11 +1158,12 @@ indirect_replace(struct sna *sna,
 	return ret;
 }
 
-struct kgem_bo *sna_replace(struct sna *sna,
-			    PixmapPtr pixmap,
-			    struct kgem_bo *bo,
-			    const void *src, int stride)
+bool sna_replace(struct sna *sna,
+		 PixmapPtr pixmap,
+		 struct kgem_bo **_bo,
+		 const void *src, int stride)
 {
+	struct kgem_bo *bo = *_bo;
 	struct kgem *kgem = &sna->kgem;
 	void *dst;
 
@@ -968,7 +1176,7 @@ struct kgem_bo *sna_replace(struct sna *sna,
 
 	if ((!kgem_bo_mapped(bo) || bo->rq) &&
 	    indirect_replace(sna, pixmap, bo, src, stride))
-		return bo;
+		return true;
 
 	if (kgem_bo_is_busy(bo)) {
 		struct kgem_bo *new_bo;
@@ -979,26 +1187,26 @@ struct kgem_bo *sna_replace(struct sna *sna,
 					pixmap->drawable.bitsPerPixel,
 					bo->tiling,
 					CREATE_GTT_MAP | CREATE_INACTIVE);
-		if (new_bo) {
-			kgem_bo_destroy(kgem, bo);
+		if (new_bo)
 			bo = new_bo;
-		}
 	}
 
 	if (bo->tiling == I915_TILING_NONE && bo->pitch == stride) {
-		kgem_bo_write(kgem, bo, src,
-			      (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8);
+		if (!kgem_bo_write(kgem, bo, src,
+				   (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8))
+			goto err;
 	} else {
 		if (kgem_bo_is_mappable(kgem, bo)) {
 			dst = kgem_bo_map(kgem, bo);
-			if (dst) {
-				memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
-					   stride, bo->pitch,
-					   0, 0,
-					   0, 0,
-					   pixmap->drawable.width,
-					   pixmap->drawable.height);
-			}
+			if (!dst)
+				goto err;
+
+			memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
+				   stride, bo->pitch,
+				   0, 0,
+				   0, 0,
+				   pixmap->drawable.width,
+				   pixmap->drawable.height);
 		} else {
 			BoxRec box;
 
@@ -1006,14 +1214,23 @@ struct kgem_bo *sna_replace(struct sna *sna,
 			box.x2 = pixmap->drawable.width;
 			box.y2 = pixmap->drawable.height;
 
-			sna_write_boxes(sna, pixmap,
-					bo, 0, 0,
-					src, stride, 0, 0,
-					&box, 1);
+			if (!sna_write_boxes(sna, pixmap,
+					     bo, 0, 0,
+					     src, stride, 0, 0,
+					     &box, 1))
+				goto err;
 		}
 	}
 
-	return bo;
+	if (bo != *_bo)
+		kgem_bo_destroy(kgem, *_bo);
+	*_bo = bo;
+	return true;
+
+err:
+	if (bo != *_bo)
+		kgem_bo_destroy(kgem, bo);
+	return false;
 }
 
 struct kgem_bo *sna_replace__xor(struct sna *sna,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index f9151e0..7077f36 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -696,6 +696,11 @@ static int sna_render_picture_downsample(struct sna *sna,
 	DBG(("%s: creating temporary GPU bo %dx%d\n",
 	     __FUNCTION__, width, height));
 
+	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, ow, oh,
+						dst_x, dst_y);
+
 	tmp = screen->CreatePixmap(screen,
 				   width, height,
 				   pixmap->drawable.depth,
@@ -1306,9 +1311,6 @@ do_fixup:
 		return 0;
 	}
 
-	/* XXX Convolution filter? */
-	memset(ptr, 0, channel->bo->size);
-
 	/* Composite in the original format to preserve idiosyncracies */
 	if (picture->format == channel->pict_format)
 		dst = pixman_image_create_bits(picture->format,
@@ -1354,7 +1356,7 @@ do_fixup:
 					       w, h);
 			pixman_image_unref(src);
 		} else {
-			memset(ptr, 0, channel->bo->size);
+			memset(ptr, 0, kgem_buffer_size(channel->bo));
 			dst = src;
 		}
 	}
@@ -1528,7 +1530,7 @@ sna_render_composite_redirect(struct sna *sna,
 	if (op->dst.pixmap->drawable.width <= sna->render.max_3d_size) {
 		int y1, y2;
 
-		assert(op->dst.pixmap.drawable.height > sna->render.max_3d_size);
+		assert(op->dst.pixmap->drawable.height > sna->render.max_3d_size);
 		y1 =  y + op->dst.y;
 		y2 =  y1 + height;
 		y1 &= y1 & (64 - 1);
diff --git a/src/sna/sna_video.c b/src/sna/sna_video.c
index 7b759a7..cec0473 100644
--- a/src/sna/sna_video.c
+++ b/src/sna/sna_video.c
@@ -100,7 +100,7 @@ sna_video_buffer(struct sna *sna,
 		 struct sna_video_frame *frame)
 {
 	/* Free the current buffer if we're going to have to reallocate */
-	if (video->buf && video->buf->size < frame->size)
+	if (video->buf && kgem_bo_size(video->buf) < frame->size)
 		sna_video_free_buffers(sna, video);
 
 	if (video->buf == NULL)
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index d99f884..1aaf972 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -271,7 +271,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 			return BadAlloc;
 		}
 
-		assert(frame.bo->size >= frame.size);
+		assert(kgem_bo_size(frame.bo) >= frame.size);
 	} else {
 		frame.bo = kgem_create_linear(&sna->kgem, frame.size);
 		if (frame.bo == NULL) {
commit 03211f4b0b7e32b6d7dc28e60be72db204b8c8d4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 23:18:05 2012 +0000

    sna: Guard against the upload buffer growing past the maximum bo size
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a5c47d6..86a4372 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3010,7 +3010,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
 	/* we should never be asked to create anything TOO large */
-	assert(size < kgem->max_cpu_buffer);
+	assert(size < kgem->max_cpu_size);
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
 		if (flags == KGEM_BUFFER_LAST && bo->write) {
@@ -3053,6 +3053,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 #if !DBG_NO_MAP_UPLOAD
 	/* Be a little more generous and hope to hold fewer mmappings */
 	alloc = ALIGN(2*size, kgem->partial_buffer_size);
+	if (alloc >= kgem->max_cpu_size)
+		alloc = PAGE_ALIGN(size);
 	if (kgem->has_cpu_bo) {
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
commit 2afd49a28429cdeb36583cfc31cc9b1742c1fb83
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 22:18:30 2012 +0000

    sna: Limit inplace upload buffers to maximum mappable size
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45323
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 95b67cf..a5c47d6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3001,6 +3001,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 {
 	struct kgem_partial_bo *bo;
 	unsigned offset, alloc;
+	struct kgem_bo *old;
 
 	DBG(("%s: size=%d, flags=%x [write?=%d, inplace?=%d, last?=%d]\n",
 	     __FUNCTION__, size, flags,
@@ -3008,6 +3009,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 	     !!(flags & KGEM_BUFFER_INPLACE),
 	     !!(flags & KGEM_BUFFER_LAST)));
 	assert(size);
+	/* we should never be asked to create anything TOO large */
+	assert(size < kgem->max_cpu_buffer);
 
 	list_for_each_entry(bo, &kgem->partial, base.list) {
 		if (flags == KGEM_BUFFER_LAST && bo->write) {
@@ -3047,14 +3050,10 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		break;
 	}
 
-	/* Be a little more generous and hope to hold fewer mmappings */
-	bo = NULL;
-
 #if !DBG_NO_MAP_UPLOAD
-	alloc = ALIGN(size, kgem->partial_buffer_size);
+	/* Be a little more generous and hope to hold fewer mmappings */
+	alloc = ALIGN(2*size, kgem->partial_buffer_size);
 	if (kgem->has_cpu_bo) {
-		struct kgem_bo *old;
-
 		bo = malloc(sizeof(*bo));
 		if (bo == NULL)
 			return NULL;
@@ -3098,14 +3097,18 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			bo->mmapped = true;
 
 			alloc = bo->base.size;
+			goto init;
 		} else {
 			bo->base.refcnt = 0; /* for valgrind */
 			kgem_bo_free(kgem, &bo->base);
 			bo = NULL;
 		}
-	} else if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
-		struct kgem_bo *old;
+	}
+
+	if (alloc > kgem->aperture_mappable / 4)
+		flags &= ~KGEM_BUFFER_INPLACE;
 
+	if ((flags & KGEM_BUFFER_WRITE_INPLACE) == KGEM_BUFFER_WRITE_INPLACE) {
 		/* The issue with using a GTT upload buffer is that we may
 		 * cause eviction-stalls in order to free up some GTT space.
 		 * An is-mappable? ioctl could help us detect when we are
@@ -3160,6 +3163,7 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 				bo->base.refcnt = 1;
 
 				alloc = bo->base.size;
+				goto init;
 			} else {
 				kgem_bo_free(kgem, &bo->base);
 				bo = NULL;
@@ -3169,88 +3173,84 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 #else
 	alloc = ALIGN(size, 64*1024);
 #endif
+	/* Be more parsimonious with pwrite/pread buffers */
+	if ((flags & KGEM_BUFFER_INPLACE) == 0)
+		alloc = PAGE_ALIGN(size);
+	flags &= ~KGEM_BUFFER_INPLACE;
+
+	old = NULL;
+	if ((flags & KGEM_BUFFER_WRITE) == 0)
+		old = search_linear_cache(kgem, alloc, 0);
+	if (old == NULL)
+		old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
+	if (old) {
+		DBG(("%s: reusing ordinary handle %d for io\n",
+		     __FUNCTION__, old->handle));
+		alloc = old->size;
+		bo = partial_bo_alloc(alloc);
+		if (bo == NULL)
+			return NULL;
 
-	if (bo == NULL) {
-		struct kgem_bo *old;
-
-		/* Be more parsimonious with pwrite/pread buffers */
-		if ((flags & KGEM_BUFFER_INPLACE) == 0)
-			alloc = PAGE_ALIGN(size);
-		flags &= ~KGEM_BUFFER_INPLACE;
+		memcpy(&bo->base, old, sizeof(*old));
+		if (old->rq)
+			list_replace(&old->request,
+				     &bo->base.request);
+		else
+			list_init(&bo->base.request);
+		list_replace(&old->vma, &bo->base.vma);
+		list_init(&bo->base.list);
+		free(old);
+		bo->base.refcnt = 1;
+
+		bo->need_io = flags & KGEM_BUFFER_WRITE;
+		bo->base.io = true;
+	} else {
+		bo = malloc(sizeof(*bo));
+		if (bo == NULL)
+			return NULL;
 
-		old = NULL;
-		if ((flags & KGEM_BUFFER_WRITE) == 0)
-			old = search_linear_cache(kgem, alloc, 0);
-		if (old == NULL)
-			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
+		old = search_linear_cache(kgem, alloc,
+					  CREATE_INACTIVE | CREATE_CPU_MAP);
 		if (old) {
-			DBG(("%s: reusing ordinary handle %d for io\n",
+			DBG(("%s: reusing cpu map handle=%d for buffer\n",
 			     __FUNCTION__, old->handle));
-			alloc = old->size;
-			bo = partial_bo_alloc(alloc);
-			if (bo == NULL)
-				return NULL;
 
 			memcpy(&bo->base, old, sizeof(*old));
 			if (old->rq)
-				list_replace(&old->request,
-					     &bo->base.request);
+				list_replace(&old->request, &bo->base.request);
 			else
 				list_init(&bo->base.request);
 			list_replace(&old->vma, &bo->base.vma);
 			list_init(&bo->base.list);
 			free(old);
 			bo->base.refcnt = 1;
-
-			bo->need_io = flags & KGEM_BUFFER_WRITE;
-			bo->base.io = true;
 		} else {
-			bo = malloc(sizeof(*bo));
-			if (bo == NULL)
-				return NULL;
-
-			old = search_linear_cache(kgem, alloc,
-						  CREATE_INACTIVE | CREATE_CPU_MAP);
-			if (old) {
-				DBG(("%s: reusing cpu map handle=%d for buffer\n",
-				     __FUNCTION__, old->handle));
-
-				memcpy(&bo->base, old, sizeof(*old));
-				if (old->rq)
-					list_replace(&old->request, &bo->base.request);
-				else
-					list_init(&bo->base.request);
-				list_replace(&old->vma, &bo->base.vma);
-				list_init(&bo->base.list);
-				free(old);
-				bo->base.refcnt = 1;
-			} else {
-				if (!__kgem_bo_init(&bo->base,
-						    gem_create(kgem->fd, alloc),
-						    alloc)) {
-					free(bo);
-					return NULL;
-				}
-				DBG(("%s: created handle=%d for buffer\n",
-				     __FUNCTION__, bo->base.handle));
-
-				bo->base.domain = DOMAIN_CPU;
-			}
-
-			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
-			if (bo->mem == NULL) {
-				kgem_bo_free(kgem, &bo->base);
+			if (!__kgem_bo_init(&bo->base,
+					    gem_create(kgem->fd, alloc),
+					    alloc)) {
+				free(bo);
 				return NULL;
 			}
+			DBG(("%s: created handle=%d for buffer\n",
+			     __FUNCTION__, bo->base.handle));
 
-			if (flags & KGEM_BUFFER_WRITE)
-				kgem_bo_sync__cpu(kgem, &bo->base);
+			bo->base.domain = DOMAIN_CPU;
+		}
 
-			bo->need_io = false;
-			bo->base.io = true;
-			bo->mmapped = true;
+		bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+		if (bo->mem == NULL) {
+			kgem_bo_free(kgem, &bo->base);
+			return NULL;
 		}
+
+		if (flags & KGEM_BUFFER_WRITE)
+			kgem_bo_sync__cpu(kgem, &bo->base);
+
+		bo->need_io = false;
+		bo->base.io = true;
+		bo->mmapped = true;
 	}
+init:
 	bo->base.reusable = false;
 	assert(bo->base.size == alloc);
 	assert(!bo->need_io || !bo->base.needs_flush);
commit 8f4bae01e39392faa8978090db4cbe28fa00b013
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 20:28:44 2012 +0000

    sna/video: Ensure the video pixmap is on the GPU
    
    The presumption that the pixmap is the scanout and so will always be
    pinned is false if there is a shadow or under a compositor. In those
    cases, the pixmap may be idle and so the GPU bo reaped. This was
    compounded by that the video path did not mark the pixmap as busy. So
    whilst watching a video under xfce4 with compositing enabled (has to be
    a non-GL compositor) the video would suddenly stall.
    
    Reported-by: Paul Neumann <paul104x at yahoo.de>
    Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=45279
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index bc14967..5910daf 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -476,13 +476,6 @@ sna_drawable_move_to_gpu(DrawablePtr drawable, unsigned flags)
 	return sna_pixmap_move_to_gpu(get_drawable_pixmap(drawable), flags) != NULL;
 }
 
-static inline Bool
-sna_pixmap_is_gpu(PixmapPtr pixmap)
-{
-	struct sna_pixmap *priv = pixmap ? sna_pixmap(pixmap) : NULL;
-	return priv && priv->gpu_bo;
-}
-
 static inline struct kgem_bo *sna_pixmap_get_bo(PixmapPtr pixmap)
 {
 	return sna_pixmap(pixmap)->gpu_bo;
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 6f69135..d99f884 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -240,7 +240,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		return BadAlloc;
 	}
 
-	if (!sna_pixmap_is_gpu(pixmap)) {
+	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: attempting to render to a non-GPU pixmap\n",
 		     __FUNCTION__));
 		return BadAlloc;
commit d02bd80b2f9f8ee3840be5d8d6b8d389192c57f5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 20:12:49 2012 +0000

    sna: Use a proxy rather than a temporary bo for too-tall but thin targets
    
    If the render target is thin enough to fit within the 3D pipeline, but is
    too tall, we can fudge the address of the origin and coordinates to fit
    within the constraints of the pipeline.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index d15ec60..ab825aa 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -187,6 +187,11 @@ static struct sna_damage *_sna_damage_create(void)
 	return damage;
 }
 
+struct sna_damage *sna_damage_create(void)
+{
+	return _sna_damage_create();
+}
+
 static bool _sna_damage_create_boxes(struct sna_damage *damage,
 				     int count)
 {
@@ -1395,6 +1400,21 @@ int _sna_damage_get_boxes(struct sna_damage *damage, BoxPtr *boxes)
 }
 #endif
 
+struct sna_damage *_sna_damage_combine(struct sna_damage *l,
+				       struct sna_damage *r,
+				       int dx, int dy)
+{
+	if (r->dirty)
+		__sna_damage_reduce(r);
+
+	if (pixman_region_not_empty(&r->region)) {
+		pixman_region_translate(&r->region, dx, dy);
+		l = __sna_damage_add(l, &r->region);
+	}
+
+	return l;
+}
+
 void __sna_damage_destroy(struct sna_damage *damage)
 {
 	free_list(&damage->embedded_box.list);
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index 228aba0..422a124 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -27,6 +27,18 @@ struct sna_damage {
 #define DAMAGE_MARK_ALL(ptr) ((struct sna_damage *)(((uintptr_t)(ptr))|1))
 #define DAMAGE_PTR(ptr) ((struct sna_damage *)(((uintptr_t)(ptr))&~1))
 
+struct sna_damage *sna_damage_create(void);
+
+struct sna_damage *_sna_damage_combine(struct sna_damage *l,
+				       struct sna_damage *r,
+				       int dx, int dy);
+static inline void sna_damage_combine(struct sna_damage **l,
+				      struct sna_damage *r,
+				      int dx, int dy)
+{
+	*l = _sna_damage_combine(*l, r, dx, dy);
+}
+
 fastcall struct sna_damage *_sna_damage_add(struct sna_damage *damage,
 					    RegionPtr region);
 static inline void sna_damage_add(struct sna_damage **damage,
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 513b489..f9151e0 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -1519,14 +1519,39 @@ sna_render_composite_redirect(struct sna *sna,
 	if (!width || !height)
 		return FALSE;
 
-	priv = sna_pixmap(op->dst.pixmap);
-	if (priv->gpu_bo == NULL) {
+	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
+	if (priv == NULL) {
 		DBG(("%s: fallback -- no GPU bo attached\n", __FUNCTION__));
 		return FALSE;
 	}
 
-	if (!sna_pixmap_move_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE))
-		return FALSE;
+	if (op->dst.pixmap->drawable.width <= sna->render.max_3d_size) {
+		int y1, y2;
+
+		assert(op->dst.pixmap.drawable.height > sna->render.max_3d_size);
+		y1 =  y + op->dst.y;
+		y2 =  y1 + height;
+		y1 &= y1 & (64 - 1);
+		y2 = ALIGN(y2, 64);
+
+		if (y2 - y1 <= sna->render.max_3d_size) {
+			t->box.x2 = t->box.x1 = op->dst.x;
+			t->box.y2 = t->box.y1 = op->dst.y;
+			t->real_bo = priv->gpu_bo;
+			t->real_damage = op->damage;
+			if (op->damage) {
+				t->damage = sna_damage_create();
+				op->damage = &t->damage;
+			}
+
+			op->dst.bo = kgem_create_proxy(priv->gpu_bo,
+						       y1 * priv->gpu_bo->pitch,
+						       (y2 - y1) * priv->gpu_bo->pitch);
+			op->dst.y += -y1;
+			op->dst.height = y2 - y1;
+			return TRUE;
+		}
+	}
 
 	/* We can process the operation in a single pass,
 	 * but the target is too large for the 3D pipeline.
@@ -1557,12 +1582,17 @@ sna_render_composite_redirect(struct sna *sna,
 	}
 
 	t->real_bo = priv->gpu_bo;
+	t->real_damage = op->damage;
+	if (op->damage) {
+		t->damage = sna_damage_create();
+		op->damage = &t->damage;
+	}
+
 	op->dst.bo = bo;
 	op->dst.x = -x;
 	op->dst.y = -y;
 	op->dst.width  = width;
 	op->dst.height = height;
-	op->damage = NULL;
 	return TRUE;
 }
 
@@ -1573,13 +1603,20 @@ sna_render_composite_redirect_done(struct sna *sna,
 	const struct sna_composite_redirect *t = &op->redirect;
 
 	if (t->real_bo) {
-		DBG(("%s: copying temporary to dst\n", __FUNCTION__));
-
-		sna_blt_copy_boxes(sna, GXcopy,
-				   op->dst.bo, -t->box.x1, -t->box.y1,
-				   t->real_bo, 0, 0,
-				   op->dst.pixmap->drawable.bitsPerPixel,
-				   &t->box, 1);
+		if (t->box.x2 > t->box.x1) {
+			DBG(("%s: copying temporary to dst\n", __FUNCTION__));
+			sna_blt_copy_boxes(sna, GXcopy,
+					   op->dst.bo, -t->box.x1, -t->box.y1,
+					   t->real_bo, 0, 0,
+					   op->dst.pixmap->drawable.bitsPerPixel,
+					   &t->box, 1);
+		}
+		if (t->damage) {
+			sna_damage_combine(t->real_damage, t->damage,
+					   t->box.x1 - op->dst.x,
+					   t->box.y1 - op->dst.y);
+			__sna_damage_destroy(t->damage);
+		}
 
 		kgem_bo_destroy(&sna->kgem, op->dst.bo);
 	}
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index c4711f4..b23a8a7 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -86,6 +86,7 @@ struct sna_composite_op {
 
 	struct sna_composite_redirect {
 		struct kgem_bo *real_bo;
+		struct sna_damage **real_damage, *damage;
 		BoxRec box;
 	} redirect;
 
commit ea433995a37f0a9d1579f74029418f22a63a2bc0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 19:34:39 2012 +0000

    sna: Experiment with a partial source
    
    If the source is thin enough such that the pitch is within the sampler's
    constraints and the sample size is small enough, just fudge the origin
    of the bo such that it can be sampled.
    
    This avoids having to create a temporary bo and use the BLT to extract
    it and helps, for example, firefox-asteroids which uses an 64x11200
    texture atlas.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4df29d2..95b67cf 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2967,7 +2967,6 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 	DBG(("%s: target handle=%d, offset=%d, length=%d, io=%d\n",
 	     __FUNCTION__, target->handle, offset, length, target->io));
 	assert(target->proxy == NULL);
-	assert(target->tiling == I915_TILING_NONE);
 
 	bo = __kgem_bo_alloc(target->handle, length);
 	if (bo == NULL)
@@ -2975,9 +2974,11 @@ struct kgem_bo *kgem_create_proxy(struct kgem_bo *target,
 
 	bo->io = target->io;
 	bo->dirty = target->dirty;
-	bo->reusable = false;
+	bo->tiling = target->tiling;
+	bo->pitch = target->pitch;
 	bo->proxy = kgem_bo_reference(target);
 	bo->delta = offset;
+	bo->reusable = false;
 	return bo;
 }
 
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 9d7857c..513b489 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -800,6 +800,103 @@ cleanup_tmp:
 	return ret;
 }
 
+static int
+sna_render_picture_partial(struct sna *sna,
+			   PicturePtr picture,
+			   struct sna_composite_channel *channel,
+			   int16_t x, int16_t y,
+			   int16_t w, int16_t h,
+			   int16_t dst_x, int16_t dst_y)
+{
+	struct kgem_bo *bo = NULL;
+	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
+	BoxRec box;
+
+	DBG(("%s (%d, %d)x(%d, %d) [dst=(%d, %d)]\n",
+	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
+
+	box.x1 = x;
+	box.y1 = y;
+	box.x2 = x + w;
+	box.y2 = y + h;
+	if (channel->transform)
+		pixman_transform_bounds(channel->transform, &box);
+
+	DBG(("%s sample=(%d, %d), (%d, %d): (%d, %d)/(%d, %d), repeat=%d\n", __FUNCTION__,
+	     box.x1, box.y1, box.x2, box.y2, w, h,
+	     pixmap->drawable.width, pixmap->drawable.height,
+	     channel->repeat));
+
+	if (channel->repeat == RepeatNone || channel->repeat == RepeatPad) {
+		if (box.x1 < 0)
+			box.x1 = 0;
+		if (box.y1 < 0)
+			box.y1 = 0;
+		if (box.x2 > pixmap->drawable.width)
+			box.x2 = pixmap->drawable.width;
+		if (box.y2 > pixmap->drawable.height)
+			box.y2 = pixmap->drawable.height;
+	} else {
+		if (box.x1 < 0 ||
+		    box.y1 < 0 ||
+		    box.x2 > pixmap->drawable.width ||
+		    box.y2 > pixmap->drawable.height) {
+			box.x1 = box.y1 = 0;
+			box.x2 = pixmap->drawable.width;
+			box.y2 = pixmap->drawable.height;
+
+			if (!channel->is_affine)
+				return 0;
+		}
+	}
+
+	/* Presume worst case tile-row alignment for Y-tiling */
+	box.y1 = box.y1 & (64 - 1);
+	box.y2 = ALIGN(box.y2, 64);
+	w = box.x2 - box.x1;
+	h = box.y2 - box.y1;
+	DBG(("%s box=(%d, %d), (%d, %d): (%d, %d)/(%d, %d)\n", __FUNCTION__,
+	     box.x1, box.y1, box.x2, box.y2, w, h,
+	     pixmap->drawable.width, pixmap->drawable.height));
+	if (w <= 0 || h <= 0 || h > sna->render.max_3d_size)
+		return 0;
+
+	memset(&channel->embedded_transform,
+	       0,
+	       sizeof(channel->embedded_transform));
+	channel->embedded_transform.matrix[0][0] = 1 << 16;
+	channel->embedded_transform.matrix[0][2] = 0;
+	channel->embedded_transform.matrix[1][1] = 1 << 16;
+	channel->embedded_transform.matrix[1][2] = -box.y1 << 16;
+	channel->embedded_transform.matrix[2][2] = 1 << 16;
+	if (channel->transform)
+		pixman_transform_multiply(&channel->embedded_transform,
+					  &channel->embedded_transform,
+					  channel->transform);
+	channel->transform = &channel->embedded_transform;
+
+	if (use_cpu_bo(sna, pixmap, &box)) {
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
+			return 0;
+
+		bo = sna_pixmap(pixmap)->cpu_bo;
+	} else {
+		if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+			return 0;
+
+		bo = sna_pixmap(pixmap)->gpu_bo;
+	}
+
+	channel->offset[0] = x - dst_x;
+	channel->offset[1] = y - dst_y;
+	channel->scale[0] = 1.f/pixmap->drawable.width;
+	channel->scale[1] = 1.f/h;
+	channel->width  = pixmap->drawable.width;
+	channel->height = h;
+	channel->bo = kgem_create_proxy(bo, box.y1 * bo->pitch, h * bo->pitch);
+	return channel->bo != NULL;
+}
+
 int
 sna_render_picture_extract(struct sna *sna,
 			   PicturePtr picture,
@@ -825,6 +922,12 @@ sna_render_picture_extract(struct sna *sna,
 		return -1;
 	}
 
+	if (pixmap->drawable.width < sna->render.max_3d_size &&
+	    sna_render_picture_partial(sna, picture, channel,
+				       x, y, w, h,
+				       dst_x, dst_y))
+		return 1;
+
 	ow = w;
 	oh = h;
 
commit ad910949beb0c42e2d7b864f030b055ca40adacd
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 18:37:39 2012 +0000

    sna: Mark diagonal lines as partial write
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index abcf559..f2997d0 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -6337,7 +6337,7 @@ fallback:
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable, gc,
-							       n > 2)))
+							       !(data.flags & 4 && n == 2))))
 		goto out;
 
 	/* Install FillSpans in case we hit a fallback path in fbPolyLine */
@@ -7249,7 +7249,7 @@ fallback:
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(drawable, &data.region,
 					     drawable_gc_flags(drawable, gc,
-							       n > 1)))
+							       !(data.flags & 4 && n > 1))))
 		goto out;
 
 	/* Install FillSpans in case we hit a fallback path in fbPolySegment */
commit b9c83e0b2c42f7657b1854a7cdcbc0407957c382
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 18:24:01 2012 +0000

    sna/video: Add some DBG messages to track the error paths
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45279
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index 09f1551..6f69135 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -235,11 +235,16 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	     drw_x, drw_y, drw_w, drw_h,
 	     id, width, height, sync));
 
-	if (buf == 0)
+	if (buf == 0) {
+		DBG(("%s: garbage video buffer\n", __FUNCTION__));
 		return BadAlloc;
+	}
 
-	if (!sna_pixmap_is_gpu(pixmap))
+	if (!sna_pixmap_is_gpu(pixmap)) {
+		DBG(("%s: attempting to render to a non-GPU pixmap\n",
+		     __FUNCTION__));
 		return BadAlloc;
+	}
 
 	sna_video_frame_init(sna, video, id, width, height, &frame);
 
@@ -261,16 +266,21 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		}
 
 		frame.bo = kgem_create_for_name(&sna->kgem, *(uint32_t*)buf);
-		if (frame.bo == NULL)
+		if (frame.bo == NULL) {
+			DBG(("%s: failed to open bo\n", __FUNCTION__));
 			return BadAlloc;
+		}
 
 		assert(frame.bo->size >= frame.size);
 	} else {
 		frame.bo = kgem_create_linear(&sna->kgem, frame.size);
-		if (frame.bo == NULL)
+		if (frame.bo == NULL) {
+			DBG(("%s: failed to allocate bo\n", __FUNCTION__));
 			return BadAlloc;
+		}
 
 		if (!sna_video_copy_data(sna, video, &frame, buf)) {
+			DBG(("%s: failed to copy frame\n", __FUNCTION__));
 			kgem_bo_destroy(&sna->kgem, frame.bo);
 			return BadAlloc;
 		}
@@ -281,13 +291,14 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 					      &clip->extents);
 
 	ret = Success;
-	if (sna->render.video(sna, video, &frame, clip,
+	if (!sna->render.video(sna, video, &frame, clip,
 			      src_w, src_h,
 			      drw_w, drw_h,
-			      pixmap))
-		DamageDamageRegion(drawable, clip);
-	else
+			      pixmap)) {
+		DBG(("%s: failed to render video\n", __FUNCTION__));
 		ret = BadAlloc;
+	} else
+		DamageDamageRegion(drawable, clip);
 
 	kgem_bo_destroy(&sna->kgem, frame.bo);
 
commit 45d831c8b11d6f9f38c0cb9b5877541a971f2c67
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 15:45:17 2012 +0000

    sna: Consolidate routines to choose the destination bo
    
    Combine the two very similar routines that decided if we should render
    into the GPU bo, CPU bo or shadow pixmap into a single function.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 840012f..abcf559 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1684,50 +1684,48 @@ box_inplace(PixmapPtr pixmap, const BoxRec *box)
 	return ((box->x2 - box->x1) * (box->y2 - box->y1) * pixmap->drawable.bitsPerPixel >> 15) >= sna->kgem.half_cpu_cache_pages;
 }
 
-static inline Bool
-_sna_drawable_use_gpu_bo(DrawablePtr drawable,
-			 const BoxRec *box,
-			 struct sna_damage ***damage)
+static inline struct kgem_bo *
+sna_drawable_use_bo(DrawablePtr drawable,
+		    const BoxRec *box,
+		    struct sna_damage ***damage)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	BoxRec extents;
 	int16_t dx, dy;
+	int ret;
+
+	DBG(("%s((%d, %d), (%d, %d))...\n", __FUNCTION__,
+	     box->x1, box->y1, box->x2, box->y2));
 
 	assert_drawable_contains_box(drawable, box);
 
 	if (priv == NULL) {
 		DBG(("%s: not attached\n", __FUNCTION__));
-		return FALSE;
+		return NULL;
 	}
 
-	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
-		*damage = NULL;
-		if (!priv->pinned)
-			list_move(&priv->inactive,
-				  &to_sna_from_pixmap(pixmap)->active_pixmaps);
-		priv->clear = false;
-		return TRUE;
-	}
+	if (DAMAGE_IS_ALL(priv->gpu_damage))
+		goto use_gpu_bo;
 
 	if (DAMAGE_IS_ALL(priv->cpu_damage))
-		return FALSE;
+		goto use_cpu_bo;
 
 	if (priv->gpu_bo == NULL) {
 		if (!priv->gpu) {
 			DBG(("%s: untiled, will not force allocation\n",
 			     __FUNCTION__));
-			return FALSE;
+			goto use_cpu_bo;
 		}
 
 		if (priv->cpu_damage && !box_inplace(pixmap, box)) {
 			DBG(("%s: damaged with a small operation, will not force allocation\n",
 			     __FUNCTION__));
-			return FALSE;
+			goto use_cpu_bo;
 		}
 
 		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE | MOVE_READ))
-			return FALSE;
+			goto use_cpu_bo;
 
 		DBG(("%s: allocated GPU bo for operation\n", __FUNCTION__));
 		goto done;
@@ -1745,34 +1743,39 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
 	     extents.x1, extents.y1, extents.x2, extents.y2));
 
 	if (priv->gpu_damage) {
-		int ret = sna_damage_contains_box(priv->gpu_damage, &extents);
+		if (!priv->cpu_damage) {
+			if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
+							       &extents)) {
+				DBG(("%s: region wholly contained within GPU damage\n",
+				     __FUNCTION__));
+				goto use_gpu_bo;
+			} else {
+				DBG(("%s: partial GPU damage with no CPU damage, continuing to use GPU\n",
+				     __FUNCTION__));
+				goto move_to_gpu;
+			}
+		}
+
+		ret = sna_damage_contains_box(priv->gpu_damage, &extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within GPU damage\n",
 			     __FUNCTION__));
-			*damage = NULL;
-			return TRUE;
+			goto use_gpu_bo;
 		}
 
-		if (ret != PIXMAN_REGION_OUT &&
-		    (priv->cpu_bo || kgem_bo_is_busy(priv->gpu_bo))) {
-			DBG(("%s: region partially contained within busy GPU damage\n",
+		if (ret != PIXMAN_REGION_OUT) {
+			DBG(("%s: region partially contained within GPU damage\n",
 			     __FUNCTION__));
 			goto move_to_gpu;
 		}
 	}
 
-	if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) {
-		DBG(("%s: busy CPU bo, prefer to use GPU\n",
-		     __FUNCTION__));
-		goto move_to_gpu;
-	}
-
 	if (priv->cpu_damage) {
-		int ret = sna_damage_contains_box(priv->cpu_damage, &extents);
+		ret = sna_damage_contains_box(priv->cpu_damage, &extents);
 		if (ret == PIXMAN_REGION_IN) {
 			DBG(("%s: region wholly contained within CPU damage\n",
 			     __FUNCTION__));
-			return FALSE;
+			goto use_cpu_bo;
 		}
 
 		if (box_inplace(pixmap, box)) {
@@ -1780,11 +1783,10 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable,
 			goto move_to_gpu;
 		}
 
-		if (ret != PIXMAN_REGION_OUT &&
-		    (priv->cpu_bo || !kgem_bo_is_busy(priv->gpu_bo))) {
-			DBG(("%s: region partially contained within idle CPU damage\n",
+		if (ret != PIXMAN_REGION_OUT) {
+			DBG(("%s: region partially contained within CPU damage\n",
 			     __FUNCTION__));
-			return FALSE;
+			goto use_cpu_bo;
 		}
 	}
 
@@ -1792,81 +1794,58 @@ move_to_gpu:
 	if (!sna_pixmap_move_area_to_gpu(pixmap, &extents,
 					 MOVE_READ | MOVE_WRITE)) {
 		DBG(("%s: failed to move-to-gpu, fallback\n", __FUNCTION__));
-		return FALSE;
+		goto use_cpu_bo;
 	}
 
 done:
-	*damage = DAMAGE_IS_ALL(priv->gpu_damage) ? NULL : &priv->gpu_damage;
-	return TRUE;
-}
-
-static inline Bool
-sna_drawable_use_gpu_bo(DrawablePtr drawable,
-			const BoxRec *box,
-			struct sna_damage ***damage)
-{
-	Bool ret;
-
-	DBG(("%s((%d, %d), (%d, %d))...\n", __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2));
-
-	ret = _sna_drawable_use_gpu_bo(drawable, box, damage);
-
-	DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2, ret));
-
-	return ret;
-}
+	if (sna_damage_is_all(&priv->gpu_damage,
+			      pixmap->drawable.width,
+			      pixmap->drawable.height))
+		*damage = NULL;
+	else
+		*damage = &priv->gpu_damage;
 
-static inline Bool
-_sna_drawable_use_cpu_bo(DrawablePtr drawable,
-			 const BoxRec *box,
-			 struct sna_damage ***damage)
-{
-	PixmapPtr pixmap = get_drawable_pixmap(drawable);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
-	struct sna *sna = to_sna_from_pixmap(pixmap);
-	BoxRec extents;
-	int16_t dx, dy;
+	DBG(("%s: using GPU bo with damage? %d\n",
+	     __FUNCTION__, *damage != NULL));
+	return priv->gpu_bo;
 
-	assert_drawable_contains_box(drawable, box);
+use_gpu_bo:
+	priv->clear = false;
+	if (!priv->pinned)
+		list_move(&priv->inactive,
+			  &to_sna_from_pixmap(pixmap)->active_pixmaps);
+	*damage = NULL;
+	DBG(("%s: using whole GPU bo\n", __FUNCTION__));
+	return priv->gpu_bo;
+
+use_cpu_bo:
+	if (priv->cpu_bo == NULL)
+		return NULL;
 
-	if (priv == NULL || priv->cpu_bo == NULL)
-		return FALSE;
+	/* Continue to use the shadow pixmap once mapped */
+	if (pixmap->devPrivate.ptr) {
+		/* But only if we do not need to sync the CPU bo */
+		if (!kgem_bo_is_busy(priv->cpu_bo))
+			return NULL;
 
-	if (!sna->kgem.has_llc && priv->cpu_bo->domain == DOMAIN_CPU)
-		return FALSE;
+		/* Both CPU and GPU are busy, prefer to use the GPU */
+		if (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))
+			goto move_to_gpu;
 
-	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
-		*damage = NULL;
-		return TRUE;
+		priv->mapped = false;
+		pixmap->devPrivate.ptr = NULL;
 	}
 
-	get_drawable_deltas(drawable, pixmap, &dx, &dy);
-
-	extents = *box;
-	extents.x1 += dx;
-	extents.x2 += dx;
-	extents.y1 += dy;
-	extents.y2 += dy;
-
-	*damage = &priv->cpu_damage;
-	if (priv->cpu_damage &&
-	     sna_damage_contains_box__no_reduce(priv->cpu_damage, &extents))
+	if (sna_damage_is_all(&priv->cpu_damage,
+			      pixmap->drawable.width,
+			      pixmap->drawable.height))
 		*damage = NULL;
+	else
+		*damage = &priv->cpu_damage;
 
-	return TRUE;
-}
-
-static inline Bool
-sna_drawable_use_cpu_bo(DrawablePtr drawable,
-			const BoxRec *box,
-			struct sna_damage ***damage)
-{
-	Bool ret = _sna_drawable_use_cpu_bo(drawable, box, damage);
-	DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
-	     box->x1, box->y1, box->x2, box->y2, ret));
-	return ret;
+	DBG(("%s: using CPU bo with damage? %d\n",
+	     __FUNCTION__, *damage != NULL));
+	return priv->cpu_bo;
 }
 
 PixmapPtr
@@ -2617,21 +2596,22 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	BoxRec *box;
 	int16_t dx, dy;
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	if (!sna_drawable_use_gpu_bo(&pixmap->drawable,
-				     &region->extents,
-				     &damage))
+	bo = sna_drawable_use_bo(&pixmap->drawable, &region->extents, &damage);
+	if (bo == NULL)
 		return false;
 
-	if (priv->gpu_bo->tiling == I915_TILING_Y) {
+	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
-		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_X))
+		assert(bo == sna_pixmap_get_bo(pixmap));
+		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
+		if (bo == NULL)
 			return false;
 	}
 
@@ -2663,7 +2643,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		void *ptr;
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -2696,8 +2676,8 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		if (drawable->bitsPerPixel == 32)
 			b[0] |= 3 << 20;
 		b[0] |= ((box->x1 - x) & 7) << 17;
-		b[1] = priv->gpu_bo->pitch;
-		if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+		b[1] = bo->pitch;
+		if (sna->kgem.gen >= 40 && bo->tiling) {
 			b[0] |= BLT_DST_TILED;
 			b[1] >>= 2;
 		}
@@ -2705,8 +2685,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 		b[1] |= rop << 16;
 		b[2] = box->y1 << 16 | box->x1;
 		b[3] = box->y2 << 16 | box->x2;
-		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-				      priv->gpu_bo,
+		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
 				      I915_GEM_DOMAIN_RENDER << 16 |
 				      I915_GEM_DOMAIN_RENDER |
 				      KGEM_RELOC_FENCED,
@@ -2735,7 +2714,6 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna_damage **damage;
 	struct kgem_bo *bo;
 	int16_t dx, dy;
@@ -2744,18 +2722,16 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
 	if (gc->alu != GXcopy)
 		return false;
 
-	if (!sna_drawable_use_gpu_bo(&pixmap->drawable,
-				     &region->extents,
-				     &damage))
+	bo = sna_drawable_use_bo(&pixmap->drawable, &region->extents, &damage);
+	if (bo == NULL)
 		return false;
 
-	bo = priv->gpu_bo;
 	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
-		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_X))
+		assert(bo == sna_pixmap_get_bo(pixmap));
+		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
+		if (bo == NULL)
 			return false;
-
-		bo = priv->gpu_bo;
 	}
 
 	assert_pixmap_contains_box(pixmap, RegionExtents(region));
@@ -4511,6 +4487,8 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
+	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	RegionRec region;
 	unsigned flags;
 	uint32_t color;
@@ -4543,32 +4521,18 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 	if (!PM_IS_SOLID(drawable, gc->planemask))
 		goto fallback;
 
-	if (gc_is_solid(gc, &color)) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-		struct sna_damage **damage;
-
-		DBG(("%s: trying solid fill [alu=%d, pixel=%08lx] blt paths\n",
-		     __FUNCTION__, gc->alu, gc->fgPixel));
-
-		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
-		    sna_fill_spans_blt(drawable,
-				       priv->gpu_bo, damage,
-				       gc, color, n, pt, width, sorted,
-				       &region.extents, flags & 2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
-		    sna_fill_spans_blt(drawable,
-				       priv->cpu_bo, damage,
-				       gc, color, n, pt, width, sorted,
-				       &region.extents, flags & 2))
-			return;
-	} else {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-		struct sna_damage **damage;
+	bo = sna_drawable_use_bo(drawable, &region.extents, &damage);
+	if (bo) {
+		if (gc_is_solid(gc, &color)) {
+			DBG(("%s: trying solid fill [alu=%d, pixel=%08lx] blt paths\n",
+			     __FUNCTION__, gc->alu, gc->fgPixel));
 
-		/* Try converting these to a set of rectangles instead */
-		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
+			sna_fill_spans_blt(drawable,
+					   bo, damage,
+					   gc, color, n, pt, width, sorted,
+					   &region.extents, flags & 2);
+		} else {
+			/* Try converting these to a set of rectangles instead */
 			xRectangle *rect;
 			int i;
 
@@ -4587,12 +4551,12 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
 
 			if (gc->fillStyle == FillTiled) {
 				i = sna_poly_fill_rect_tiled_blt(drawable,
-								 priv->gpu_bo, damage,
+								 bo, damage,
 								 gc, n, rect,
 								 &region.extents, flags & 2);
 			} else {
 				i = sna_poly_fill_rect_stippled_blt(drawable,
-								    priv->gpu_bo, damage,
+								    bo, damage,
 								    gc, n, rect,
 								    &region.extents, flags & 2);
 			}
@@ -4661,6 +4625,11 @@ out:
 	RegionUninit(&region);
 }
 
+struct sna_copy_plane {
+	struct sna_damage **damage;
+	struct kgem_bo *bo;
+};
+
 static void
 sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		    BoxPtr box, int n,
@@ -4670,7 +4639,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	struct sna_copy_plane *arg = closure;
 	PixmapPtr bitmap = (PixmapPtr)_bitmap;
 	uint32_t br00, br13;
 	int16_t dx, dy;
@@ -4681,12 +4650,12 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		return;
 
 	get_drawable_deltas(drawable, pixmap, &dx, &dy);
-	if (closure)
-		sna_damage_add_boxes(closure, box, n, dx, dy);
+	if (arg->damage)
+		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
 
 	br00 = 3 << 20;
-	br13 = priv->gpu_bo->pitch;
-	if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+	br13 = arg->bo->pitch;
+	if (sna->kgem.gen >= 40 && arg->bo->tiling) {
 		br00 |= BLT_DST_TILED;
 		br13 >>= 2;
 	}
@@ -4714,7 +4683,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 		if (src_stride <= 128) {
 			src_stride = ALIGN(src_stride, 8) / 4;
 			if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
-			    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
 			    !kgem_check_reloc(&sna->kgem, 1)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -4727,7 +4696,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 			b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
 			b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
 			b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-					      priv->gpu_bo,
+					      arg->bo,
 					      I915_GEM_DOMAIN_RENDER << 16 |
 					      I915_GEM_DOMAIN_RENDER |
 					      KGEM_RELOC_FENCED,
@@ -4756,7 +4725,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 			void *ptr;
 
 			if (!kgem_check_batch(&sna->kgem, 8) ||
-			    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+			    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
 			    !kgem_check_reloc(&sna->kgem, 2)) {
 				_kgem_submit(&sna->kgem);
 				_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -4776,7 +4745,7 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
 			b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
 			b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
 			b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-					      priv->gpu_bo,
+					      arg->bo,
 					      I915_GEM_DOMAIN_RENDER << 16 |
 					      I915_GEM_DOMAIN_RENDER |
 					      KGEM_RELOC_FENCED,
@@ -4824,8 +4793,8 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 {
 	PixmapPtr dst_pixmap = get_drawable_pixmap(drawable);
 	PixmapPtr src_pixmap = get_drawable_pixmap(source);
-	struct sna_pixmap *priv = sna_pixmap(dst_pixmap);
 	struct sna *sna = to_sna_from_pixmap(dst_pixmap);
+	struct sna_copy_plane *arg = closure;
 	int16_t dx, dy;
 	int bit = ffs(bitplane) - 1;
 	uint32_t br00, br13;
@@ -4841,14 +4810,14 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	sy += dy;
 
 	get_drawable_deltas(drawable, dst_pixmap, &dx, &dy);
-	if (closure)
-		sna_damage_add_boxes(closure, box, n, dx, dy);
+	if (arg->damage)
+		sna_damage_add_boxes(arg->damage, box, n, dx, dy);
 
 	br00 = XY_MONO_SRC_COPY;
 	if (drawable->bitsPerPixel == 32)
 		br00 |= 3 << 20;
-	br13 = priv->gpu_bo->pitch;
-	if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+	br13 = arg->bo->pitch;
+	if (sna->kgem.gen >= 40 && arg->bo->tiling) {
 		br00 |= BLT_DST_TILED;
 		br13 >>= 2;
 	}
@@ -4873,7 +4842,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 		     sx, sy, bx1, bx2));
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, arg->bo, NULL) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -4997,7 +4966,7 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 		b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
 		b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
 		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-				      priv->gpu_bo,
+				      arg->bo,
 				      I915_GEM_DOMAIN_RENDER << 16 |
 				      I915_GEM_DOMAIN_RENDER |
 				      KGEM_RELOC_FENCED,
@@ -5029,7 +4998,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	PixmapPtr pixmap = get_drawable_pixmap(dst);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	RegionRec region, *ret = NULL;
-	struct sna_damage **damage;
+	struct sna_copy_plane arg;
 
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d\n", __FUNCTION__,
 	     src_x, src_y, dst_x, dst_y, w, h));
@@ -5098,18 +5067,21 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (!PM_IS_SOLID(dst, gc->planemask))
 		goto fallback;
 
-	if (sna_drawable_use_gpu_bo(dst, &region.extents, &damage)) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-		if (priv->gpu_bo->tiling != I915_TILING_Y ||
-		    sna_pixmap_change_tiling(pixmap, I915_TILING_X)) {
-			RegionUninit(&region);
-			return miDoCopy(src, dst, gc,
-					src_x, src_y,
-					w, h,
-					dst_x, dst_y,
-					src->depth == 1 ? sna_copy_bitmap_blt :sna_copy_plane_blt,
-					(Pixel)bit, damage);
+	arg.bo = sna_drawable_use_bo(dst, &region.extents, &arg.damage);
+	if (arg.bo) {
+		if (arg.bo->tiling == I915_TILING_Y) {
+			assert(arg.bo == sna_pixmap_get_bo(pixmap));
+			arg.bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
+			if (arg.bo == NULL)
+				goto fallback;
 		}
+		RegionUninit(&region);
+		return miDoCopy(src, dst, gc,
+				src_x, src_y,
+				w, h,
+				dst_x, dst_y,
+				src->depth == 1 ? sna_copy_bitmap_blt :sna_copy_plane_blt,
+				(Pixel)bit, &arg);
 	}
 
 fallback:
@@ -5294,21 +5266,14 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
 	}
 
 	if (PM_IS_SOLID(drawable, gc->planemask) && gc_is_solid(gc, &color)) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
 		struct sna_damage **damage;
+		struct kgem_bo *bo;
 
 		DBG(("%s: trying solid fill [%08lx] blt paths\n",
 		     __FUNCTION__, gc->fgPixel));
 
-		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
-		    sna_poly_point_blt(drawable,
-				       priv->gpu_bo, damage,
-				       gc, color, mode, n, pt, flags & 2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
-		    sna_poly_point_blt(drawable,
-				       priv->cpu_bo, damage,
+		if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+		    sna_poly_point_blt(drawable, bo, damage,
 				       gc, color, mode, n, pt, flags & 2))
 			return;
 	}
@@ -6187,32 +6152,23 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		     __FUNCTION__, (unsigned)color));
 
 		if (data.flags & 4) {
-			if (sna_drawable_use_gpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
-			    sna_poly_line_blt(drawable,
-					      priv->gpu_bo, data.damage,
-					      gc, color, mode, n, pt,
-					      &data.region.extents,
-					      data.flags & 2))
-				return;
-
-			if (sna_drawable_use_cpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
+			data.bo = sna_drawable_use_bo(drawable,
+						      &data.region.extents,
+						      &data.damage);
+			if (data.bo &&
 			    sna_poly_line_blt(drawable,
-					      priv->cpu_bo, data.damage,
+					      data.bo, data.damage,
 					      gc, color, mode, n, pt,
 					      &data.region.extents,
 					      data.flags & 2))
 				return;
 		} else { /* !rectilinear */
 			if (use_zero_spans(drawable, gc, &data.region.extents) &&
-			    sna_drawable_use_gpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
+			    (data.bo = sna_drawable_use_bo(drawable,
+							   &data.region.extents,
+							   &data.damage)) &&
 			    sna_poly_zero_line_blt(drawable,
-						   priv->gpu_bo, data.damage,
+						   data.bo, data.damage,
 						   gc, mode, n, pt,
 						   &data.region.extents,
 						   data.flags & 2))
@@ -6221,7 +6177,8 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 		}
 	} else if (data.flags & 4) {
 		/* Try converting these to a set of rectangles instead */
-		if (sna_drawable_use_gpu_bo(drawable, &data.region.extents, &data.damage)) {
+		data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage);
+		if (data.bo) {
 			DDXPointRec p1, p2;
 			xRectangle *rect;
 			int i;
@@ -6272,13 +6229,13 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 
 			if (gc->fillStyle == FillTiled) {
 				i = sna_poly_fill_rect_tiled_blt(drawable,
-								 priv->gpu_bo, data.damage,
+								 data.bo, data.damage,
 								 gc, n - 1, rect + 1,
 								 &data.region.extents,
 								 data.flags & 2);
 			} else {
 				i = sna_poly_fill_rect_stippled_blt(drawable,
-								    priv->gpu_bo, data.damage,
+								    data.bo, data.damage,
 								    gc, n - 1, rect + 1,
 								    &data.region.extents,
 								    data.flags & 2);
@@ -6292,7 +6249,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
 
 spans_fallback:
 	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    sna_drawable_use_gpu_bo(drawable, &data.region.extents, &data.damage)) {
+	    (data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
 		DBG(("%s: converting line into spans\n", __FUNCTION__));
 		get_drawable_deltas(drawable, data.pixmap, &data.dx, &data.dy);
 		sna_gc(gc)->priv = &data;
@@ -6304,7 +6261,7 @@ spans_fallback:
 
 			if (!sna_fill_init_blt(&fill,
 					       data.sna, data.pixmap,
-					       priv->gpu_bo, gc->alu, color))
+					       data.bo, gc->alu, color))
 				goto fallback;
 
 			data.op = &fill;
@@ -6335,7 +6292,6 @@ spans_fallback:
 			 * using fgPixel and bgPixel so we need to reset state between
 			 * FillSpans.
 			 */
-			data.bo = priv->gpu_bo;
 			sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
 			gc->ops = &sna_gc_ops__tmp;
 
@@ -7120,32 +7076,22 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		     __FUNCTION__, (unsigned)color, data.flags));
 
 		if (data.flags & 4) {
-			if (sna_drawable_use_gpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
-			    sna_poly_segment_blt(drawable,
-						 priv->gpu_bo, data.damage,
-						 gc, color, n, seg,
-						 &data.region.extents,
-						 data.flags & 2))
-				return;
-
-			if (sna_drawable_use_cpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
+			if ((data.bo = sna_drawable_use_bo(drawable,
+							   &data.region.extents,
+							   &data.damage)) &&
 			    sna_poly_segment_blt(drawable,
-						 priv->cpu_bo, data.damage,
+						 data.bo, data.damage,
 						 gc, color, n, seg,
 						 &data.region.extents,
 						 data.flags & 2))
 				return;
 		} else {
 			if (use_zero_spans(drawable, gc, &data.region.extents) &&
-			    sna_drawable_use_gpu_bo(drawable,
-						    &data.region.extents,
-						    &data.damage) &&
+			    (data.bo = sna_drawable_use_bo(drawable,
+							   &data.region.extents,
+							   &data.damage)) &&
 			    sna_poly_zero_segment_blt(drawable,
-						      priv->gpu_bo, data.damage,
+						      data.bo, data.damage,
 						      gc, n, seg,
 						      &data.region.extents,
 						      data.flags & 2))
@@ -7153,7 +7099,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 		}
 	} else if (data.flags & 4) {
 		/* Try converting these to a set of rectangles instead */
-		if (sna_drawable_use_gpu_bo(drawable, &data.region.extents, &data.damage)) {
+		if ((data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
 			xRectangle *rect;
 			int i;
 
@@ -7196,13 +7142,13 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 
 			if (gc->fillStyle == FillTiled) {
 				i = sna_poly_fill_rect_tiled_blt(drawable,
-								 priv->gpu_bo, data.damage,
+								 data.bo, data.damage,
 								 gc, n, rect,
 								 &data.region.extents,
 								 data.flags & 2);
 			} else {
 				i = sna_poly_fill_rect_stippled_blt(drawable,
-								    priv->gpu_bo, data.damage,
+								    data.bo, data.damage,
 								    gc, n, rect,
 								    &data.region.extents,
 								    data.flags & 2);
@@ -7216,7 +7162,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
 
 spans_fallback:
 	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    sna_drawable_use_gpu_bo(drawable, &data.region.extents, &data.damage)) {
+	    (data.bo = sna_drawable_use_bo(drawable, &data.region.extents, &data.damage))) {
 		void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
 		int i;
 
@@ -7249,7 +7195,7 @@ spans_fallback:
 
 			if (!sna_fill_init_blt(&fill,
 					       data.sna, data.pixmap,
-					       priv->gpu_bo, gc->alu, color))
+					       data.bo, gc->alu, color))
 				goto fallback;
 
 			data.op = &fill;
@@ -7278,7 +7224,6 @@ spans_fallback:
 
 			fill.done(data.sna, &fill);
 		} else {
-			data.bo = priv->gpu_bo;
 			sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
 			gc->ops = &sna_gc_ops__tmp;
 
@@ -7793,6 +7738,7 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	RegionRec region;
 	unsigned flags;
 
@@ -7829,25 +7775,17 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
 	if (gc->lineStyle == LineSolid &&
 	    gc->joinStyle == JoinMiter &&
 	    PM_IS_SOLID(drawable, gc->planemask)) {
-		struct sna_pixmap *priv = sna_pixmap(pixmap);
-
 		DBG(("%s: trying blt solid fill [%08lx] paths\n",
 		     __FUNCTION__, gc->fgPixel));
-
-		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
-		    sna_poly_rectangle_blt(drawable, priv->gpu_bo, damage,
-					   gc, n, r, &region.extents, flags&2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
-		    sna_poly_rectangle_blt(drawable, priv->cpu_bo, damage,
+		if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
+		    sna_poly_rectangle_blt(drawable, bo, damage,
 					   gc, n, r, &region.extents, flags&2))
 			return;
 	} else {
 		/* Not a trivial outline, but we still maybe able to break it
 		 * down into simpler operations that we can accelerate.
 		 */
-		if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
+		if (sna_drawable_use_bo(drawable, &region.extents, &damage)) {
 			miPolyRectangle(drawable, gc, n, r);
 			return;
 		}
@@ -7966,8 +7904,8 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 		goto fallback;
 
 	if (use_wide_spans(drawable, gc, &data.region.extents) &&
-	    sna_drawable_use_gpu_bo(drawable,
-				    &data.region.extents, &data.damage)) {
+	    (data.bo = sna_drawable_use_bo(drawable,
+					   &data.region.extents, &data.damage))) {
 		uint32_t color;
 
 		DBG(("%s: converting arcs into spans\n", __FUNCTION__));
@@ -7978,7 +7916,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
 
 			if (!sna_fill_init_blt(&fill,
 					       data.sna, data.pixmap,
-					       priv->gpu_bo, gc->alu, color))
+					       data.bo, gc->alu, color))
 				goto fallback;
 
 			data.op = &fill;
@@ -8311,9 +8249,9 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 		goto fallback;
 
 	if (use_wide_spans(draw, gc, &data.region.extents) &&
-	    sna_drawable_use_gpu_bo(draw,
-				    &data.region.extents,
-				    &data.damage)) {
+	    (data.bo = sna_drawable_use_bo(draw,
+					   &data.region.extents,
+					   &data.damage))) {
 		uint32_t color;
 
 		sna_gc(gc)->priv = &data;
@@ -8324,7 +8262,7 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 
 			if (!sna_fill_init_blt(&fill,
 					       data.sna, data.pixmap,
-					       priv->gpu_bo, gc->alu, color))
+					       data.bo, gc->alu, color))
 				goto fallback;
 
 			data.op = &fill;
@@ -8351,7 +8289,6 @@ sna_poly_fill_polygon(DrawablePtr draw, GCPtr gc,
 			miFillPolygon(draw, gc, shape, mode, n, pt);
 			fill.done(data.sna, &fill);
 		} else {
-			data.bo = priv->gpu_bo;
 			sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
 			gc->ops = &sna_gc_ops__tmp;
 
@@ -9654,6 +9591,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	RegionRec region;
 	unsigned flags;
 	uint32_t color;
@@ -9712,48 +9650,25 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
 		DBG(("%s: solid fill [%08x], testing for blt\n",
 		     __FUNCTION__, color));
 
-		if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
+		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
 		    sna_poly_fill_rect_blt(draw,
-					   priv->gpu_bo, damage,
-					   gc, color, n, rect,
-					   &region.extents, flags & 2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
-		    sna_poly_fill_rect_blt(draw,
-					   priv->cpu_bo, damage,
+					   bo, damage,
 					   gc, color, n, rect,
 					   &region.extents, flags & 2))
 			return;
 	} else if (gc->fillStyle == FillTiled) {
 		DBG(("%s: tiled fill, testing for blt\n", __FUNCTION__));
 
-		if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
-		    sna_poly_fill_rect_tiled_blt(draw,
-						 priv->gpu_bo, damage,
-						 gc, n, rect,
-						 &region.extents, flags & 2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
-		    sna_poly_fill_rect_tiled_blt(draw,
-						 priv->cpu_bo, damage,
+		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
+		    sna_poly_fill_rect_tiled_blt(draw, bo, damage,
 						 gc, n, rect,
 						 &region.extents, flags & 2))
 			return;
 	} else {
 		DBG(("%s: stippled fill, testing for blt\n", __FUNCTION__));
 
-		if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
-		    sna_poly_fill_rect_stippled_blt(draw,
-						    priv->gpu_bo, damage,
-						    gc, n, rect,
-						    &region.extents, flags & 2))
-			return;
-
-		if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
-		    sna_poly_fill_rect_stippled_blt(draw,
-						    priv->cpu_bo, damage,
+		if ((bo = sna_drawable_use_bo(draw, &region.extents, &damage)) &&
+		    sna_poly_fill_rect_stippled_blt(draw, bo, damage,
 						    gc, n, rect,
 						    &region.extents, flags & 2))
 			return;
@@ -9887,9 +9802,9 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 		goto fallback;
 
 	if (use_wide_spans(draw, gc, &data.region.extents) &&
-	    sna_drawable_use_gpu_bo(draw,
-				    &data.region.extents,
-				    &data.damage)) {
+	    (data.bo = sna_drawable_use_bo(draw,
+					   &data.region.extents,
+					   &data.damage))) {
 		uint32_t color;
 
 		get_drawable_deltas(draw, data.pixmap, &data.dx, &data.dy);
@@ -9900,7 +9815,7 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 
 			if (!sna_fill_init_blt(&fill,
 					       data.sna, data.pixmap,
-					       priv->gpu_bo, gc->alu, color))
+					       data.bo, gc->alu, color))
 				goto fallback;
 
 			data.op = &fill;
@@ -9927,7 +9842,6 @@ sna_poly_fill_arc(DrawablePtr draw, GCPtr gc, int n, xArc *arc)
 			miPolyFillArc(draw, gc, n, arc);
 			fill.done(data.sna, &fill);
 		} else {
-			data.bo = priv->gpu_bo;
 			sna_gc_ops__tmp.FillSpans = sna_fill_spans__gpu;
 			gc->ops = &sna_gc_ops__tmp;
 
@@ -10016,7 +9930,6 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct kgem_bo *bo;
 	struct sna_damage **damage;
 	const BoxRec *extents, *last_extents;
@@ -10034,12 +9947,13 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		return false;
 	}
 
-	if (!sna_drawable_use_gpu_bo(drawable, &clip->extents, &damage))
+	bo = sna_drawable_use_bo(drawable, &clip->extents, &damage);
+	if (bo == NULL)
 		return false;
 
-	bo = priv->gpu_bo;
 	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
+		assert(bo == sna_pixmap_get_bo(pixmap));
 		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
 		if (bo == NULL) {
 			DBG(("%s -- fallback, dst uses Y-tiling\n", __FUNCTION__));
@@ -10621,19 +10535,19 @@ static bool
 sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 		       int _x, int _y, unsigned int _n,
 		       CharInfoPtr *_info, pointer _base,
+		       struct kgem_bo *bo,
 		       struct sna_damage **damage,
 		       RegionPtr clip,
 		       bool transparent)
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	const BoxRec *extents, *last_extents;
 	uint32_t *b;
 	int16_t dx, dy;
 	uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
 
-	if (priv->gpu_bo->tiling == I915_TILING_Y) {
+	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
 		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_X)) {
 			DBG(("%s -- fallback, dst uses Y-tiling\n", __FUNCTION__));
@@ -10651,23 +10565,22 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 
 	kgem_set_mode(&sna->kgem, KGEM_BLT);
 	if (!kgem_check_batch(&sna->kgem, 16) ||
-	    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+	    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 	    !kgem_check_reloc(&sna->kgem, 1)) {
 		_kgem_submit(&sna->kgem);
 		_kgem_set_mode(&sna->kgem, KGEM_BLT);
 	}
 	b = sna->kgem.batch + sna->kgem.nbatch;
 	b[0] = XY_SETUP_BLT | 1 << 20;
-	b[1] = priv->gpu_bo->pitch;
-	if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+	b[1] = bo->pitch;
+	if (sna->kgem.gen >= 40 && bo->tiling) {
 		b[0] |= BLT_DST_TILED;
 		b[1] >>= 2;
 	}
 	b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
 	b[2] = extents->y1 << 16 | extents->x1;
 	b[3] = extents->y2 << 16 | extents->x2;
-	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-			      priv->gpu_bo,
+	b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
 			      I915_GEM_DOMAIN_RENDER << 16 |
 			      I915_GEM_DOMAIN_RENDER |
 			      KGEM_RELOC_FENCED,
@@ -10712,8 +10625,8 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 
 				b = sna->kgem.batch + sna->kgem.nbatch;
 				b[0] = XY_SETUP_BLT | 1 << 20;
-				b[1] = priv->gpu_bo->pitch;
-				if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+				b[1] = bo->pitch;
+				if (sna->kgem.gen >= 40 && bo->tiling) {
 					b[0] |= BLT_DST_TILED;
 					b[1] >>= 2;
 				}
@@ -10721,7 +10634,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 				b[2] = extents->y1 << 16 | extents->x1;
 				b[3] = extents->y2 << 16 | extents->x2;
 				b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-						      priv->gpu_bo,
+						      bo,
 						      I915_GEM_DOMAIN_RENDER << 16 |
 						      I915_GEM_DOMAIN_RENDER |
 						      KGEM_RELOC_FENCED,
@@ -10736,7 +10649,7 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
 			sna->kgem.nbatch += 3 + len;
 
 			b[0] = XY_TEXT_IMMEDIATE_BLT | (1 + len);
-			if (priv->gpu_bo->tiling && sna->kgem.gen >= 40)
+			if (bo->tiling && sna->kgem.gen >= 40)
 				b[0] |= BLT_DST_TILED;
 			b[1] = (uint16_t)y1 << 16 | (uint16_t)x1;
 			b[2] = (uint16_t)(y1+h) << 16 | (uint16_t)(x1+w);
@@ -10795,6 +10708,7 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 	ExtentInfoRec extents;
 	RegionRec region;
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 
 	if (n == 0)
 		return;
@@ -10830,9 +10744,9 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
 		goto fallback;
 	}
 
-	if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
+	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
-				   damage, &region, false))
+				   bo, damage, &region, false))
 		goto out;
 
 fallback:
@@ -10861,6 +10775,7 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 	ExtentInfoRec extents;
 	RegionRec region;
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 
 	if (n == 0)
 		return;
@@ -10896,9 +10811,9 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
 		goto fallback;
 	}
 
-	if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
+	if ((bo = sna_drawable_use_bo(drawable, &region.extents, &damage)) &&
 	    sna_reversed_glyph_blt(drawable, gc, x, y, n, info, base,
-				   damage, &region, true))
+				   bo, damage, &region, true))
 		goto out;
 
 fallback:
@@ -10925,19 +10840,22 @@ sna_push_pixels_solid_blt(GCPtr gc,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	struct sna_pixmap *priv = sna_pixmap(pixmap);
 	struct sna_damage **damage;
+	struct kgem_bo *bo;
 	BoxRec *box;
 	int16_t dx, dy;
 	int n;
 	uint8_t rop = copy_ROP[gc->alu];
 
-	if (!sna_drawable_use_gpu_bo(drawable, &region->extents, &damage))
+	bo = sna_drawable_use_bo(drawable, &region->extents, &damage);
+	if (bo == NULL)
 		return false;
 
-	if (priv->gpu_bo->tiling == I915_TILING_Y) {
+	if (bo->tiling == I915_TILING_Y) {
 		DBG(("%s: converting bo from Y-tiling\n", __FUNCTION__));
-		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_X))
+		assert(bo == sna_pixmap_get_bo(pixmap));
+		bo = sna_pixmap_change_tiling(pixmap, I915_TILING_X);
+		if (bo == NULL)
 			return false;
 	}
 
@@ -10970,7 +10888,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		void *ptr;
 
 		if (!kgem_check_batch(&sna->kgem, 8) ||
-		    !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+		    !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
 		    !kgem_check_reloc(&sna->kgem, 2)) {
 			_kgem_submit(&sna->kgem);
 			_kgem_set_mode(&sna->kgem, KGEM_BLT);
@@ -11004,8 +10922,8 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		if (drawable->bitsPerPixel == 32)
 			b[0] |= 3 << 20;
 		b[0] |= ((box->x1 - region->extents.x1) & 7) << 17;
-		b[1] = priv->gpu_bo->pitch;
-		if (sna->kgem.gen >= 40 && priv->gpu_bo->tiling) {
+		b[1] = bo->pitch;
+		if (sna->kgem.gen >= 40 && bo->tiling) {
 			b[0] |= BLT_DST_TILED;
 			b[1] >>= 2;
 		}
@@ -11014,8 +10932,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
 		b[1] |= rop << 16;
 		b[2] = box->y1 << 16 | box->x1;
 		b[3] = box->y2 << 16 | box->x2;
-		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
-				      priv->gpu_bo,
+		b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
 				      I915_GEM_DOMAIN_RENDER << 16 |
 				      I915_GEM_DOMAIN_RENDER |
 				      KGEM_RELOC_FENCED,
@@ -11221,7 +11138,7 @@ sna_get_image(DrawablePtr drawable,
 		PixmapPtr pixmap = get_drawable_pixmap(drawable);
 		int16_t dx, dy;
 
-		DBG(("%s: copy box (%d, %d), (%d, %d), origin (%d, %d)\n",
+		DBG(("%s: copy box (%d, %d), (%d, %d)\n",
 		     __FUNCTION__,
 		     region.extents.x1, region.extents.y1,
 		     region.extents.x2, region.extents.y2));
commit 6402e7f1195f60f96dc269a701532c3270d8920f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 18:05:51 2012 +0000

    sna: Ensure that we have a source bo for tiled fills
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3c8f2be..840012f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2016,8 +2016,10 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	     __FUNCTION__, pixmap->drawable.serialNumber, pixmap->usage_hint));
 
 	priv = sna_pixmap(pixmap);
-	if (priv == NULL)
+	if (priv == NULL) {
+		DBG(("%s: not attached\n", __FUNCTION__));
 		return NULL;
+	}
 
 	if (DAMAGE_IS_ALL(priv->gpu_damage)) {
 		DBG(("%s: already all-damaged\n", __FUNCTION__));
@@ -2039,6 +2041,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 					       sna_pixmap_choose_tiling(pixmap),
 					       priv->cpu_damage ? CREATE_GTT_MAP | CREATE_INACTIVE : 0);
 		if (priv->gpu_bo == NULL) {
+			DBG(("%s: not creating GPU bo\n", __FUNCTION__));
 			assert(list_is_empty(&priv->list));
 			return NULL;
 		}
@@ -8391,6 +8394,45 @@ out:
 	RegionUninit(&data.region);
 }
 
+static struct kgem_bo *
+sna_pixmap_get_source_bo(PixmapPtr pixmap)
+{
+	struct sna_pixmap *priv = sna_pixmap(pixmap);
+
+	if (priv == NULL) {
+		struct kgem_bo *upload;
+		struct sna *sna = to_sna_from_pixmap(pixmap);
+		void *ptr;
+
+		upload = kgem_create_buffer_2d(&sna->kgem,
+					       pixmap->drawable.width,
+					       pixmap->drawable.height,
+					       pixmap->drawable.bitsPerPixel,
+					       KGEM_BUFFER_WRITE_INPLACE,
+					       &ptr);
+		memcpy_blt(pixmap->devPrivate.ptr, ptr,
+			   pixmap->drawable.bitsPerPixel,
+			   pixmap->devKind, upload->pitch,
+			   0, 0,
+			   0, 0,
+			   pixmap->drawable.width,
+			   pixmap->drawable.height);
+
+		return upload;
+	}
+
+	if (priv->gpu_damage && !sna_pixmap_move_to_gpu(pixmap, MOVE_READ))
+		return NULL;
+
+	if (priv->cpu_damage && priv->cpu_bo)
+		return kgem_bo_reference(priv->cpu_bo);
+
+	if (!sna_pixmap_force_to_gpu(pixmap, MOVE_READ))
+		return NULL;
+
+	return kgem_bo_reference(priv->gpu_bo);
+}
+
 static Bool
 sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 			     struct kgem_bo *bo,
@@ -8401,6 +8443,7 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
 	PixmapPtr tile = gc->tile.pixmap;
+	struct kgem_bo *tile_bo;
 	const DDXPointRec * const origin = &gc->patOrg;
 	struct sna_copy_op copy;
 	CARD32 alu = gc->alu;
@@ -8412,25 +8455,30 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 
 	tile_width = tile->drawable.width;
 	tile_height = tile->drawable.height;
-	if ((tile_width | tile_height) == 1)
+	if ((tile_width | tile_height) == 1) {
+		DBG(("%s: single pixel tile pixmap ,converting to solid fill\n",
+		     __FUNCTION__));
 		return sna_poly_fill_rect_blt(drawable, bo, damage,
 					      gc, get_pixel(tile),
 					      n, rect,
 					      extents, clipped);
+	}
 
 	/* XXX [248]x[238] tiling can be reduced to a pattern fill.
 	 * Also we can do the lg2 reduction for BLT and use repeat modes for
 	 * RENDER.
 	 */
 
-	if (!sna_pixmap_move_to_gpu(tile, MOVE_READ))
+	tile_bo = sna_pixmap_get_source_bo(tile);
+	if (tile_bo == NULL) {
+		DBG(("%s: unable to move tile go GPU, fallback\n",
+		     __FUNCTION__));
 		return FALSE;
+	}
 
-	if (!sna_copy_init_blt(&copy, sna,
-			       tile, sna_pixmap_get_bo(tile),
-			       pixmap, bo,
-			       alu)) {
+	if (!sna_copy_init_blt(&copy, sna, tile, tile_bo, pixmap, bo, alu)) {
 		DBG(("%s: unsupported blt\n", __FUNCTION__));
+		kgem_bo_destroy(&sna->kgem, tile_bo);
 		return FALSE;
 	}
 
@@ -8620,6 +8668,7 @@ sna_poly_fill_rect_tiled_blt(DrawablePtr drawable,
 	}
 done:
 	copy.done(sna, &copy);
+	kgem_bo_destroy(&sna->kgem, tile_bo);
 	return TRUE;
 }
 
commit 6c5fb84f4de346b06e5a538e683c5a118f2579bc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 14:08:57 2012 +0000

    sna/glyphs: Check that we attached to the cache pixmaps upon creation
    
    If the hw is wedged, then the pixmap creation routines will return an
    ordinary unattached pixmap. The code presumed that it would only return
    a pixmap with an attached bo, and so would segfault as it chased the
    invalid pointer after a GPU hang and the server was restarted.
    Considering that we already checked that the GPU wasn't wedged before we
    started, this is just mild paranoia, but on a run-once piece of code.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index ee40707..bef1774 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -157,12 +157,15 @@ static Bool realize_glyph_caches(struct sna *sna)
 
 	for (i = 0; i < ARRAY_SIZE(formats); i++) {
 		struct sna_glyph_cache *cache = &sna->render.glyph[i];
+		struct sna_pixmap *priv;
 		PixmapPtr pixmap;
-		PicturePtr picture;
+		PicturePtr picture = NULL;
+		PictFormatPtr pPictFormat;
 		CARD32 component_alpha;
 		int depth = PIXMAN_FORMAT_DEPTH(formats[i]);
 		int error;
-		PictFormatPtr pPictFormat = PictureMatchFormat(screen, depth, formats[i]);
+
+		pPictFormat = PictureMatchFormat(screen, depth, formats[i]);
 		if (!pPictFormat)
 			goto bail;
 
@@ -175,16 +178,18 @@ static Bool realize_glyph_caches(struct sna *sna)
 		if (!pixmap)
 			goto bail;
 
-		/* Prevent the cache from ever being paged out */
-		sna_pixmap(pixmap)->pinned = true;
+		priv = sna_pixmap(pixmap);
+		if (priv != NULL) {
+			/* Prevent the cache from ever being paged out */
+			priv->pinned = true;
 
-		component_alpha = NeedsComponent(pPictFormat->format);
-		picture = CreatePicture(0, &pixmap->drawable, pPictFormat,
-					CPComponentAlpha, &component_alpha,
-					serverClient, &error);
+			component_alpha = NeedsComponent(pPictFormat->format);
+			picture = CreatePicture(0, &pixmap->drawable, pPictFormat,
+						CPComponentAlpha, &component_alpha,
+						serverClient, &error);
+		}
 
 		screen->DestroyPixmap(pixmap);
-
 		if (!picture)
 			goto bail;
 
commit 86f1ae9164a94323c08e1dc6cb301e5bc1126b10
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 10:26:11 2012 +0000

    sna/video: Add some more DBG breadcrumbs to the textured PutImage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index fcef820..09f1551 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -229,6 +229,12 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 	Bool flush = false;
 	Bool ret;
 
+	DBG(("%s: src=(%d, %d),(%d, %d), dst=(%d, %d),(%d, %d), id=%d, sizep=%dx%d, sync?=%d\n",
+	     __FUNCTION__,
+	     src_x, src_y, src_w, src_h,
+	     drw_x, drw_y, drw_w, drw_h,
+	     id, width, height, sync));
+
 	if (buf == 0)
 		return BadAlloc;
 
@@ -245,6 +251,9 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		return Success;
 
 	if (xvmc_passthrough(id)) {
+		DBG(("%s: using passthough, name=%d\n",
+		     __FUNCTION__, *(uint32_t *)buf));
+
 		if (sna->kgem.gen < 31) {
 			/* XXX: i915 is not support and needs some
 			 * serious care.  grep for KMS in i915_hwmc.c */
@@ -254,6 +263,8 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		frame.bo = kgem_create_for_name(&sna->kgem, *(uint32_t*)buf);
 		if (frame.bo == NULL)
 			return BadAlloc;
+
+		assert(frame.bo->size >= frame.size);
 	} else {
 		frame.bo = kgem_create_linear(&sna->kgem, frame.size);
 		if (frame.bo == NULL)
@@ -270,16 +281,16 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 					      &clip->extents);
 
 	ret = Success;
-	if (!sna->render.video(sna, video, &frame, clip,
-			       src_w, src_h,
-			       drw_w, drw_h,
-			       pixmap))
+	if (sna->render.video(sna, video, &frame, clip,
+			      src_w, src_h,
+			      drw_w, drw_h,
+			      pixmap))
+		DamageDamageRegion(drawable, clip);
+	else
 		ret = BadAlloc;
 
 	kgem_bo_destroy(&sna->kgem, frame.bo);
 
-	DamageDamageRegion(drawable, clip);
-
 	/* Push the frame to the GPU as soon as possible so
 	 * we can hit the next vsync.
 	 */
commit ce1cae7f4715fc8e14327c5b705d9f2cc45a3741
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Fri Jan 27 00:37:51 2012 +0000

    sna/video: Simplify the gen2/915gm check
    
    And make the later check in put image match.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_video_hwmc.c b/src/sna/sna_video_hwmc.c
index 3da7d3a..1f36096 100644
--- a/src/sna/sna_video_hwmc.c
+++ b/src/sna/sna_video_hwmc.c
@@ -196,10 +196,7 @@ Bool sna_video_xvmc_setup(struct sna *sna,
 	char buf[64];
 
 	/* Needs KMS support. */
-	if (IS_I915G(sna) || IS_I915GM(sna))
-		return FALSE;
-
-	if (IS_GEN2(sna))
+	if (sna->kgem.gen < 31)
 		return FALSE;
 
 	pAdapt = calloc(1, sizeof(XF86MCAdaptorRec));
diff --git a/src/sna/sna_video_textured.c b/src/sna/sna_video_textured.c
index a72d335..fcef820 100644
--- a/src/sna/sna_video_textured.c
+++ b/src/sna/sna_video_textured.c
@@ -245,7 +245,7 @@ sna_video_textured_put_image(ScrnInfoPtr scrn,
 		return Success;
 
 	if (xvmc_passthrough(id)) {
-		if (sna->kgem.gen == 30) {
+		if (sna->kgem.gen < 31) {
 			/* XXX: i915 is not support and needs some
 			 * serious care.  grep for KMS in i915_hwmc.c */
 			return BadAlloc;
commit 541908524f9ee754db3bc45d2e1681d34479c1cc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 23:14:14 2012 +0000

    sna: Remove extraneous clipping from GetImage
    
    The spec says that they must be wholly contained within the valid BorderClip
    for a Window or within the Pixmap or else a BadMatch is thrown. Rely on
    this behaviour and do not perform the clipping ourselves.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index cad8d66..3c8f2be 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11154,42 +11154,15 @@ sna_get_image(DrawablePtr drawable,
 	      char *dst)
 {
 	RegionRec region;
-	DDXPointRec origin;
 
 	DBG(("%s (%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
 
-	/* XXX should be clipped already according to the spec... */
-	origin.x = region.extents.x1 = x + drawable->x;
-	origin.y = region.extents.y1 = y + drawable->y;
+	region.extents.x1 = x + drawable->x;
+	region.extents.y1 = y + drawable->y;
 	region.extents.x2 = region.extents.x1 + w;
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
 
-	if (drawable->type == DRAWABLE_PIXMAP) {
-		if (region.extents.x1 < drawable->x)
-			region.extents.x1 = drawable->x;
-		if (region.extents.x2 > drawable->x + drawable->width)
-			region.extents.x2 = drawable->x + drawable->width;
-
-		if (region.extents.y1 < drawable->y)
-			region.extents.y1 = drawable->y;
-		if (region.extents.y2 > drawable->y + drawable->height)
-			region.extents.y2 = drawable->y + drawable->height;
-	} else {
-		pixman_box16_t *c = &((WindowPtr)drawable)->borderClip.extents;
-		pixman_box16_t *r = &region.extents;
-
-		if (r->x1 < c->x1)
-			r->x1 = c->x1;
-		if (r->x2 > c->x2)
-			r->x2 = c->x2;
-
-		if (r->y1 < c->y1)
-			r->y1 = c->y1;
-		if (r->y2 > c->y2)
-			r->y2 = c->y2;
-	}
-
 	if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
 		return;
 
@@ -11202,17 +11175,13 @@ sna_get_image(DrawablePtr drawable,
 		DBG(("%s: copy box (%d, %d), (%d, %d), origin (%d, %d)\n",
 		     __FUNCTION__,
 		     region.extents.x1, region.extents.y1,
-		     region.extents.x2, region.extents.y2,
-		     origin.x, origin.y));
+		     region.extents.x2, region.extents.y2));
 		get_drawable_deltas(drawable, pixmap, &dx, &dy);
 		memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel,
 			   pixmap->devKind, PixmapBytePad(w, drawable->depth),
 			   region.extents.x1 + dx,
 			   region.extents.y1 + dy,
-			   region.extents.x1 - origin.x,
-			   region.extents.y1 - origin.y,
-			   region.extents.x2 - region.extents.x1,
-			   region.extents.y2 - region.extents.y1);
+			   0, 0, w, h);
 	} else
 		fbGetImage(drawable, x, y, w, h, format, mask, dst);
 }
commit 7ff40b572ec5cd860d7c7ff23beca0388f37c31c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 22:55:04 2012 +0000

    sna: Avoid fbBlt for the easy GetImage cases
    
    From (i5-2520m):
      60000 trep @   0.6145 msec (  1630.0/sec): GetImage 500x500 square
    To:
      60000 trep @   0.4949 msec (  2020.0/sec): GetImage 500x500 square
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 7fe06de..cad8d66 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11154,11 +11154,13 @@ sna_get_image(DrawablePtr drawable,
 	      char *dst)
 {
 	RegionRec region;
+	DDXPointRec origin;
 
 	DBG(("%s (%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
 
-	region.extents.x1 = x + drawable->x;
-	region.extents.y1 = y + drawable->y;
+	/* XXX should be clipped already according to the spec... */
+	origin.x = region.extents.x1 = x + drawable->x;
+	origin.y = region.extents.y1 = y + drawable->y;
 	region.extents.x2 = region.extents.x1 + w;
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
@@ -11173,14 +11175,46 @@ sna_get_image(DrawablePtr drawable,
 			region.extents.y1 = drawable->y;
 		if (region.extents.y2 > drawable->y + drawable->height)
 			region.extents.y2 = drawable->y + drawable->height;
-	} else
-		RegionIntersect(&region, &region,
-				&((WindowPtr)drawable)->borderClip);
+	} else {
+		pixman_box16_t *c = &((WindowPtr)drawable)->borderClip.extents;
+		pixman_box16_t *r = &region.extents;
+
+		if (r->x1 < c->x1)
+			r->x1 = c->x1;
+		if (r->x2 > c->x2)
+			r->x2 = c->x2;
+
+		if (r->y1 < c->y1)
+			r->y1 = c->y1;
+		if (r->y2 > c->y2)
+			r->y2 = c->y2;
+	}
 
 	if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
 		return;
 
-	fbGetImage(drawable, x, y, w, h, format, mask, dst);
+	if (format == ZPixmap &&
+	    drawable->bitsPerPixel >= 8 &&
+	    PM_IS_SOLID(drawable, mask)) {
+		PixmapPtr pixmap = get_drawable_pixmap(drawable);
+		int16_t dx, dy;
+
+		DBG(("%s: copy box (%d, %d), (%d, %d), origin (%d, %d)\n",
+		     __FUNCTION__,
+		     region.extents.x1, region.extents.y1,
+		     region.extents.x2, region.extents.y2,
+		     origin.x, origin.y));
+		get_drawable_deltas(drawable, pixmap, &dx, &dy);
+		memcpy_blt(pixmap->devPrivate.ptr, dst, drawable->bitsPerPixel,
+			   pixmap->devKind, PixmapBytePad(w, drawable->depth),
+			   region.extents.x1 + dx,
+			   region.extents.y1 + dy,
+			   region.extents.x1 - origin.x,
+			   region.extents.y1 - origin.y,
+			   region.extents.x2 - region.extents.x1,
+			   region.extents.y2 - region.extents.y1);
+	} else
+		fbGetImage(drawable, x, y, w, h, format, mask, dst);
 }
 
 static void
commit adb1320bba15a3a3b4fa8e7d0fd0360fa696721d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 16:05:48 2012 +0000

    sna/gen2+: Include being unattached in the list of source fallbacks
    
    If the source is not attached to a buffer (be it a GPU bo or a CPU bo),
    a temporary upload buffer would be required and so it is not worth
    forcing the target to the destination in that case (should the target
    not be on the GPU already).
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index eb8d4ef..2a97cea 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1514,12 +1514,19 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
 	return (has_alphamap(p) ||
 		is_unhandled_gradient(p) ||
 		!gen2_check_filter(p) ||
-		!gen2_check_repeat(p));
+		!gen2_check_repeat(p) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index af83966..931142d 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2426,9 +2426,19 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
-	return has_alphamap(p) || !gen3_check_xformat(p) || !gen3_check_filter(p) || !gen3_check_repeat(p);
+	return (has_alphamap(p) ||
+		!gen3_check_xformat(p) ||
+		!gen3_check_filter(p) ||
+		!gen3_check_repeat(p) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c798ce5..91d5f49 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2015,9 +2015,20 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
-	return has_alphamap(p) || is_gradient(p) || !gen4_check_filter(p) || !gen4_check_repeat(p) || !gen4_check_format(p->format);
+	return (has_alphamap(p) ||
+		is_gradient(p) ||
+		!gen4_check_filter(p) ||
+		!gen4_check_repeat(p) ||
+		!gen4_check_format(p->format) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 47c4e96..2c6d020 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2049,13 +2049,20 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
 	return (has_alphamap(p) ||
 		is_gradient(p) ||
 		!gen5_check_filter(p) ||
 		!gen5_check_repeat(p) ||
-		!gen5_check_format(p->format));
+		!gen5_check_format(p->format) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index c3bc2e7..d813d95 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2266,9 +2266,20 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
-	return has_alphamap(p) || is_gradient(p) || !gen6_check_filter(p) || !gen6_check_repeat(p) || !gen6_check_format(p->format);
+	return (has_alphamap(p) ||
+		is_gradient(p) ||
+		!gen6_check_filter(p) ||
+		!gen6_check_repeat(p) ||
+		!gen6_check_format(p->format) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index 21d8c99..282b724 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2333,9 +2333,20 @@ has_alphamap(PicturePtr p)
 }
 
 static bool
+need_upload(PicturePtr p)
+{
+	return p->pDrawable && unattached(p->pDrawable);
+}
+
+static bool
 source_fallback(PicturePtr p)
 {
-	return has_alphamap(p) || is_gradient(p) || !gen7_check_filter(p) || !gen7_check_repeat(p) || !gen7_check_format(p->format);
+	return (has_alphamap(p) ||
+		is_gradient(p) ||
+		!gen7_check_filter(p) ||
+		!gen7_check_repeat(p) ||
+		!gen7_check_format(p->format) ||
+		need_upload(p));
 }
 
 static bool
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index c9d2b5f..489f215 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -106,6 +106,17 @@ too_small(DrawablePtr drawable)
 }
 
 static inline Bool
+unattached(DrawablePtr drawable)
+{
+	struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+
+	if (priv == NULL)
+		return true;
+
+	return priv->gpu_bo == NULL && priv->cpu_bo == NULL;
+}
+
+static inline Bool
 picture_is_gpu(PicturePtr picture)
 {
 	if (!picture || !picture->pDrawable)
commit b1f9415bf355e9fbd6fc3ce8fa1af59083ca2943
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 13:59:24 2012 +0000

    sna: Allow gen4+ to use larger GPU bo
    
    As the blitter on gen4+ does not require fence registers, it is not
    restricted to operating on large objects within the mappable aperture.
    As we do not need to operate on such large GPU bo in place, we can relax
    the restriction on the maximum bo size for gen4+ to allocate for use
    with the GPU.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6cd86e6..4df29d2 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -658,15 +658,22 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		 * disable dual-stream mode */
 		kgem->min_alignment = 64;
 
-	kgem->max_gpu_size = kgem->aperture_mappable / 2;
-	if (kgem->max_gpu_size > kgem->aperture_low)
-		kgem->max_gpu_size = kgem->aperture_low;
-	if (kgem->max_gpu_size > MAX_OBJECT_SIZE)
-		kgem->max_gpu_size = MAX_OBJECT_SIZE;
-
 	kgem->max_cpu_size = kgem->aperture_total / 2;
 	if (kgem->max_cpu_size > MAX_OBJECT_SIZE)
 		kgem->max_cpu_size = MAX_OBJECT_SIZE;
+
+	kgem->max_gpu_size = -1;
+	if (gen < 40) {
+		/* If we have to use fences for blitting, we have to make
+		 * sure we can fit them into the aperture.
+		 */
+		kgem->max_gpu_size = kgem->aperture_mappable / 2;
+		if (kgem->max_gpu_size > kgem->aperture_low)
+			kgem->max_gpu_size = kgem->aperture_low;
+	}
+	if (kgem->max_gpu_size > kgem->max_cpu_size)
+		kgem->max_gpu_size = kgem->max_cpu_size;
+
 	DBG(("%s: max object size (tiled=%d, linear=%d)\n",
 	     __FUNCTION__, kgem->max_gpu_size, kgem->max_cpu_size));
 
@@ -2729,6 +2736,8 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 
 	ptr = bo->map;
 	if (ptr == NULL) {
+		assert(bo->size <= kgem->aperture_mappable / 4);
+
 		kgem_trim_vma_cache(kgem, MAP_GTT, bo->bucket);
 
 		ptr = gem_mmap(kgem->fd, bo->handle, bo->size,
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 0cc4fd3..0dc67da 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -363,10 +363,16 @@ static inline bool kgem_bo_is_mappable(struct kgem *kgem,
 	if (bo->domain == DOMAIN_GTT)
 		return true;
 
+	if (IS_GTT_MAP(bo->map))
+		return true;
+
 	if (kgem->gen < 40 && bo->tiling &&
 	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
 		return false;
 
+	if (!bo->presumed_offset)
+		return bo->size <= kgem->aperture_mappable / 4;
+
 	return bo->presumed_offset + bo->size <= kgem->aperture_mappable;
 }
 
commit d35b6955dbb5d652d8685d2c1ea82c5e08de55ea
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 14:34:36 2012 +0000

    sna: Prevent mapping through the GTT for large bo
    
    If the bo is larger than a quarter of the aperture, it is unlikely that
    we will be able to evict enough contiguous space in the GATT to
    accommodate that buffer. So don't attempt to map them and use the
    indirect access instead.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index db4f061..0cc4fd3 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -394,13 +394,19 @@ static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
 	     __FUNCTION__, bo->handle,
 	     bo->domain, bo->presumed_offset, bo->size));
 
+	if (!kgem_bo_is_mappable(kgem, bo))
+		return true;
+
+	if (kgem->wedged)
+		return false;
+
 	if (kgem_bo_is_busy(bo))
 		return true;
 
 	if (bo->presumed_offset == 0)
 		return !list_is_empty(&kgem->requests);
 
-	return !kgem_bo_is_mappable(kgem, bo);
+	return false;
 }
 
 static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b3b968c..7fe06de 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -797,7 +797,7 @@ sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
 			       sna_pixmap_choose_tiling(pixmap),
 			       CREATE_GTT_MAP | CREATE_INACTIVE);
 
-	return priv->gpu_bo != NULL;
+	return priv->gpu_bo && kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo);
 }
 
 bool
@@ -835,7 +835,8 @@ _sna_pixmap_move_to_cpu(PixmapPtr pixmap, unsigned int flags)
 				    priv->gpu_bo->exec == NULL)
 					kgem_retire(&sna->kgem);
 
-				if (kgem_bo_is_busy(priv->gpu_bo)) {
+				if (kgem_bo_map_will_stall(&sna->kgem,
+							   priv->gpu_bo)) {
 					if (priv->pinned)
 						goto skip_inplace_map;
 
@@ -897,7 +898,7 @@ skip_inplace_map:
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_is_busy(priv->gpu_bo) &&
+	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
 	    pixmap_inplace(sna, pixmap, priv) &&
 	    sna_pixmap_move_to_gpu(pixmap, flags)) {
 		assert(flags & MOVE_WRITE);
@@ -1250,7 +1251,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 
 	if (flags & MOVE_INPLACE_HINT &&
 	    priv->stride && priv->gpu_bo &&
-	    !kgem_bo_is_busy(priv->gpu_bo) &&
+	    !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo) &&
 	    region_inplace(sna, pixmap, region, priv) &&
 	    sna_pixmap_move_area_to_gpu(pixmap, &region->extents, flags)) {
 		assert(flags & MOVE_WRITE);
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index a2e7a59..f3ca212 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -122,8 +122,7 @@ void sna_read_boxes(struct sna *sna,
 	 * this path.
 	 */
 
-	if (DEBUG_NO_IO || kgem->wedged ||
-	    !kgem_bo_map_will_stall(kgem, src_bo) ||
+	if (!kgem_bo_map_will_stall(kgem, src_bo) ||
 	    src_bo->tiling == I915_TILING_NONE) {
 fallback:
 		read_boxes_inplace(kgem,
@@ -386,10 +385,7 @@ static bool upload_inplace(struct kgem *kgem,
 			   int n, int bpp)
 {
 	if (DEBUG_NO_IO)
-		return true;
-
-	if (unlikely(kgem->wedged))
-		return true;
+		return kgem_bo_is_mappable(kgem, bo);
 
 	/* If we are writing through the GTT, check first if we might be
 	 * able to almagamate a series of small writes into a single
@@ -993,14 +989,27 @@ struct kgem_bo *sna_replace(struct sna *sna,
 		kgem_bo_write(kgem, bo, src,
 			      (pixmap->drawable.height-1)*stride + pixmap->drawable.width*pixmap->drawable.bitsPerPixel/8);
 	} else {
-		dst = kgem_bo_map(kgem, bo);
-		if (dst) {
-			memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
-				   stride, bo->pitch,
-				   0, 0,
-				   0, 0,
-				   pixmap->drawable.width,
-				   pixmap->drawable.height);
+		if (kgem_bo_is_mappable(kgem, bo)) {
+			dst = kgem_bo_map(kgem, bo);
+			if (dst) {
+				memcpy_blt(src, dst, pixmap->drawable.bitsPerPixel,
+					   stride, bo->pitch,
+					   0, 0,
+					   0, 0,
+					   pixmap->drawable.width,
+					   pixmap->drawable.height);
+			}
+		} else {
+			BoxRec box;
+
+			box.x1 = box.y1 = 0;
+			box.x2 = pixmap->drawable.width;
+			box.y2 = pixmap->drawable.height;
+
+			sna_write_boxes(sna, pixmap,
+					bo, 0, 0,
+					src, stride, 0, 0,
+					&box, 1);
 		}
 	}
 
@@ -1038,15 +1047,29 @@ struct kgem_bo *sna_replace__xor(struct sna *sna,
 		}
 	}
 
-	dst = kgem_bo_map(kgem, bo);
-	if (dst) {
-		memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel,
-			   stride, bo->pitch,
-			   0, 0,
-			   0, 0,
-			   pixmap->drawable.width,
-			   pixmap->drawable.height,
-			   and, or);
+	if (kgem_bo_is_mappable(kgem, bo)) {
+		dst = kgem_bo_map(kgem, bo);
+		if (dst) {
+			memcpy_xor(src, dst, pixmap->drawable.bitsPerPixel,
+				   stride, bo->pitch,
+				   0, 0,
+				   0, 0,
+				   pixmap->drawable.width,
+				   pixmap->drawable.height,
+				   and, or);
+		}
+	} else {
+		BoxRec box;
+
+		box.x1 = box.y1 = 0;
+		box.x2 = pixmap->drawable.width;
+		box.y2 = pixmap->drawable.height;
+
+		sna_write_boxes__xor(sna, pixmap,
+				     bo, 0, 0,
+				     src, stride, 0, 0,
+				     &box, 1,
+				     and, or);
 	}
 
 	return bo;
commit 7c81bcd0c425cc0f7ddf2ad8289bb739c8d44289
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 13:35:15 2012 +0000

    sna: Add FORCE_FALLBACK debugging hook for PutImage
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 759e0fe..b3b968c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2907,6 +2907,9 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
 
 	RegionTranslate(&region, dx, dy);
 
+	if (FORCE_FALLBACK)
+		goto fallback;
+
 	if (wedged(sna))
 		goto fallback;
 
commit 35c0ef586bf508c577642d772f18eae0b64cfd44
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 11:20:03 2012 +0000

    sna/gen3: Use cpu bo if already in use
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 95a79b2..af83966 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2337,32 +2337,42 @@ gen3_composite_set_target(struct sna *sna,
 	op->dst.height = op->dst.pixmap->drawable.height;
 	priv = sna_pixmap(op->dst.pixmap);
 
-	priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
-	if (priv == NULL)
-		return FALSE;
+	op->dst.bo = NULL;
+	priv = sna_pixmap(op->dst.pixmap);
+	if (priv &&
+	    priv->gpu_bo == NULL &&
+	    priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU) {
+		op->dst.bo = priv->cpu_bo;
+		op->damage = &priv->cpu_damage;
+	}
+	if (op->dst.bo == NULL) {
+		priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_READ | MOVE_WRITE);
+		if (priv == NULL)
+			return FALSE;
 
-	/* For single-stream mode there should be no minimum alignment
-	 * required, except that the width must be at least 2 elements.
-	 */
-	if (priv->gpu_bo->pitch < 2*op->dst.pixmap->drawable.bitsPerPixel) {
-		struct kgem_bo *bo;
+		/* For single-stream mode there should be no minimum alignment
+		 * required, except that the width must be at least 2 elements.
+		 */
+		if (priv->gpu_bo->pitch < 2*op->dst.pixmap->drawable.bitsPerPixel) {
+			struct kgem_bo *bo;
 
-		if (priv->pinned)
-			return FALSE;
+			if (priv->pinned)
+				return FALSE;
 
-		bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
-				     op->dst.width, op->dst.height,
-				     2*op->dst.pixmap->drawable.bitsPerPixel,
-				     op->dst.pixmap->drawable.bitsPerPixel);
-		if (bo == NULL)
-			return FALSE;
+			bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
+					     op->dst.width, op->dst.height,
+					     2*op->dst.pixmap->drawable.bitsPerPixel,
+					     op->dst.pixmap->drawable.bitsPerPixel);
+			if (bo == NULL)
+				return FALSE;
 
-		kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
-		priv->gpu_bo = bo;
-	}
+			kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
+			priv->gpu_bo = bo;
+		}
 
-	op->dst.bo = priv->gpu_bo;
-	op->damage = &priv->gpu_damage;
+		op->dst.bo = priv->gpu_bo;
+		op->damage = &priv->gpu_damage;
+	}
 	if (sna_damage_is_all(op->damage, op->dst.width, op->dst.height))
 		op->damage = NULL;
 
@@ -2475,7 +2485,9 @@ gen3_composite_fallback(struct sna *sna,
 
 	if (src_pixmap && !is_solid(src) && !source_fallback(src)) {
 		priv = sna_pixmap(src_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
+		if (priv &&
+		    ((priv->gpu_damage && !priv->cpu_damage) ||
+		     (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
 			DBG(("%s: src is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
@@ -2483,7 +2495,9 @@ gen3_composite_fallback(struct sna *sna,
 	}
 	if (mask_pixmap && !is_solid(mask) && !source_fallback(mask)) {
 		priv = sna_pixmap(mask_pixmap);
-		if (priv && priv->gpu_damage && !priv->cpu_damage) {
+		if (priv &&
+		    ((priv->gpu_damage && !priv->cpu_damage) ||
+		     (priv->cpu_bo && priv->cpu_bo->domain != DOMAIN_CPU))) {
 			DBG(("%s: mask is already on the GPU, try to use GPU\n",
 			     __FUNCTION__));
 			return FALSE;
commit b76a6da3fa0148ef32600dd9505e22b90de037df
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 10:47:01 2012 +0000

    sna: Search the buckets above the desired size in the bo cache
    
    It is preferable to reuse a slightly larger bo, than it is to create a
    fresh one and map it into the aperture. So search the bucket above us as
    well.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index a2fcefc..6cd86e6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -2189,7 +2189,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	struct kgem_bo *bo, *next;
 	uint32_t pitch, untiled_pitch, tiled_height, size;
 	uint32_t handle;
-	int i;
+	int i, bucket, retry;
 
 	if (tiling < 0)
 		tiling = -tiling, flags |= CREATE_EXACT;
@@ -2208,6 +2208,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				 width, height, bpp, tiling, &pitch);
 	assert(size && size < kgem->max_cpu_size);
 	assert(tiling == I915_TILING_NONE || size < kgem->max_gpu_size);
+	bucket = cache_bucket(size);
 
 	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
@@ -2216,10 +2217,10 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		/* We presume that we will need to upload to this bo,
 		 * and so would prefer to have an active VMA.
 		 */
-		cache = &kgem->vma[for_cpu].inactive[cache_bucket(size)];
+		cache = &kgem->vma[for_cpu].inactive[bucket];
 		do {
 			list_for_each_entry(bo, cache, vma) {
-				assert(bo->bucket == cache_bucket(size));
+				assert(bo->bucket == bucket);
 				assert(bo->refcnt == 0);
 				assert(bo->map);
 				assert(IS_CPU_MAP(bo->map) == for_cpu);
@@ -2263,13 +2264,17 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		goto skip_active_search;
 
 	/* Best active match */
-	cache = active(kgem, size, tiling);
+	retry = NUM_CACHE_BUCKETS - bucket;
+	if (retry > 3)
+		retry = 3;
+search_again:
+	cache = &kgem->active[bucket][tiling];
 	if (tiling) {
 		tiled_height = kgem_aligned_height(kgem, height, tiling);
 		list_for_each_entry(bo, cache, list) {
-			assert(bo->bucket == cache_bucket(size));
 			assert(!bo->purged);
 			assert(bo->refcnt == 0);
+			assert(bo->bucket == bucket);
 			assert(bo->reusable);
 			assert(bo->tiling == tiling);
 
@@ -2280,7 +2285,6 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 				continue;
 			}
 
-
 			if (bo->pitch * tiled_height > bo->size)
 				continue;
 
@@ -2294,7 +2298,7 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		}
 	} else {
 		list_for_each_entry(bo, cache, list) {
-			assert(bo->bucket == cache_bucket(size));
+			assert(bo->bucket == bucket);
 			assert(!bo->purged);
 			assert(bo->refcnt == 0);
 			assert(bo->reusable);
@@ -2314,6 +2318,11 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 		}
 	}
 
+	if (--retry && flags & CREATE_EXACT) {
+		bucket++;
+		goto search_again;
+	}
+
 	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
 		untiled_pitch = kgem_untiled_pitch(kgem,
 						   width, bpp,
@@ -2356,10 +2365,15 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	}
 
 skip_active_search:
+	bucket = cache_bucket(size);
+	retry = NUM_CACHE_BUCKETS - bucket;
+	if (retry > 3)
+		retry = 3;
+search_inactive:
 	/* Now just look for a close match and prefer any currently active */
-	cache = inactive(kgem, size);
+	cache = &kgem->inactive[bucket];
 	list_for_each_entry_safe(bo, next, cache, list) {
-		assert(bo->bucket == cache_bucket(size));
+		assert(bo->bucket == bucket);
 
 		if (size > bo->size) {
 			DBG(("inactive too small: %d < %d\n",
@@ -2409,10 +2423,15 @@ skip_active_search:
 	if (flags & CREATE_INACTIVE && !list_is_empty(&kgem->requests)) {
 		if (kgem_retire(kgem)) {
 			flags &= ~CREATE_INACTIVE;
-			goto skip_active_search;
+			goto search_inactive;
 		}
 	}
 
+	if (--retry) {
+		bucket++;
+		goto search_inactive;
+	}
+
 	handle = gem_create(kgem->fd, size);
 	if (handle == 0)
 		return NULL;
commit e2b8b1c145932e2254a705905c60f18c200cf2e8
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 12:42:12 2012 +0000

    sna: Apply any previous transformation when downsampling
    
    In order to handle rotations and fractional offsets produced by the act
    of downsampling, we need to compute the full affine transformation and
    apply it to the vertices rather than attempt to fudge it with an integer
    offset.
    
    References: https://bugs.freedesktop.org/show_bug.cgi?id=45086
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6eae248..a2fcefc 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3323,67 +3323,6 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 	return bo;
 }
 
-struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
-						pixman_format_code_t format,
-						const void *data,
-						int x, int y,
-						int width, int height,
-						int stride, int bpp)
-{
-	struct kgem_bo *bo;
-	pixman_image_t *src_image, *dst_image;
-	pixman_transform_t t;
-	int w, h;
-	void *dst;
-
-	DBG(("%s : (%d, %d), (%d, %d), stride=%d, bpp=%d\n",
-	     __FUNCTION__, x, y, width, height, stride, bpp));
-
-	w = (width + 1) / 2;
-	h = (height + 1) / 2;
-
-	bo = kgem_create_buffer_2d(kgem, w, h, bpp,
-				   KGEM_BUFFER_WRITE_INPLACE,
-				   &dst);
-	if (bo == NULL)
-		return NULL;
-
-	dst_image = pixman_image_create_bits(format, w, h, dst, bo->pitch);
-	if (dst_image == NULL)
-		goto cleanup_bo;
-
-	src_image = pixman_image_create_bits(format, width, height,
-					     (uint32_t*)data, stride);
-	if (src_image == NULL)
-		goto cleanup_dst;
-
-	memset(&t, 0, sizeof(t));
-	t.matrix[0][0] = 2 << 16;
-	t.matrix[1][1] = 2 << 16;
-	t.matrix[2][2] = 1 << 16;
-	pixman_image_set_transform(src_image, &t);
-	pixman_image_set_filter(src_image, PIXMAN_FILTER_BILINEAR, NULL, 0);
-	pixman_image_set_repeat(src_image, PIXMAN_REPEAT_PAD);
-
-	pixman_image_composite(PIXMAN_OP_SRC,
-			       src_image, NULL, dst_image,
-			       x, y,
-			       0, 0,
-			       0, 0,
-			       w, h);
-
-	pixman_image_unref(src_image);
-	pixman_image_unref(dst_image);
-
-	return bo;
-
-cleanup_dst:
-	pixman_image_unref(dst_image);
-cleanup_bo:
-	kgem_bo_destroy(kgem, bo);
-	return NULL;
-}
-
 void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 {
 	struct kgem_partial_bo *bo;
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index fd3aa9d..db4f061 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -191,12 +191,6 @@ struct kgem_bo *kgem_upload_source_image(struct kgem *kgem,
 					 const void *data,
 					 BoxPtr box,
 					 int stride, int bpp);
-struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
-						pixman_format_code_t format,
-						const void *data,
-						int x, int y,
-						int width, int height,
-						int stride, int bpp);
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index 28b93a2..9d7857c 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -615,15 +615,17 @@ static int sna_render_picture_downsample(struct sna *sna,
 					 int16_t w, int16_t h,
 					 int16_t dst_x, int16_t dst_y)
 {
-	struct kgem_bo *bo = NULL;
 	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
-	int16_t ox, oy, ow, oh;
-	BoxRec box;
-
-	assert(w && h);
-
-	DBG(("%s (%d, %d)x(%d, %d) [dst=(%d, %d)]\n",
-	     __FUNCTION__, x, y, w, h, dst_x, dst_y));
+	ScreenPtr screen = pixmap->drawable.pScreen;
+	PicturePtr tmp_src, tmp_dst;
+	PictFormatPtr format;
+	struct sna_pixmap *priv;
+	pixman_transform_t t;
+	PixmapPtr tmp;
+	int width, height;
+	int sx, sy, ox, oy, ow, oh;
+	int error, ret = 0;
+	BoxRec box, b;
 
 	ow = w;
 	oh = h;
@@ -645,12 +647,6 @@ static int sna_render_picture_downsample(struct sna *sna,
 		oy = v.vector[1] / v.vector[2];
 	}
 
-	/* Align the origin to an even pixel so that the sampling of
-	 * partial images is stable.
-	 */
-	box.x1 &= ~1;
-	box.y1 &= ~1;
-
 	if (channel->repeat == RepeatNone || channel->repeat == RepeatPad) {
 		if (box.x1 < 0)
 			box.x1 = 0;
@@ -684,184 +680,124 @@ static int sna_render_picture_downsample(struct sna *sna,
 
 	w = box.x2 - box.x1;
 	h = box.y2 - box.y1;
-	DBG(("%s: sample area (%d, %d), (%d, %d): %dx%d\n",
-	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2, w, h));
-	assert(w && h);
-	if (w > 2*sna->render.max_3d_size || h > 2*sna->render.max_3d_size) {
-		DBG(("%s: sample size too large for pixman downscaling\n",
-		     __FUNCTION__));
-		goto fixup;
-	}
 
-	if (texture_is_cpu(pixmap, &box) && !move_to_gpu(pixmap, &box)) {
-		DBG(("%s: uploading partial texture\n", __FUNCTION__));
-		bo = kgem_upload_source_image_halved(&sna->kgem,
-						     picture->format,
-						     pixmap->devPrivate.ptr,
-						     box.x1, box.y1, w, h,
-						     pixmap->devKind,
-						     pixmap->drawable.bitsPerPixel);
-		if (!bo) {
-			DBG(("%s: failed to upload source image, using clear\n",
-			     __FUNCTION__));
-			return 0;
-		}
-	} else {
-		ScreenPtr screen = pixmap->drawable.pScreen;
-		PicturePtr tmp_src, tmp_dst;
-		PictFormatPtr format;
-		struct sna_pixmap *priv;
-		pixman_transform_t t;
-		PixmapPtr tmp;
-		int error, i, j, ww, hh, ni, nj;
-
-		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ))
-			goto fixup;
-
-		tmp = screen->CreatePixmap(screen,
-					   (w+1)/2, (h+1)/2,
-					   pixmap->drawable.depth,
-					   SNA_CREATE_SCRATCH);
-		if (!tmp)
-			goto fixup;
-
-		priv = sna_pixmap(tmp);
-		if (!priv) {
-			screen->DestroyPixmap(tmp);
-			goto fixup;
-		}
+	DBG(("%s: sample (%d, %d), (%d, %d)\n",
+	     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 
-		format = PictureMatchFormat(screen,
-					    pixmap->drawable.depth,
-					    picture->format);
+	sx = (w + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
+	sy = (h + sna->render.max_3d_size - 1) / sna->render.max_3d_size;
 
-		tmp_dst = CreatePicture(0, &tmp->drawable, format, 0, NULL,
-					serverClient, &error);
-		if (!tmp_dst) {
-			screen->DestroyPixmap(tmp);
-			goto fixup;
-		}
+	DBG(("%s: scaling (%d, %d) down by %dx%d\n",
+	     __FUNCTION__, w, h, sx, sy));
 
-		tmp_src = CreatePicture(0, &pixmap->drawable, format, 0, NULL,
-					serverClient, &error);
-		if (!tmp_src) {
-			FreePicture(tmp_dst, 0);
-			screen->DestroyPixmap(tmp);
-			goto fixup;
-		}
+	width  = w / sx;
+	height = h / sy;
 
-		tmp_src->repeat = true;
-		tmp_src->repeatType = RepeatPad;
-		tmp_src->filter = PictFilterBilinear;
-		memset(&t, 0, sizeof(t));
-		t.matrix[0][0] = 2 << 16;
-		t.matrix[1][1] = 2 << 16;
-		t.matrix[2][2] = 1 << 16;
-		tmp_src->transform = &t;
-
-		ValidatePicture(tmp_dst);
-		ValidatePicture(tmp_src);
-
-		if (w > sna->render.max_3d_size) {
-			ww = (w+3)/4;
-			nj = 2;
-		} else {
-			ww = (w+1)/2;
-			nj = 1;
-		}
+	DBG(("%s: creating temporary GPU bo %dx%d\n",
+	     __FUNCTION__, width, height));
 
-		if (h > sna->render.max_3d_size) {
-			hh = (h+3)/4;
-			ni = 2;
-		} else {
-			hh = (h+1)/2;
-			ni = 1;
-		}
+	tmp = screen->CreatePixmap(screen,
+				   width, height,
+				   pixmap->drawable.depth,
+				   SNA_CREATE_SCRATCH);
+	if (!tmp)
+		return 0;
 
-		DBG(("%s %d:%d downsampling using %dx%d GPU tiles\n",
-		     __FUNCTION__, nj, ni, ww, hh));
-
-		for (i = 0; i < ni; i++) {
-			BoxRec b;
-
-			b.y1 = hh*i;
-			if (i == ni - 1)
-				b.y2 = (h+1)/2;
-			else
-				b.y2 = b.y1 + hh;
-
-			for (j = 0; j < nj; j++) {
-				struct sna_composite_op op;
-
-				b.x1 = ww*j;
-				if (j == nj - 1)
-					b.x2 = (w+1)/2;
-				else
-					b.x2 = b.x1 + ww;
-
-				DBG(("%s: tile %d:%d, box=(%d,%d), (%d, %d)\n",
-				     __FUNCTION__, i, j, b.x1, b.y1, b.x2, b.y2));
-
-				memset(&op, 0, sizeof(op));
-				if (!sna->render.composite(sna,
-							   PictOpSrc,
-							   tmp_src, NULL, tmp_dst,
-							   box.x1/2 + b.x1, box.y1/2 + b.y1,
-							   0, 0,
-							   b.x1, b.y1,
-							   b.x2 - b.x1, b.y2 - b.y1,
-							   &op)) {
-					tmp_src->transform = NULL;
-					FreePicture(tmp_src, 0);
-					FreePicture(tmp_dst, 0);
-					screen->DestroyPixmap(tmp);
-					goto fixup;
-				}
-
-				op.boxes(sna, &op, &b, 1);
-				op.done(sna, &op);
-			}
+	priv = sna_pixmap(tmp);
+	if (!priv)
+		goto cleanup_tmp;
+
+	format = PictureMatchFormat(screen,
+				    pixmap->drawable.depth,
+				    picture->format);
+
+	tmp_dst = CreatePicture(0, &tmp->drawable, format, 0, NULL,
+				serverClient, &error);
+	if (!tmp_dst)
+		goto cleanup_tmp;
+
+	tmp_src = CreatePicture(0, &pixmap->drawable, format, 0, NULL,
+				serverClient, &error);
+	if (!tmp_src)
+		goto cleanup_dst;
+
+	tmp_src->repeat = 1;
+	tmp_src->repeatType = RepeatPad;
+	/* Prefer to use nearest as it helps reduce artefacts from
+	 * interpolating and filtering twice.
+	 */
+	tmp_src->filter = PictFilterNearest;
+	memset(&t, 0, sizeof(t));
+	t.matrix[0][0] = (w << 16) / width;
+	t.matrix[0][2] = box.x1 << 16;
+	t.matrix[1][1] = (h << 16) / height;
+	t.matrix[1][2] = box.y1 << 16;
+	t.matrix[2][2] = 1 << 16;
+	tmp_src->transform = &t;
+
+	ValidatePicture(tmp_dst);
+	ValidatePicture(tmp_src);
+
+	w = sna->render.max_3d_size / sx - 2 * sx;
+	h = sna->render.max_3d_size / sy - 2 * sy;
+	DBG(("%s %d:%d downsampling using %dx%d GPU tiles\n",
+	     __FUNCTION__, (width + w-1)/w, (height + h-1)/h, w, h));
+
+	for (b.y1 = 0; b.y1 < height; b.y1 = b.y2) {
+		b.y2 = b.y1 + h;
+		if (b.y2 > height)
+			b.y2 = height;
+
+		for (b.x1 = 0; b.x1 < width; b.x1 = b.x2) {
+			struct sna_composite_op op;
+
+			b.x2 = b.x1 + w;
+			if (b.x2 > width)
+				b.x2 = width;
+
+			DBG(("%s: tile (%d, %d), (%d, %d)\n",
+			     __FUNCTION__, b.x1, b.y1, b.x2, b.y2));
+
+			memset(&op, 0, sizeof(op));
+			if (!sna->render.composite(sna,
+						   PictOpSrc,
+						   tmp_src, NULL, tmp_dst,
+						   b.x1, b.y1,
+						   0, 0,
+						   b.x1, b.y1,
+						   b.x2 - b.x1, b.y2 - b.y1,
+						   &op))
+				goto cleanup_src;
+
+			op.boxes(sna, &op, &b, 1);
+			op.done(sna, &op);
 		}
-
-		bo = kgem_bo_reference(priv->gpu_bo);
-
-		tmp_src->transform = NULL;
-		FreePicture(tmp_src, 0);
-		FreePicture(tmp_dst, 0);
-		screen->DestroyPixmap(tmp);
 	}
 
-	if (ox == x && oy == y) {
-		x = y = 0;
-	} else if (channel->transform) {
-		pixman_vector_t v;
-		pixman_transform_t m;
-
-		v.vector[0] = (ox - box.x1) << 16;
-		v.vector[1] = (oy - box.y1) << 16;
-		v.vector[2] = 1 << 16;
-		pixman_transform_invert(&m, channel->transform);
-		pixman_transform_point(&m, &v);
-		x = v.vector[0] / v.vector[2];
-		y = v.vector[1] / v.vector[2];
-	} else {
-		x = ox - box.x1;
-		y = oy - box.y1;
-	}
+	pixman_transform_invert(&channel->embedded_transform, &t);
+	if (channel->transform)
+		pixman_transform_multiply(&channel->embedded_transform,
+					  &channel->embedded_transform,
+					  channel->transform);
+	channel->transform = &channel->embedded_transform;
 
 	channel->offset[0] = x - dst_x;
 	channel->offset[1] = y - dst_y;
-	channel->scale[0] = 1.f/w;
-	channel->scale[1] = 1.f/h;
-	channel->width  = (w + 1) / 2;
-	channel->height = (h + 1) / 2;
-	channel->bo = bo;
-	return 1;
-
-fixup:
-	return sna_render_picture_fixup(sna, picture, channel,
-					x, y, w, h,
-					dst_x, dst_y);
+	channel->scale[0] = 1.f/width;
+	channel->scale[1] = 1.f/height;
+	channel->width  = width;
+	channel->height = height;
+	channel->bo = kgem_bo_reference(priv->gpu_bo);
+
+	ret = 1;
+cleanup_src:
+	tmp_src->transform = NULL;
+	FreePicture(tmp_src, 0);
+cleanup_dst:
+	FreePicture(tmp_dst, 0);
+cleanup_tmp:
+	screen->DestroyPixmap(tmp);
+	return ret;
 }
 
 int
@@ -965,7 +901,10 @@ sna_render_picture_extract(struct sna *sna,
 	}
 
 	src_bo = use_cpu_bo(sna, pixmap, &box);
-	if (src_bo == NULL) {
+	if (src_bo) {
+		if (!sna_pixmap_move_to_cpu(pixmap, MOVE_READ))
+			return 0;
+	} else {
 		if (texture_is_cpu(pixmap, &box) &&
 		    !move_to_gpu(pixmap, &box)) {
 			bo = kgem_upload_source_image(&sna->kgem,
@@ -981,7 +920,6 @@ sna_render_picture_extract(struct sna *sna,
 				src_bo = priv->gpu_bo;
 		}
 	}
-
 	if (src_bo) {
 		bo = kgem_create_2d(&sna->kgem, w, h,
 				    pixmap->drawable.bitsPerPixel,
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index 4346196..c4711f4 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -57,6 +57,8 @@ struct sna_composite_op {
 		int16_t offset[2];
 		float scale[2];
 
+		pixman_transform_t embedded_transform;
+
 		union {
 			struct {
 				uint32_t pixel;
commit 352828ee59164a9e81093d88dfdd45bc21f0c739
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Thu Jan 26 12:41:34 2012 +0000

    sna: Tweak aperture thresholds for batch flushing
    
    In order to more easily accommodate operations on large source CPU bo.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1bcda22..6eae248 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -634,7 +634,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 
 	kgem->aperture_total = aperture.aper_size;
 	kgem->aperture_high = aperture.aper_size * 3/4;
-	kgem->aperture_low = aperture.aper_size * 1/4;
+	kgem->aperture_low = aperture.aper_size * 1/3;
 	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
 	     kgem->aperture_low, kgem->aperture_low / (1024*1024),
 	     kgem->aperture_high, kgem->aperture_high / (1024*1024)));
@@ -2484,9 +2484,6 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	int num_exec = 0;
 	int size = 0;
 
-	if (kgem->aperture > kgem->aperture_low)
-		return false;
-
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		if (bo->exec)
@@ -2497,6 +2494,12 @@ bool kgem_check_bo(struct kgem *kgem, ...)
 	}
 	va_end(ap);
 
+	if (!size)
+		return true;
+
+	if (kgem->aperture > kgem->aperture_low)
+		return false;
+
 	if (size + kgem->aperture > kgem->aperture_high)
 		return false;
 
@@ -2515,9 +2518,6 @@ bool kgem_check_bo_fenced(struct kgem *kgem, ...)
 	int size = 0;
 	int fenced_size = 0;
 
-	if (unlikely (kgem->aperture > kgem->aperture_low))
-		return false;
-
 	va_start(ap, kgem);
 	while ((bo = va_arg(ap, struct kgem_bo *))) {
 		if (bo->exec) {
@@ -2544,13 +2544,19 @@ bool kgem_check_bo_fenced(struct kgem *kgem, ...)
 	if (fenced_size + kgem->aperture_fenced > kgem->aperture_mappable)
 		return false;
 
-	if (size + kgem->aperture > kgem->aperture_high)
+	if (kgem->nfence + num_fence > kgem->fence_max)
 		return false;
 
-	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
+	if (!size)
+		return true;
+
+	if (kgem->aperture > kgem->aperture_low)
 		return false;
 
-	if (kgem->nfence + num_fence >= kgem->fence_max)
+	if (size + kgem->aperture > kgem->aperture_high)
+		return false;
+
+	if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem))
 		return false;
 
 	return true;
commit cff6a1a2e4648eb211a1789ae9f711e2f16e9d4d
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 23:21:36 2012 +0000

    sna: Use the cpu bo where possible as the source for texture extraction
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index d1cb60b..28b93a2 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -295,7 +295,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 	if (DBG_NO_CPU_BO)
 		return NULL;
 
-	priv = sna_pixmap_attach(pixmap);
+	priv = sna_pixmap(pixmap);
 	if (priv == NULL || priv->cpu_bo == NULL) {
 		DBG(("%s: no cpu bo\n", __FUNCTION__));
 		return NULL;
@@ -332,7 +332,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 		int w = box->x2 - box->x1;
 		int h = box->y2 - box->y1;
 
-		if (pixmap->usage_hint)
+		if (!priv->gpu)
 			goto done;
 
 		if (priv->source_count*w*h >= pixmap->drawable.width * pixmap->drawable.height &&
@@ -349,7 +349,7 @@ use_cpu_bo(struct sna *sna, PixmapPtr pixmap, const BoxRec *box)
 done:
 	DBG(("%s for box=(%d, %d), (%d, %d)\n",
 	     __FUNCTION__, box->x1, box->y1, box->x2, box->y2));
-	return kgem_bo_reference(priv->cpu_bo);
+	return priv->cpu_bo;
 }
 
 static Bool
@@ -583,23 +583,25 @@ sna_render_pixmap_bo(struct sna *sna,
 	     pixmap->drawable.width, pixmap->drawable.height));
 
 	bo = use_cpu_bo(sna, pixmap, &box);
-	if (bo == NULL &&
-	    texture_is_cpu(pixmap, &box) &&
-	    !move_to_gpu(pixmap, &box)) {
-		DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
-		     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
-		bo = upload(sna, channel, pixmap, &box);
-	}
-
-	if (bo == NULL) {
-		priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
-		if (priv) {
-			bo = kgem_bo_reference(priv->gpu_bo);
-		} else {
-			DBG(("%s: failed to upload pixmap to gpu, uploading CPU box (%d, %d), (%d, %d) instead\n",
+	if (bo) {
+		bo = kgem_bo_reference(bo);
+	} else {
+		if (texture_is_cpu(pixmap, &box) && !move_to_gpu(pixmap, &box)) {
+			DBG(("%s: uploading CPU box (%d, %d), (%d, %d)\n",
 			     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
 			bo = upload(sna, channel, pixmap, &box);
 		}
+
+		if (bo == NULL) {
+			priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
+			if (priv) {
+				bo = kgem_bo_reference(priv->gpu_bo);
+			} else {
+				DBG(("%s: failed to upload pixmap to gpu, uploading CPU box (%d, %d), (%d, %d) instead\n",
+				     __FUNCTION__, box.x1, box.y1, box.x2, box.y2));
+				bo = upload(sna, channel, pixmap, &box);
+			}
+		}
 	}
 
 	channel->bo = bo;
@@ -870,7 +872,7 @@ sna_render_picture_extract(struct sna *sna,
 			   int16_t w, int16_t h,
 			   int16_t dst_x, int16_t dst_y)
 {
-	struct kgem_bo *bo = NULL;
+	struct kgem_bo *bo = NULL, *src_bo;
 	PixmapPtr pixmap = get_drawable_pixmap(picture->pDrawable);
 	int16_t ox, oy, ow, oh;
 	BoxRec box;
@@ -962,49 +964,48 @@ sna_render_picture_extract(struct sna *sna,
 						     dst_x, dst_y);
 	}
 
-	if (texture_is_cpu(pixmap, &box) && !move_to_gpu(pixmap, &box)) {
-		bo = kgem_upload_source_image(&sna->kgem,
-					      pixmap->devPrivate.ptr,
-					      &box,
-					      pixmap->devKind,
-					      pixmap->drawable.bitsPerPixel);
-		if (bo == NULL) {
-			DBG(("%s: failed to upload source image, using clear\n",
-			     __FUNCTION__));
-			return 0;
-		}
-	} else {
-		if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ)) {
-			DBG(("%s: falback -- pixmap is not on the GPU\n",
-			     __FUNCTION__));
-			return sna_render_picture_fixup(sna, picture, channel,
-							x, y, ow, oh, dst_x, dst_y);
+	src_bo = use_cpu_bo(sna, pixmap, &box);
+	if (src_bo == NULL) {
+		if (texture_is_cpu(pixmap, &box) &&
+		    !move_to_gpu(pixmap, &box)) {
+			bo = kgem_upload_source_image(&sna->kgem,
+						      pixmap->devPrivate.ptr,
+						      &box,
+						      pixmap->devKind,
+						      pixmap->drawable.bitsPerPixel);
+		} else {
+			struct sna_pixmap *priv;
+
+			priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ);
+			if (priv)
+				src_bo = priv->gpu_bo;
 		}
+	}
 
+	if (src_bo) {
 		bo = kgem_create_2d(&sna->kgem, w, h,
 				    pixmap->drawable.bitsPerPixel,
 				    kgem_choose_tiling(&sna->kgem,
 						       I915_TILING_X, w, h,
 						       pixmap->drawable.bitsPerPixel),
 				    0);
-		if (!bo) {
-			DBG(("%s: failed to create bo, using clear\n",
-			     __FUNCTION__));
-			return 0;
-		}
-
-		if (!sna_blt_copy_boxes(sna, GXcopy,
-					sna_pixmap_get_bo(pixmap), 0, 0,
+		if (bo && !sna_blt_copy_boxes(sna, GXcopy,
+					src_bo, 0, 0,
 					bo, -box.x1, -box.y1,
 					pixmap->drawable.bitsPerPixel,
 					&box, 1)) {
-			DBG(("%s: fallback -- unable to copy boxes\n",
-			     __FUNCTION__));
-			return sna_render_picture_fixup(sna, picture, channel,
-							x, y, ow, oh, dst_x, dst_y);
+			kgem_bo_destroy(&sna->kgem, bo);
+			bo = NULL;
 		}
 	}
 
+	if (bo == NULL) {
+		DBG(("%s: falback -- pixmap is not on the GPU\n",
+		     __FUNCTION__));
+		return sna_render_picture_fixup(sna, picture, channel,
+						x, y, ow, oh, dst_x, dst_y);
+	}
+
 	if (ox == x && oy == y) {
 		x = y = 0;
 	} else if (channel->transform) {
commit e583af9cca4ad2e5643317447c6b065d3ee7d11e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 23:04:50 2012 +0000

    sna: Experiment with creating large objects as CPU bo
    
    Even on non-LLC systems if we can prevent the migration of such
    objects, we can still benefit immensely from being able to map them into
    the GTT as required.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 0955a5d..1bcda22 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -632,6 +632,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 	aperture.aper_size = 64*1024*1024;
 	(void)drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
 
+	kgem->aperture_total = aperture.aper_size;
 	kgem->aperture_high = aperture.aper_size * 3/4;
 	kgem->aperture_low = aperture.aper_size * 1/4;
 	DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__,
@@ -657,12 +658,17 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, int gen)
 		 * disable dual-stream mode */
 		kgem->min_alignment = 64;
 
-	kgem->max_object_size = kgem->aperture_mappable / 2;
-	if (kgem->max_object_size > kgem->aperture_low)
-		kgem->max_object_size = kgem->aperture_low;
-	if (kgem->max_object_size > MAX_OBJECT_SIZE)
-		kgem->max_object_size = MAX_OBJECT_SIZE;
-	DBG(("%s: max object size %d\n", __FUNCTION__, kgem->max_object_size));
+	kgem->max_gpu_size = kgem->aperture_mappable / 2;
+	if (kgem->max_gpu_size > kgem->aperture_low)
+		kgem->max_gpu_size = kgem->aperture_low;
+	if (kgem->max_gpu_size > MAX_OBJECT_SIZE)
+		kgem->max_gpu_size = MAX_OBJECT_SIZE;
+
+	kgem->max_cpu_size = kgem->aperture_total / 2;
+	if (kgem->max_cpu_size > MAX_OBJECT_SIZE)
+		kgem->max_cpu_size = MAX_OBJECT_SIZE;
+	DBG(("%s: max object size (tiled=%d, linear=%d)\n",
+	     __FUNCTION__, kgem->max_gpu_size, kgem->max_cpu_size));
 
 	kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2;
 	if ((int)kgem->fence_max < 0)
@@ -979,6 +985,9 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 		goto destroy;
 	}
 
+	if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU)
+		kgem_bo_release_map(kgem, bo);
+
 	assert(list_is_empty(&bo->vma));
 	assert(list_is_empty(&bo->list));
 	assert(bo->vmap == false && bo->sync == false);
@@ -1010,6 +1019,10 @@ static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
 	if (!IS_CPU_MAP(bo->map)) {
 		if (!kgem_bo_set_purgeable(kgem, bo))
 			goto destroy;
+
+		if (!kgem->has_llc && bo->domain == DOMAIN_CPU)
+			goto destroy;
+
 		DBG(("%s: handle=%d, purged\n",
 		     __FUNCTION__, bo->handle));
 	}
@@ -1121,8 +1134,11 @@ bool kgem_retire(struct kgem *kgem)
 		if (kgem_bo_set_purgeable(kgem, rq->bo)) {
 			kgem_bo_move_to_inactive(kgem, rq->bo);
 			retired = true;
-		} else
+		} else {
+			DBG(("%s: closing %d\n",
+			     __FUNCTION__, rq->bo->handle));
 			kgem_bo_free(kgem, rq->bo);
+		}
 
 		_list_del(&rq->list);
 		free(rq);
@@ -1679,9 +1695,13 @@ void kgem_purge_cache(struct kgem *kgem)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) {
-		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list)
-			if (!kgem_bo_is_retained(kgem, bo))
+		list_for_each_entry_safe(bo, next, &kgem->inactive[i], list) {
+			if (!kgem_bo_is_retained(kgem, bo)) {
+				DBG(("%s: purging %d\n",
+				     __FUNCTION__, bo->handle));
 				kgem_bo_free(kgem, bo);
+			}
+		}
 	}
 
 	kgem->need_purge = false;
@@ -1748,6 +1768,8 @@ bool kgem_expire_cache(struct kgem *kgem)
 				count++;
 				size += bo->size;
 				kgem_bo_free(kgem, bo);
+				DBG(("%s: expiring %d\n",
+				     __FUNCTION__, bo->handle));
 			}
 		}
 		if (!list_is_empty(&preserve)) {
@@ -2033,7 +2055,7 @@ int kgem_choose_tiling(struct kgem *kgem, int tiling, int width, int height, int
 	if (tiling &&
 	    kgem_surface_size(kgem, false, false,
 			      width, height, bpp, tiling,
-			      &pitch) > kgem->max_object_size) {
+			      &pitch) > kgem->max_gpu_size) {
 		DBG(("%s: too large (%dx%d) to be fenced, discarding tiling\n",
 		     __FUNCTION__, width, height));
 		tiling = I915_TILING_NONE;
@@ -2096,43 +2118,46 @@ done:
 	return tiling;
 }
 
-static bool _kgem_can_create_2d(struct kgem *kgem,
-				int width, int height, int bpp, int tiling)
+bool kgem_can_create_cpu(struct kgem *kgem,
+			 int width, int height, int depth)
 {
 	uint32_t pitch, size;
 
-	if (bpp < 8)
+	if (depth < 8 || kgem->wedged)
 		return false;
 
-	if (tiling >= 0 && kgem->wedged)
-		return false;
+	size = kgem_surface_size(kgem, false, false,
+				 width, height, BitsPerPixel(depth),
+				 I915_TILING_NONE, &pitch);
+	return size > 0 && size < kgem->max_cpu_size;
+}
 
-	if (tiling < 0)
-		tiling = -tiling;
+static bool _kgem_can_create_gpu(struct kgem *kgem,
+				 int width, int height, int bpp)
+{
+	uint32_t pitch, size;
+
+	if (bpp < 8 || kgem->wedged)
+		return false;
 
 	size = kgem_surface_size(kgem, false, false,
-				 width, height, bpp, tiling, &pitch);
-	if (size == 0 || size >= kgem->max_object_size)
-		size = kgem_surface_size(kgem, false, false,
-					 width, height, bpp,
-					 I915_TILING_NONE, &pitch);
-	return size > 0 && size < kgem->max_object_size;
+				 width, height, bpp, I915_TILING_NONE,
+				 &pitch);
+	return size > 0 && size < kgem->max_gpu_size;
 }
 
 #if DEBUG_KGEM
-bool kgem_can_create_2d(struct kgem *kgem,
-			int width, int height, int bpp, int tiling)
+bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
 {
-	bool ret = _kgem_can_create_2d(kgem, width, height, bpp, tiling);
-	DBG(("%s(%dx%d, bpp=%d, tiling=%d) = %d\n", __FUNCTION__,
-	     width, height, bpp, tiling, ret));
+	bool ret = _kgem_can_create_gpu(kgem, width, height, bpp);
+	DBG(("%s(%dx%d, bpp=%d) = %d\n", __FUNCTION__,
+	     width, height, bpp, ret));
 	return ret;
 }
 #else
-bool kgem_can_create_2d(struct kgem *kgem,
-			int width, int height, int bpp, int tiling)
+bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp)
 {
-	return _kgem_can_create_2d(kgem, width, height, bpp, tiling);
+	return _kgem_can_create_gpu(kgem, width, height, bpp);
 }
 #endif
 
@@ -2177,12 +2202,12 @@ struct kgem_bo *kgem_create_2d(struct kgem *kgem,
 	     !!(flags & CREATE_GTT_MAP),
 	     !!(flags & CREATE_SCANOUT)));
 
-	assert(_kgem_can_create_2d(kgem, width, height, bpp, flags & CREATE_EXACT ? -tiling : tiling));
 	size = kgem_surface_size(kgem,
 				 kgem->has_relaxed_fencing,
 				 flags & CREATE_SCANOUT,
 				 width, height, bpp, tiling, &pitch);
-	assert(size && size <= kgem->max_object_size);
+	assert(size && size < kgem->max_cpu_size);
+	assert(tiling == I915_TILING_NONE || size < kgem->max_gpu_size);
 
 	if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
 		int for_cpu = !!(flags & CREATE_CPU_MAP);
@@ -2342,6 +2367,9 @@ skip_active_search:
 			continue;
 		}
 
+		if ((flags & CREATE_CPU_MAP) == 0 && IS_CPU_MAP(bo->map))
+			continue;
+
 		if (bo->tiling != tiling ||
 		    (tiling != I915_TILING_NONE && bo->pitch != pitch)) {
 			if (tiling != gem_set_tiling(kgem->fd,
@@ -2643,8 +2671,11 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 		list_del(&bo->vma);
 		kgem->vma[type].count--;
 
-		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo))
+		if (!bo->purged && !kgem_bo_set_purgeable(kgem, bo)) {
+			DBG(("%s: freeing unpurgeable old mapping\n",
+			     __FUNCTION__));
 			kgem_bo_free(kgem, bo);
+		}
 	}
 }
 
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 652c2d7..fd3aa9d 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -151,10 +151,10 @@ struct kgem {
 
 	uint16_t fence_max;
 	uint16_t half_cpu_cache_pages;
-	uint32_t aperture_high, aperture_low, aperture;
-	uint32_t aperture_fenced, aperture_mappable;
+	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
+	uint32_t aperture, aperture_fenced;
 	uint32_t min_alignment;
-	uint32_t max_object_size;
+	uint32_t max_gpu_size, max_cpu_size;
 	uint32_t partial_buffer_size;
 
 	void (*context_switch)(struct kgem *kgem, int new_mode);
@@ -200,8 +200,8 @@ struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
 
 int kgem_choose_tiling(struct kgem *kgem,
 		       int tiling, int width, int height, int bpp);
-bool kgem_can_create_2d(struct kgem *kgem,
-			int width, int height, int bpp, int tiling);
+bool kgem_can_create_gpu(struct kgem *kgem, int width, int height, int bpp);
+bool kgem_can_create_cpu(struct kgem *kgem, int width, int height, int depth);
 
 struct kgem_bo *
 kgem_replace_bo(struct kgem *kgem,
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b28134c..759e0fe 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -245,7 +245,7 @@ sna_pixmap_alloc_cpu(struct sna *sna,
 
 	assert(priv->stride);
 
-	if (sna->kgem.has_cpu_bo) {
+	if (sna->kgem.has_cpu_bo || !priv->gpu) {
 		DBG(("%s: allocating CPU buffer (%dx%d)\n", __FUNCTION__,
 		     pixmap->drawable.width, pixmap->drawable.height));
 
@@ -515,11 +515,10 @@ struct sna_pixmap *_sna_pixmap_attach(PixmapPtr pixmap)
 		break;
 
 	default:
-		if (!kgem_can_create_2d(&sna->kgem,
-					pixmap->drawable.width,
-					pixmap->drawable.height,
-					pixmap->drawable.bitsPerPixel,
-					I915_TILING_NONE))
+		if (!kgem_can_create_gpu(&sna->kgem,
+					 pixmap->drawable.width,
+					 pixmap->drawable.height,
+					 pixmap->drawable.bitsPerPixel))
 			return NULL;
 		break;
 	}
@@ -586,6 +585,11 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 		return create_pixmap(sna, screen, width, height, depth,
 				     CREATE_PIXMAP_USAGE_SCRATCH);
 
+	bpp = BitsPerPixel(depth);
+	if (!kgem_can_create_gpu(&sna->kgem, width, height, bpp))
+		return create_pixmap(sna, screen, width, height, depth,
+				     CREATE_PIXMAP_USAGE_SCRATCH);
+
 	if (tiling == I915_TILING_Y && !sna->have_render)
 		tiling = I915_TILING_X;
 
@@ -594,11 +598,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
 	     height > sna->render.max_3d_size))
 		tiling = I915_TILING_X;
 
-	bpp = BitsPerPixel(depth);
 	tiling = kgem_choose_tiling(&sna->kgem, tiling, width, height, bpp);
-	if (!kgem_can_create_2d(&sna->kgem, width, height, bpp, tiling))
-		return create_pixmap(sna, screen, width, height, depth,
-				     CREATE_PIXMAP_USAGE_SCRATCH);
 
 	/* you promise never to access this via the cpu... */
 	if (sna->freed_pixmap) {
@@ -669,7 +669,10 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 	DBG(("%s(%d, %d, %d, usage=%x)\n", __FUNCTION__,
 	     width, height, depth, usage));
 
-	if (depth < 8 || wedged(sna) || !sna->have_render)
+	if (!kgem_can_create_cpu(&sna->kgem, width, height, depth))
+		return create_pixmap(sna, screen, width, height, depth, usage);
+
+	if (!sna->have_render)
 		return create_pixmap(sna, screen,
 				     width, height, depth,
 				     usage);
@@ -696,13 +699,11 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 						 width, height, depth,
 						 I915_TILING_Y);
 
-	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE ||
-	    !kgem_can_create_2d(&sna->kgem, width, height,
-				BitsPerPixel(depth), I915_TILING_NONE))
+	if (usage == CREATE_PIXMAP_USAGE_GLYPH_PICTURE)
 		return create_pixmap(sna, screen, width, height, depth, usage);
 
 	pad = PixmapBytePad(width, depth);
-	if (pad*height <= 4096) {
+	if (pad * height <= 4096) {
 		pixmap = create_pixmap(sna, screen,
 				       width, height, depth, usage);
 		if (pixmap == NullPixmap)
@@ -729,7 +730,9 @@ static PixmapPtr sna_create_pixmap(ScreenPtr screen,
 		}
 
 		priv->stride = pad;
-		priv->gpu = true;
+		priv->gpu = kgem_can_create_gpu(&sna->kgem,
+						width, height,
+						pixmap->drawable.bitsPerPixel);
 	}
 
 	return pixmap;
@@ -1821,6 +1824,7 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(drawable);
 	struct sna_pixmap *priv = sna_pixmap(pixmap);
+	struct sna *sna = to_sna_from_pixmap(pixmap);
 	BoxRec extents;
 	int16_t dx, dy;
 
@@ -1829,6 +1833,9 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable,
 	if (priv == NULL || priv->cpu_bo == NULL)
 		return FALSE;
 
+	if (!sna->kgem.has_llc && priv->cpu_bo->domain == DOMAIN_CPU)
+		return FALSE;
+
 	if (DAMAGE_IS_ALL(priv->cpu_damage)) {
 		*damage = NULL;
 		return TRUE;
@@ -1876,9 +1883,7 @@ sna_pixmap_create_upload(ScreenPtr screen,
 	assert(width);
 	assert(height);
 	if (!sna->have_render ||
-	    !kgem_can_create_2d(&sna->kgem,
-				width, height, bpp,
-				I915_TILING_NONE))
+	    !kgem_can_create_gpu(&sna->kgem, width, height, bpp))
 		return create_pixmap(sna, screen, width, height, depth,
 				     CREATE_PIXMAP_USAGE_SCRATCH);
 
@@ -2024,7 +2029,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
 	sna_damage_reduce(&priv->cpu_damage);
 	DBG(("%s: CPU damage? %d\n", __FUNCTION__, priv->cpu_damage != NULL));
 	if (priv->gpu_bo == NULL) {
-		if (!wedged(sna))
+		if (!wedged(sna) && priv->gpu)
 			priv->gpu_bo =
 				kgem_create_2d(&sna->kgem,
 					       pixmap->drawable.width,
@@ -3195,24 +3200,19 @@ sna_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	}
 
 	/* Try to maintain the data on the GPU */
-	if (dst_priv->gpu_bo == NULL &&
+	if (dst_priv->gpu_bo == NULL && dst_priv->gpu &&
 	    ((dst_priv->cpu_damage == NULL && copy_use_gpu_bo(sna, dst_priv, &region)) ||
 	     (src_priv && (src_priv->gpu_bo != NULL || (src_priv->cpu_bo && kgem_bo_is_busy(src_priv->cpu_bo)))))) {
 		uint32_t tiling = sna_pixmap_choose_tiling(dst_pixmap);
 
 		DBG(("%s: create dst GPU bo for upload\n", __FUNCTION__));
 
-		if (kgem_can_create_2d(&sna->kgem,
+		dst_priv->gpu_bo =
+			kgem_create_2d(&sna->kgem,
 				       dst_pixmap->drawable.width,
 				       dst_pixmap->drawable.height,
 				       dst_pixmap->drawable.bitsPerPixel,
-				       tiling))
-			dst_priv->gpu_bo =
-				kgem_create_2d(&sna->kgem,
-					       dst_pixmap->drawable.width,
-					       dst_pixmap->drawable.height,
-					       dst_pixmap->drawable.bitsPerPixel,
-					       tiling, 0);
+				       tiling, 0);
 	}
 
 	if (dst_priv->gpu_bo) {
commit 55569272f7d4232ef50f7b964dda82f85a190b99
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 20:13:27 2012 +0000

    sna: Apply the same migration flags for the dst alphamap as for the dst pixmap
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_composite.c b/src/sna/sna_composite.c
index 6fb2d27..0faad84 100644
--- a/src/sna/sna_composite.c
+++ b/src/sna/sna_composite.c
@@ -519,8 +519,7 @@ fallback:
 	if (!sna_drawable_move_region_to_cpu(dst->pDrawable, &region, flags))
 		goto out;
 	if (dst->alphaMap &&
-	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable,
-				      MOVE_WRITE | MOVE_READ))
+	    !sna_drawable_move_to_cpu(dst->alphaMap->pDrawable, flags))
 		goto out;
 	if (src->pDrawable) {
 		if (!sna_drawable_move_to_cpu(src->pDrawable,
commit 4a132ddbf06e5ffc364c25002a1e46ad8bf0e45a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 20:12:55 2012 +0000

    sna: Correct offset for moving drawable regions to the CPU
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index d8f96e0..bc14967 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -465,7 +465,8 @@ sna_drawable_move_to_cpu(DrawablePtr drawable, unsigned flags)
 	RegionRec region;
 
 	pixman_region_init_rect(&region,
-				0, 0, drawable->width, drawable->height);
+				drawable->x, drawable->y,
+				drawable->width, drawable->height);
 	return sna_drawable_move_region_to_cpu(drawable, &region, flags);
 }
 
commit 65164d90b7b17ec7eea1e24d4b02ec037b55b1ff
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 20:12:27 2012 +0000

    sna/gen2+: Do not force use of GPU if the target is simply cleared
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index d75a412..eb8d4ef 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1558,7 +1558,7 @@ gen2_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && priv->gpu_damage) {
+	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 618f694..95a79b2 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -2467,7 +2467,7 @@ gen3_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && priv->gpu_damage) {
+	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c1ceb33..c798ce5 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2056,7 +2056,7 @@ gen4_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && priv->gpu_damage) {
+	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index 6308d10..47c4e96 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2094,7 +2094,7 @@ gen5_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && priv->gpu_damage) {
+	if (priv && priv->gpu_damage && !priv->clear) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index 5cbdd74..c3bc2e7 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -2307,7 +2307,9 @@ gen6_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && (priv->gpu_damage || (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
+	if (priv &&
+	    ((priv->gpu_damage && !priv->clear) ||
+	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index ee546e1..21d8c99 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -2374,7 +2374,9 @@ gen7_composite_fallback(struct sna *sna,
 
 	/* If anything is on the GPU, push everything out to the GPU */
 	priv = sna_pixmap(dst_pixmap);
-	if (priv && (priv->gpu_damage || (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
+	if (priv &&
+	    ((priv->gpu_damage && !priv->clear) ||
+	     (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)))) {
 		DBG(("%s: dst is already on the GPU, try to use GPU\n",
 		     __FUNCTION__));
 		return FALSE;
commit 307f493d76580687a3cf56106bf296475f1f53e5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 20:11:21 2012 +0000

    sna: Map freshly created, unbound bo through the CPU
    
    Take advantage that we know we will have to clflush the unbound bo
    before use by the GPU and populate it inplace.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 1e78c1a..0955a5d 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -865,8 +865,10 @@ static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo)
 	munmap(CPU_MAP(bo->map), bo->size);
 	bo->map = NULL;
 
-	list_del(&bo->vma);
-	kgem->vma[type].count--;
+	if (!list_is_empty(&bo->vma)) {
+		list_del(&bo->vma);
+		kgem->vma[type].count--;
+	}
 }
 
 static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
@@ -2393,6 +2395,7 @@ skip_active_search:
 		return NULL;
 	}
 
+	bo->domain = DOMAIN_CPU;
 	bo->unique_id = kgem_get_unique_id(kgem);
 	bo->pitch = pitch;
 	if (tiling != I915_TILING_NONE)
@@ -2598,6 +2601,8 @@ static void kgem_trim_vma_cache(struct kgem *kgem, int type, int bucket)
 {
 	int i, j;
 
+	DBG(("%s: type=%d, count=%d (bucket: %d)\n",
+	     __FUNCTION__, type, kgem->vma[type].count, bucket));
 	if (kgem->vma[type].count <= 0)
 	       return;
 
@@ -2647,14 +2652,17 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 {
 	void *ptr;
 
-	DBG(("%s: handle=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
-	     bo->handle, bo->tiling, bo->map, bo->domain));
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
 
 	assert(!bo->purged);
 	assert(bo->exec == NULL);
 	assert(list_is_empty(&bo->list));
 
-	if (kgem->has_llc && bo->tiling == I915_TILING_NONE) {
+	if (bo->tiling == I915_TILING_NONE &&
+	    (kgem->has_llc || bo->domain == bo->presumed_offset)) {
+		DBG(("%s: converting request for GTT map into CPU map\n",
+		     __FUNCTION__));
 		ptr = kgem_bo_map__cpu(kgem, bo);
 		kgem_bo_sync__cpu(kgem, bo);
 		return ptr;
@@ -3110,6 +3118,8 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 		if (old == NULL)
 			old = search_linear_cache(kgem, alloc, CREATE_INACTIVE);
 		if (old) {
+			DBG(("%s: reusing ordinary handle %d for io\n",
+			     __FUNCTION__, old->handle));
 			alloc = old->size;
 			bo = partial_bo_alloc(alloc);
 			if (bo == NULL)
@@ -3125,20 +3135,55 @@ struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
 			list_init(&bo->base.list);
 			free(old);
 			bo->base.refcnt = 1;
+
+			bo->need_io = flags & KGEM_BUFFER_WRITE;
+			bo->base.io = true;
 		} else {
-			bo = partial_bo_alloc(alloc);
+			bo = malloc(sizeof(*bo));
 			if (bo == NULL)
 				return NULL;
 
-			if (!__kgem_bo_init(&bo->base,
-					    gem_create(kgem->fd, alloc),
-					    alloc)) {
-				free(bo);
+			old = search_linear_cache(kgem, alloc,
+						  CREATE_INACTIVE | CREATE_CPU_MAP);
+			if (old) {
+				DBG(("%s: reusing cpu map handle=%d for buffer\n",
+				     __FUNCTION__, old->handle));
+
+				memcpy(&bo->base, old, sizeof(*old));
+				if (old->rq)
+					list_replace(&old->request, &bo->base.request);
+				else
+					list_init(&bo->base.request);
+				list_replace(&old->vma, &bo->base.vma);
+				list_init(&bo->base.list);
+				free(old);
+				bo->base.refcnt = 1;
+			} else {
+				if (!__kgem_bo_init(&bo->base,
+						    gem_create(kgem->fd, alloc),
+						    alloc)) {
+					free(bo);
+					return NULL;
+				}
+				DBG(("%s: created handle=%d for buffer\n",
+				     __FUNCTION__, bo->base.handle));
+
+				bo->base.domain = DOMAIN_CPU;
+			}
+
+			bo->mem = kgem_bo_map__cpu(kgem, &bo->base);
+			if (bo->mem == NULL) {
+				kgem_bo_free(kgem, &bo->base);
 				return NULL;
 			}
+
+			if (flags & KGEM_BUFFER_WRITE)
+				kgem_bo_sync__cpu(kgem, &bo->base);
+
+			bo->need_io = false;
+			bo->base.io = true;
+			bo->mmapped = true;
 		}
-		bo->need_io = flags & KGEM_BUFFER_WRITE;
-		bo->base.io = true;
 	}
 	bo->base.reusable = false;
 	assert(bo->base.size == alloc);
@@ -3343,8 +3388,8 @@ void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *_bo)
 		gem_read(kgem->fd,
 			 bo->base.handle, (char *)bo->mem+offset,
 			 offset, length);
+		kgem_bo_map__cpu(kgem, &bo->base);
 		bo->base.domain = DOMAIN_NONE;
-		bo->base.reusable = false; /* in the CPU cache now */
 	}
 	kgem_bo_retire(kgem, &bo->base);
 }
commit d785bb7df054a1f15d59db69b089deb743bbdb40
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 16:45:43 2012 +0000

    sna: GetImage is allowed to read a window's border
    
    We need to adjust the clip to include the border pixels when migrating
    damage from the backing pixmap. This also requires relaxing the
    constraint that a read must be within the drawable.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index deb39bf..b28134c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1143,7 +1143,9 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	     RegionExtents(region)->x2, RegionExtents(region)->y2,
 	     flags));
 
-	assert_drawable_contains_box(drawable, &region->extents);
+	if (flags & MOVE_WRITE) {
+		assert_drawable_contains_box(drawable, &region->extents);
+	}
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
@@ -11157,15 +11159,19 @@ sna_get_image(DrawablePtr drawable,
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
 
-	if (region.extents.x1 < drawable->x)
-		region.extents.x1 = drawable->x;
-	if (region.extents.x2 > drawable->x + drawable->width)
-		region.extents.x2 = drawable->x + drawable->width;
+	if (drawable->type == DRAWABLE_PIXMAP) {
+		if (region.extents.x1 < drawable->x)
+			region.extents.x1 = drawable->x;
+		if (region.extents.x2 > drawable->x + drawable->width)
+			region.extents.x2 = drawable->x + drawable->width;
 
-	if (region.extents.y1 < drawable->y)
-		region.extents.y1 = drawable->y;
-	if (region.extents.y2 > drawable->y + drawable->height)
-		region.extents.y2 = drawable->y + drawable->height;
+		if (region.extents.y1 < drawable->y)
+			region.extents.y1 = drawable->y;
+		if (region.extents.y2 > drawable->y + drawable->height)
+			region.extents.y2 = drawable->y + drawable->height;
+	} else
+		RegionIntersect(&region, &region,
+				&((WindowPtr)drawable)->borderClip);
 
 	if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
 		return;
commit 36425ba49ecbd87b1e3bf4340ca2496d8de24a7f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 12:49:24 2012 +0000

    sna: Round up buffer allocations when downsampling
    
    The pathological case being nx1 or 1xm resulting in an illegal allocation
    request of 0 bytes.
    
    One such example is
      wolframalpha.com: x = (200 + x) / 100
    which generates an approximately 8500x1 image and so needs downscaling
    to fit in the render pipeline on all but IvyBridge. Bring on Ivy!
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index d85e930..1e78c1a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3251,20 +3251,22 @@ struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
 	struct kgem_bo *bo;
 	pixman_image_t *src_image, *dst_image;
 	pixman_transform_t t;
+	int w, h;
 	void *dst;
 
 	DBG(("%s : (%d, %d), (%d, %d), stride=%d, bpp=%d\n",
 	     __FUNCTION__, x, y, width, height, stride, bpp));
 
-	bo = kgem_create_buffer_2d(kgem,
-				   width/2, height/2, bpp,
+	w = (width + 1) / 2;
+	h = (height + 1) / 2;
+
+	bo = kgem_create_buffer_2d(kgem, w, h, bpp,
 				   KGEM_BUFFER_WRITE_INPLACE,
 				   &dst);
 	if (bo == NULL)
 		return NULL;
 
-	dst_image = pixman_image_create_bits(format, width/2, height/2,
-					     dst, bo->pitch);
+	dst_image = pixman_image_create_bits(format, w, h, dst, bo->pitch);
 	if (dst_image == NULL)
 		goto cleanup_bo;
 
@@ -3286,7 +3288,7 @@ struct kgem_bo *kgem_upload_source_image_halved(struct kgem *kgem,
 			       x, y,
 			       0, 0,
 			       0, 0,
-			       width/2, height/2);
+			       w, h);
 
 	pixman_image_unref(src_image);
 	pixman_image_unref(dst_image);
diff --git a/src/sna/sna_render.c b/src/sna/sna_render.c
index af81735..d1cb60b 100644
--- a/src/sna/sna_render.c
+++ b/src/sna/sna_render.c
@@ -717,7 +717,8 @@ static int sna_render_picture_downsample(struct sna *sna,
 			goto fixup;
 
 		tmp = screen->CreatePixmap(screen,
-					   w/2, h/2, pixmap->drawable.depth,
+					   (w+1)/2, (h+1)/2,
+					   pixmap->drawable.depth,
 					   SNA_CREATE_SCRATCH);
 		if (!tmp)
 			goto fixup;
@@ -760,18 +761,18 @@ static int sna_render_picture_downsample(struct sna *sna,
 		ValidatePicture(tmp_src);
 
 		if (w > sna->render.max_3d_size) {
-			ww = w/4;
+			ww = (w+3)/4;
 			nj = 2;
 		} else {
-			ww = w/2;
+			ww = (w+1)/2;
 			nj = 1;
 		}
 
 		if (h > sna->render.max_3d_size) {
-			hh = h/4;
+			hh = (h+3)/4;
 			ni = 2;
 		} else {
-			hh = h/2;
+			hh = (h+1)/2;
 			ni = 1;
 		}
 
@@ -783,7 +784,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 
 			b.y1 = hh*i;
 			if (i == ni - 1)
-				b.y2 = h/2;
+				b.y2 = (h+1)/2;
 			else
 				b.y2 = b.y1 + hh;
 
@@ -792,7 +793,7 @@ static int sna_render_picture_downsample(struct sna *sna,
 
 				b.x1 = ww*j;
 				if (j == nj - 1)
-					b.x2 = w/2;
+					b.x2 = (w+1)/2;
 				else
 					b.x2 = b.x1 + ww;
 
@@ -850,8 +851,8 @@ static int sna_render_picture_downsample(struct sna *sna,
 	channel->offset[1] = y - dst_y;
 	channel->scale[0] = 1.f/w;
 	channel->scale[1] = 1.f/h;
-	channel->width  = w / 2;
-	channel->height = h / 2;
+	channel->width  = (w + 1) / 2;
+	channel->height = (h + 1) / 2;
 	channel->bo = bo;
 	return 1;
 
commit a2e83c6dcba1e911f42a3004b3d0782049e243e2
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 11:22:38 2012 +0000

    sna: Silence compiler warning for a potential uninitialised return on error
    
    sna_accel.c: In function 'sna_copy_plane':
    sna_accel.c:5022:21: warning: 'ret' may be used uninitialized in this
    function [-Wuninitialized]
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index ee1f373..deb39bf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5019,7 +5019,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 {
 	PixmapPtr pixmap = get_drawable_pixmap(dst);
 	struct sna *sna = to_sna_from_pixmap(pixmap);
-	RegionRec region, *ret;
+	RegionRec region, *ret = NULL;
 	struct sna_damage **damage;
 
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d\n", __FUNCTION__,
@@ -5105,7 +5105,6 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 
 fallback:
 	DBG(("%s: fallback\n", __FUNCTION__));
-	ret = NULL;
 	if (!sna_gc_move_to_cpu(gc, dst))
 		goto out;
 	if (!sna_drawable_move_region_to_cpu(dst, &region,
commit 8d22a76506133e0f76424159c0944d29bdf39da9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 09:36:20 2012 +0000

    sna: Run the miHandleExposures for no-op CopyPlane
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 03dceaf..ee1f373 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5026,13 +5026,10 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     src_x, src_y, dst_x, dst_y, w, h));
 
 	if (gc->planemask == 0)
-		return NULL;
+		goto empty;
 
 	if (src->bitsPerPixel == 1 && (bit&1) == 0)
-		return miHandleExposures(src, dst, gc,
-					 src_x, src_y,
-					 w, h,
-					 dst_x, dst_y, bit);
+		goto empty;
 
 	region.extents.x1 = dst_x + dst->x;
 	region.extents.y1 = dst_y + dst->y;
@@ -5067,7 +5064,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	     region.extents.x1, region.extents.y1,
 	     region.extents.x2, region.extents.y2));
 	if (!RegionNotEmpty(&region))
-		return NULL;
+		goto empty;
 
 	RegionTranslate(&region,
 			src_x - dst_x - dst->x + src->x,
@@ -5121,6 +5118,11 @@ fallback:
 out:
 	RegionUninit(&region);
 	return ret;
+empty:
+	return miHandleExposures(src, dst, gc,
+				 src_x, src_y,
+				 w, h,
+				 dst_x, dst_y, bit);
 }
 
 static Bool
commit 338941eda3c7591a85b83000eafae0407d0d7cd0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 02:42:25 2012 +0000

    sna: Handle self-copies for CopyPlane
    
    Prepare the source first as this has the dual benefit of letting us
    decide how best to proceed with the op (on the CPU or GPU) and prevents
    modification of the damage after we have chosen our preferred path.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f52766b..03dceaf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -4827,8 +4827,6 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
 	if (n == 0)
 		return;
 
-	if (!sna_pixmap_move_to_cpu(src_pixmap, MOVE_READ))
-		return;
 	get_drawable_deltas(source, src_pixmap, &dx, &dy);
 	sx += dx;
 	sy += dy;
@@ -5027,6 +5025,9 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d\n", __FUNCTION__,
 	     src_x, src_y, dst_x, dst_y, w, h));
 
+	if (gc->planemask == 0)
+		return NULL;
+
 	if (src->bitsPerPixel == 1 && (bit&1) == 0)
 		return miHandleExposures(src, dst, gc,
 					 src_x, src_y,
@@ -5068,6 +5069,17 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (!RegionNotEmpty(&region))
 		return NULL;
 
+	RegionTranslate(&region,
+			src_x - dst_x - dst->x + src->x,
+			src_y - dst_y - dst->y + src->y);
+
+	if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
+		goto out;
+
+	RegionTranslate(&region,
+			-(src_x - dst_x - dst->x + src->x),
+			-(src_y - dst_y - dst->y + src->y));
+
 	if (FORCE_FALLBACK)
 		goto fallback;
 
@@ -5077,6 +5089,9 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	if (wedged(sna))
 		goto fallback;
 
+	if (!PM_IS_SOLID(dst, gc->planemask))
+		goto fallback;
+
 	if (sna_drawable_use_gpu_bo(dst, &region.extents, &damage)) {
 		struct sna_pixmap *priv = sna_pixmap(pixmap);
 		if (priv->gpu_bo->tiling != I915_TILING_Y ||
@@ -5096,17 +5111,10 @@ fallback:
 	ret = NULL;
 	if (!sna_gc_move_to_cpu(gc, dst))
 		goto out;
-
 	if (!sna_drawable_move_region_to_cpu(dst, &region,
 					     MOVE_READ | MOVE_WRITE))
 		goto out;
 
-	RegionTranslate(&region,
-			src_x - dst_x - dst->x + src->x,
-			src_y - dst_y - dst->y + src->y);
-	if (!sna_drawable_move_region_to_cpu(src, &region, MOVE_READ))
-		goto out;
-
 	DBG(("%s: fbCopyPlane(%d, %d, %d, %d, %d,%d) %x\n",
 	     __FUNCTION__, src_x, src_y, w, h, dst_x, dst_y, (unsigned)bit));
 	ret = fbCopyPlane(src, dst, gc, src_x, src_y, w, h, dst_x, dst_y, bit);
commit 2e8b398ca383f5292adab8b351b8837dde3e131a
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:42:56 2012 +0000

    sna: Only shrink partial buffers that are being written to
    
    Ignore inactive and mmapped buffers.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index b979d3c..d85e930 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1255,54 +1255,55 @@ static void kgem_finish_partials(struct kgem *kgem)
 		}
 
 		assert(bo->base.rq == kgem->next_request);
-		if (bo->base.refcnt == 1 && bo->used < bo->base.size / 2) {
-			struct kgem_bo *shrink;
-
-			shrink = search_linear_cache(kgem,
-						     PAGE_ALIGN(bo->used),
-						     CREATE_INACTIVE);
-			if (shrink) {
-				int n;
-
-				DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
-				     __FUNCTION__,
-				     bo->used, bo->base.size, shrink->size,
-				     bo->base.handle, shrink->handle));
-
-				assert(bo->used <= shrink->size);
-				gem_write(kgem->fd, shrink->handle,
-					  0, bo->used, bo->mem);
-
-				for (n = 0; n < kgem->nreloc; n++) {
-					if (kgem->reloc[n].target_handle == bo->base.handle) {
-						kgem->reloc[n].target_handle = shrink->handle;
-						kgem->reloc[n].presumed_offset = shrink->presumed_offset;
-						kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
-							kgem->reloc[n].delta + shrink->presumed_offset;
+		if (bo->used && bo->need_io) {
+			if (bo->base.refcnt == 1 &&
+			    bo->used < bo->base.size / 2) {
+				struct kgem_bo *shrink;
+
+				shrink = search_linear_cache(kgem,
+							     PAGE_ALIGN(bo->used),
+							     CREATE_INACTIVE);
+				if (shrink) {
+					int n;
+
+					DBG(("%s: used=%d, shrinking %d to %d, handle %d to %d\n",
+					     __FUNCTION__,
+					     bo->used, bo->base.size, shrink->size,
+					     bo->base.handle, shrink->handle));
+
+					assert(bo->used <= shrink->size);
+					gem_write(kgem->fd, shrink->handle,
+						  0, bo->used, bo->mem);
+
+					for (n = 0; n < kgem->nreloc; n++) {
+						if (kgem->reloc[n].target_handle == bo->base.handle) {
+							kgem->reloc[n].target_handle = shrink->handle;
+							kgem->reloc[n].presumed_offset = shrink->presumed_offset;
+							kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] =
+								kgem->reloc[n].delta + shrink->presumed_offset;
+						}
 					}
-				}
-
-				bo->base.exec->handle = shrink->handle;
-				bo->base.exec->offset = shrink->presumed_offset;
-				shrink->exec = bo->base.exec;
-				shrink->rq = bo->base.rq;
-				list_replace(&bo->base.request,
-					     &shrink->request);
-				list_init(&bo->base.request);
-				shrink->needs_flush = bo->base.dirty;
-
-				bo->base.exec = NULL;
-				bo->base.rq = NULL;
-				bo->base.dirty = false;
-				bo->base.needs_flush = false;
-				bo->used = 0;
 
-				bubble_sort_partial(kgem, bo);
-				continue;
+					bo->base.exec->handle = shrink->handle;
+					bo->base.exec->offset = shrink->presumed_offset;
+					shrink->exec = bo->base.exec;
+					shrink->rq = bo->base.rq;
+					list_replace(&bo->base.request,
+						     &shrink->request);
+					list_init(&bo->base.request);
+					shrink->needs_flush = bo->base.dirty;
+
+					bo->base.exec = NULL;
+					bo->base.rq = NULL;
+					bo->base.dirty = false;
+					bo->base.needs_flush = false;
+					bo->used = 0;
+
+					bubble_sort_partial(kgem, bo);
+					continue;
+				}
 			}
-		}
 
-		if (bo->used && bo->need_io) {
 			DBG(("%s: handle=%d, uploading %d/%d\n",
 			     __FUNCTION__, bo->base.handle, bo->used, bo->base.size));
 			assert(!kgem_busy(kgem, bo->base.handle));
commit b79252efaafe2ebc998d6cf6176a425dd897e66f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:36:27 2012 +0000

    sna: Apply source clipping to sna_copy_plane()
    
    Ensure that the migration region is within bounds for both the source
    and destination pixmaps.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 2d83868..f52766b 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -5038,7 +5038,33 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 	region.extents.x2 = region.extents.x1 + w;
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
-	region_maybe_clip(&region, gc->pCompositeClip);
+	RegionIntersect(&region, &region, gc->pCompositeClip);
+
+	DBG(("%s: dst extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
+
+	{
+		RegionRec clip;
+
+		clip.extents.x1 = src->x - (src->x + src_x) + (dst->x + dst_x);
+		clip.extents.y1 = src->y - (src->y + src_y) + (dst->y + dst_y);
+		clip.extents.x2 = clip.extents.x1 + src->width;
+		clip.extents.y2 = clip.extents.y1 + src->height;
+		clip.data = NULL;
+
+		DBG(("%s: src extents (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     clip.extents.x1, clip.extents.y1,
+		     clip.extents.x2, clip.extents.y2));
+
+		RegionIntersect(&region, &region, &clip);
+	}
+	DBG(("%s: dst^src extents (%d, %d), (%d, %d)\n",
+	     __FUNCTION__,
+	     region.extents.x1, region.extents.y1,
+	     region.extents.x2, region.extents.y2));
 	if (!RegionNotEmpty(&region))
 		return NULL;
 
commit 46252bc7bcc7e08e47d00cdc87d6c1ed93830fcc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:31:34 2012 +0000

    sna: Set the source clip for CopyArea fallback correctly
    
    The source window is (src->x, src->y)x(src->width, src->height) in
    pixmap space. However, we then need to use this to clip against the
    destination region, and so we need to translate from the source
    coordinate to the destination coordinate.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f86bb62..2d83868 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3629,8 +3629,8 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		{
 			RegionRec clip;
 
-			clip.extents.x1 = -(src_x - dst_x - dst->x + src->x);
-			clip.extents.y1 = -(src_y - dst_y - dst->y + src->y);
+			clip.extents.x1 = src->x - (src->x + src_x) + (dst->x + dst_x);
+			clip.extents.y1 = src->y - (src->y + src_y) + (dst->y + dst_y);
 			clip.extents.x2 = clip.extents.x1 + src->width;
 			clip.extents.y2 = clip.extents.y1 + src->height;
 			clip.data = NULL;
commit ae6d3a311783d7e063de0347363331f14bd74d74
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:18:11 2012 +0000

    sna: Print source and destination regions for CopyArea fallback for DBG
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 613c1ed..f86bb62 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3597,8 +3597,13 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 {
 	struct sna *sna = to_sna_from_drawable(dst);
 
-	DBG(("%s: src=(%d, %d)x(%d, %d) -> dst=(%d, %d)\n",
-	     __FUNCTION__, src_x, src_y, width, height, dst_x, dst_y));
+	if (gc->planemask == 0)
+		return NULL;
+
+	DBG(("%s: src=(%d, %d)x(%d, %d)+(%d, %d) -> dst=(%d, %d)+(%d, %d)\n",
+	     __FUNCTION__,
+	     src_x, src_y, width, height, src->x, src->y,
+	     dst_x, dst_y, dst->x, dst->y));
 
 	if (FORCE_FALLBACK || !ACCEL_COPY_AREA || wedged(sna) ||
 	    !PM_IS_SOLID(dst, gc->planemask)) {
@@ -3616,6 +3621,11 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 		region.data = NULL;
 		RegionIntersect(&region, &region, gc->pCompositeClip);
 
+		DBG(("%s: dst extents (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     region.extents.x1, region.extents.y1,
+		     region.extents.x2, region.extents.y2));
+
 		{
 			RegionRec clip;
 
@@ -3625,8 +3635,18 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
 			clip.extents.y2 = clip.extents.y1 + src->height;
 			clip.data = NULL;
 
+			DBG(("%s: src extents (%d, %d), (%d, %d)\n",
+			     __FUNCTION__,
+			     clip.extents.x1, clip.extents.y1,
+			     clip.extents.x2, clip.extents.y2));
+
 			RegionIntersect(&region, &region, &clip);
 		}
+		DBG(("%s: dst^src extents (%d, %d), (%d, %d)\n",
+		     __FUNCTION__,
+		     region.extents.x1, region.extents.y1,
+		     region.extents.x2, region.extents.y2));
+
 		if (!RegionNotEmpty(&region))
 			return NULL;
 
commit dd5e90adfc73870cebcb215ad9fb9b5aedd38673
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:17:49 2012 +0000

    sna: Clip GetImage to drawable so that damage migration is within bounds
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 31a9079..613c1ed 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1143,7 +1143,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
 	     RegionExtents(region)->x2, RegionExtents(region)->y2,
 	     flags));
 
-	assert_pixmap_contains_box(pixmap, &region->extents);
+	assert_drawable_contains_box(drawable, &region->extents);
 
 	priv = sna_pixmap(pixmap);
 	if (priv == NULL) {
@@ -11102,6 +11102,16 @@ sna_get_image(DrawablePtr drawable,
 	region.extents.y2 = region.extents.y1 + h;
 	region.data = NULL;
 
+	if (region.extents.x1 < drawable->x)
+		region.extents.x1 = drawable->x;
+	if (region.extents.x2 > drawable->x + drawable->width)
+		region.extents.x2 = drawable->x + drawable->width;
+
+	if (region.extents.y1 < drawable->y)
+		region.extents.y1 = drawable->y;
+	if (region.extents.y2 > drawable->y + drawable->height)
+		region.extents.y2 = drawable->y + drawable->height;
+
 	if (!sna_drawable_move_region_to_cpu(drawable, &region, MOVE_READ))
 		return;
 
commit b1fba5e8534da7fe253e21a3501854c04d82a108
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Wed Jan 25 01:16:34 2012 +0000

    sna: Clear GPU damage first when moving a clear pixmap to the CPU
    
    This allows us to discard any busy GPU or CPU bo when we know we are
    going to clear the shadow pixmap afterwards.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 1a13465..31a9079 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -922,6 +922,13 @@ skip_inplace_map:
 		priv->mapped = false;
 	}
 
+	if (priv->clear) {
+		if (priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo))
+			sna_pixmap_free_cpu(sna, priv);
+		sna_damage_destroy(&priv->gpu_damage);
+		priv->undamaged = true;
+	}
+
 	if (pixmap->devPrivate.ptr == NULL &&
 	    !sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL))
 		return false;
@@ -943,8 +950,6 @@ skip_inplace_map:
 			    pixmap->drawable.height,
 			    priv->clear_color);
 
-		sna_damage_destroy(&priv->gpu_damage);
-		priv->undamaged = true;
 		priv->clear = false;
 	}
 
commit 5ad95d66665802bce25e127ae0d06f3e0a9b0e62
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date:   Tue Jan 24 22:11:20 2012 +0000

    sna: Reduce number of reads required to inspect timers
    
    By using the information provided by select at wakeup.
    
    Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>

diff --git a/src/sna/sna.h b/src/sna/sna.h
index cabf04c..d8f96e0 100644
--- a/src/sna/sna.h
+++ b/src/sna/sna.h
@@ -231,7 +231,8 @@ struct sna {
 #define SNA_NO_DELAYED_FLUSH	0x2
 
 	int timer[NUM_TIMERS];
-	int timer_active;
+	uint16_t timer_active;
+	uint16_t timer_ready;
 
 	int vblank_interval;
 
@@ -581,7 +582,7 @@ static inline uint32_t pixmap_size(PixmapPtr pixmap)
 Bool sna_accel_pre_init(struct sna *sna);
 Bool sna_accel_init(ScreenPtr sreen, struct sna *sna);
 void sna_accel_block_handler(struct sna *sna);
-void sna_accel_wakeup_handler(struct sna *sna);
+void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready);
 void sna_accel_close(struct sna *sna);
 void sna_accel_free(struct sna *sna);
 
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 46016e0..1a13465 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -11276,7 +11276,7 @@ static void _sna_accel_disarm_timer(struct sna *sna, int id)
 
 #define return_if_timer_active(id) do {					\
 	if (sna->timer_active & (1<<(id)))				\
-		return read_timer(sna->timer[id]) > 0;			\
+		return (sna->timer_ready & (1<<(id))) && read_timer(sna->timer[id]) > 0;			\
 } while (0)
 
 static Bool sna_accel_do_flush(struct sna *sna)
@@ -11687,15 +11687,24 @@ void sna_accel_block_handler(struct sna *sna)
 
 	if (sna_accel_do_inactive(sna))
 		sna_accel_inactive(sna);
+
+	sna->timer_ready = 0;
 }
 
-void sna_accel_wakeup_handler(struct sna *sna)
+void sna_accel_wakeup_handler(struct sna *sna, fd_set *ready)
 {
+	int id, active;
+
 	if (sna->kgem.need_retire)
 		kgem_retire(&sna->kgem);
 	if (sna->kgem.need_purge)
 		kgem_purge_cache(&sna->kgem);
 
+	active = sna->timer_active & ~sna->timer_ready;
+	for (id = 0; id < NUM_TIMERS; id++)
+		if (active & (1 << id) && FD_ISSET(sna->timer[id], ready))
+			sna->timer_ready |= 1 << id;
+
 	sna_deferred_free(sna);
 }
 
diff --git a/src/sna/sna_driver.c b/src/sna/sna_driver.c
index 770a5bd..bcd1191 100644
--- a/src/sna/sna_driver.c
+++ b/src/sna/sna_driver.c
@@ -606,7 +606,7 @@ sna_wakeup_handler(int i, pointer data, unsigned long result, pointer read_mask)
 
 	sna->WakeupHandler(i, sna->WakeupData, result, read_mask);
 
-	sna_accel_wakeup_handler(sna);
+	sna_accel_wakeup_handler(sna, read_mask);
 
 	if (FD_ISSET(sna->kgem.fd, (fd_set*)read_mask))
 		sna_dri_wakeup(sna);
@@ -761,7 +761,6 @@ static Bool sna_close_screen(int scrnIndex, ScreenPtr screen)
 #endif
 
 	/* drain the event queues */
-	sna_accel_wakeup_handler(sna);
 	if (sna_dri_has_pending_events(sna))
 		sna_dri_wakeup(sna);
 


More information about the xorg-commit mailing list