xf86-video-intel: 3 commits - src/sna/gen4_render.c src/sna/sna_io.c
Chris Wilson
ickle at kemper.freedesktop.org
Tue Nov 13 03:30:50 PST 2012
src/sna/gen4_render.c | 16 +++++++---------
src/sna/sna_io.c | 44 +++++++++++++++++++++++++++++++++-----------
2 files changed, 40 insertions(+), 20 deletions(-)
New commits:
commit b6d2bb961517623d46aa6944307cb998ee125459
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Nov 13 10:05:56 2012 +0000
sna/gen4: Do not prefer inplace non-rectilinear spans
As gen4 requires the per-rectangle vertex flush, emitting spans on the
GPU is inefficient and so we prefer to composite the mask instead.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index 95a8363..3aa33db 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -49,6 +49,7 @@
*/
#define PREFER_BLT 1
#define FLUSH_EVERY_VERTEX 1
+#define FORCE_SPANS 0
#define NO_COMPOSITE 0
#define NO_COMPOSITE_SPANS 0
@@ -2624,16 +2625,11 @@ gen4_check_composite_spans(struct sna *sna,
return false;
}
- if ((flags & (COMPOSITE_SPANS_RECTILINEAR | COMPOSITE_SPANS_INPLACE_HINT)) == 0) {
- struct sna_pixmap *priv = sna_pixmap_from_drawable(dst->pDrawable);
- assert(priv);
+ if (FORCE_SPANS)
+ return FORCE_SPANS > 0;
- if ((priv->cpu_bo && kgem_bo_is_busy(priv->cpu_bo)) ||
- (priv->gpu_bo && kgem_bo_is_busy(priv->gpu_bo))) {
- return true;
- }
-
- DBG(("%s: fallback, non-rectilinear spans to idle bo\n",
+ if ((flags & COMPOSITE_SPANS_RECTILINEAR) == 0) {
+ DBG(("%s: fallback, non-rectilinear spans\n",
__FUNCTION__));
return false;
}
commit ae293609c7400cd3c753ed3762772264c4741df5
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Nov 13 10:21:29 2012 +0000
sna/gen4: Always initialise redirect
Do not assume the caller cleared the composite-op structure for us.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index be97458..95a8363 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2294,6 +2294,7 @@ gen4_render_composite(struct sna *sna,
return false;
sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
+ sna_render_composite_redirect_init(tmp);
if (too_large(tmp->dst.width, tmp->dst.height) &&
!sna_render_composite_redirect(sna, tmp,
dst_x, dst_y, width, height))
@@ -2669,6 +2670,7 @@ gen4_render_composite_spans(struct sna *sna,
return false;
sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
+ sna_render_composite_redirect_init(&tmp->base);
if (too_large(tmp->base.dst.width, tmp->base.dst.height)) {
if (!sna_render_composite_redirect(sna, &tmp->base,
dst_x, dst_y, width, height))
commit 2954f15e2bcb590a90c2cb6077c0843ee25a4413
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Nov 13 09:46:19 2012 +0000
sna: Specialise the decision for inplace xor uploads
Fixes a regression from
commit 0be1d964713ca407f029278a8256d02d925dc9da
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Tue Sep 11 21:48:24 2012 +0100
sna: Use inplace X tiling for LLC uploads
which introduced the ability to swizzle into CPU maps, but also
convinced the xorg path to the same - which for large images blows up.
Reported-by: Michael Laß <bevan at bi-co.net>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=57031
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 69d920c..2038e5d 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -579,19 +579,13 @@ static bool write_boxes_inplace(struct kgem *kgem,
return true;
}
-static bool upload_inplace(struct kgem *kgem,
- struct kgem_bo *bo,
- const BoxRec *box,
- int n, int bpp)
+static bool __upload_inplace(struct kgem *kgem,
+ struct kgem_bo *bo,
+ const BoxRec *box,
+ int n, int bpp)
{
unsigned int bytes;
- if (kgem->wedged)
- return true;
-
- if (!kgem_bo_can_map(kgem, bo) && !upload_inplace__tiled(kgem, bo))
- return false;
-
if (FORCE_INPLACE)
return FORCE_INPLACE > 0;
@@ -610,6 +604,20 @@ static bool upload_inplace(struct kgem *kgem,
return bytes * bpp >> 12;
}
+static bool upload_inplace(struct kgem *kgem,
+ struct kgem_bo *bo,
+ const BoxRec *box,
+ int n, int bpp)
+{
+ if (kgem->wedged)
+ return true;
+
+ if (!kgem_bo_can_map(kgem, bo) && !upload_inplace__tiled(kgem, bo))
+ return false;
+
+ return __upload_inplace(kgem, bo, box, n,bpp);
+}
+
bool sna_write_boxes(struct sna *sna, PixmapPtr dst,
struct kgem_bo * const dst_bo, int16_t const dst_dx, int16_t const dst_dy,
const void * const src, int const stride, int16_t const src_dx, int16_t const src_dy,
@@ -960,6 +968,20 @@ write_boxes_inplace__xor(struct kgem *kgem,
} while (--n);
}
+static bool upload_inplace__xor(struct kgem *kgem,
+ struct kgem_bo *bo,
+ const BoxRec *box,
+ int n, int bpp)
+{
+ if (kgem->wedged)
+ return true;
+
+ if (!kgem_bo_can_map(kgem, bo))
+ return false;
+
+ return __upload_inplace(kgem, bo, box, n, bpp);
+}
+
void sna_write_boxes__xor(struct sna *sna, PixmapPtr dst,
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
const void *src, int stride, int16_t src_dx, int16_t src_dy,
@@ -976,7 +998,7 @@ void sna_write_boxes__xor(struct sna *sna, PixmapPtr dst,
DBG(("%s x %d\n", __FUNCTION__, nbox));
- if (upload_inplace(kgem, dst_bo, box, nbox, dst->drawable.bitsPerPixel)) {
+ if (upload_inplace__xor(kgem, dst_bo, box, nbox, dst->drawable.bitsPerPixel)) {
fallback:
write_boxes_inplace__xor(kgem,
src, stride, dst->drawable.bitsPerPixel, src_dx, src_dy,
More information about the xorg-commit
mailing list