xf86-video-intel: 7 commits - src/sna/gen2_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_glyphs.c
Chris Wilson
ickle at kemper.freedesktop.org
Thu Oct 4 07:19:26 PDT 2012
src/sna/gen2_render.c | 72 ++++++++++++++++++++++++++++++--------------------
src/sna/kgem.c | 5 ++-
src/sna/kgem.h | 3 ++
src/sna/sna_accel.c | 26 +++++++++---------
src/sna/sna_glyphs.c | 8 ++++-
5 files changed, 71 insertions(+), 43 deletions(-)
New commits:
commit 3ad3a2d839f8e2f5b02f883e2b4131d4c0901fb0
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 15:14:08 2012 +0100
sna/gen2: Tidy DBG code for disabling composite operations
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index b6b259e..6085348 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1769,17 +1769,6 @@ gen2_render_composite(struct sna *sna,
return false;
}
-#if NO_COMPOSITE
- if (mask)
- return false;
-
- return sna_blt_composite(sna, op,
- src, dst,
- src_x, src_y,
- dst_x, dst_y,
- width, height, tmp, true);
-#endif
-
/* Try to use the BLT engine unless it implies a
* 3D -> 2D context switch.
*/
@@ -3196,7 +3185,9 @@ bool gen2_render_init(struct sna *sna)
/* Use the BLT (and overlay) for everything except when forced to
* use the texture combiners.
*/
+#if !NO_COMPOSITE
render->composite = gen2_render_composite;
+#endif
#if !NO_COMPOSITE_SPANS
render->check_composite_spans = gen2_check_composite_spans;
render->composite_spans = gen2_render_composite_spans;
commit 6054f8e1120d499f70e7bd4809b3fc6994792bcb
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 14:40:59 2012 +0100
sna: Remove another sna_pixmap->cpu assertion that was not true
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d849bea..306302e 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2722,7 +2722,6 @@ create_gpu_bo:
if (priv->gpu_damage) {
if (!priv->cpu_damage) {
- assert(priv->cpu == false);
if (sna_damage_contains_box__no_reduce(priv->gpu_damage,
&region.extents)) {
DBG(("%s: region wholly contained within GPU damage\n",
@@ -2731,6 +2730,7 @@ create_gpu_bo:
} else {
DBG(("%s: partial GPU damage with no CPU damage, continuing to use GPU\n",
__FUNCTION__));
+ priv->cpu = false;
goto done;
}
}
commit acf354d7fb575c468e86a5935cad095015ccadb1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 14:35:14 2012 +0100
sna: Propagate failures from compositing glyph masks
So that we can fallback correctly. This is primarily used for debugging
failure paths...
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_glyphs.c b/src/sna/sna_glyphs.c
index 53494e3..9a6ad4b 100644
--- a/src/sna/sna_glyphs.c
+++ b/src/sna/sna_glyphs.c
@@ -819,6 +819,7 @@ glyphs_via_mask(struct sna *sna,
PicturePtr glyph_atlas, mask;
int16_t x, y, width, height;
int error;
+ bool ret = false;
BoxRec box;
if (NO_GLYPHS_VIA_MASK)
@@ -1141,11 +1142,12 @@ next_glyph:
0, 0,
box.x1, box.y1,
width, height);
+ ret = true;
err_mask:
FreePicture(mask, 0);
err_pixmap:
sna_pixmap_destroy(pixmap);
- return TRUE;
+ return ret;
}
static PictFormatPtr
@@ -1716,6 +1718,7 @@ glyphs_via_image(struct sna *sna,
int16_t x, y, width, height;
pixman_image_t *mask_image;
int error;
+ bool ret = false;
BoxRec box;
if (NO_GLYPHS_VIA_MASK)
@@ -1929,9 +1932,10 @@ next_image:
box.x1, box.y1,
width, height);
FreePicture(mask, 0);
+ ret = true;
err_pixmap:
sna_pixmap_destroy(pixmap);
- return TRUE;
+ return ret;
}
void
commit c7ad655359b746917deeac3efb9eb843465cf4b9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 13:52:55 2012 +0100
sna/gen2: Prevent using the GTT maps with I915_TILING_Y on 855gm
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 3d8620a..bc2e66e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -3826,8 +3826,10 @@ uint32_t kgem_add_reloc(struct kgem *kgem,
kgem->reloc[index].target_handle = bo->handle;
kgem->reloc[index].presumed_offset = bo->presumed_offset;
- if (read_write_domain & 0x7ff)
+ if (read_write_domain & 0x7ff) {
+ assert(!bo->snoop || kgem->can_blt_cpu);
kgem_bo_mark_dirty(bo);
+ }
delta += bo->presumed_offset;
} else {
@@ -3964,6 +3966,7 @@ void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
ptr = bo->map;
if (ptr == NULL) {
assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
+ assert(kgem->gen != 21 || bo->tiling != I915_TILING_Y);
kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index cdbb7cb..06f3999 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -519,6 +519,9 @@ static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
if (!bo->tiling && kgem->has_llc)
return true;
+ if (kgem->gen == 21 && bo->tiling == I915_TILING_Y)
+ return false;
+
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 94ed933..d849bea 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1830,6 +1830,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
assert(flags & MOVE_WRITE);
if (priv->stride && priv->gpu_bo &&
+ kgem_bo_can_map(&sna->kgem, priv->gpu_bo) &&
region_inplace(sna, pixmap, region, priv, true)) {
assert(priv->gpu_bo->proxy == NULL);
if (!__kgem_bo_is_busy(&sna->kgem, priv->gpu_bo)) {
@@ -1913,6 +1914,7 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
}
if (operate_inplace(priv, flags) &&
+ kgem_bo_can_map(&sna->kgem, priv->gpu_bo) &&
region_inplace(sna, pixmap, region, priv, (flags & MOVE_READ) == 0)) {
kgem_bo_submit(&sna->kgem, priv->gpu_bo);
commit 0a2c5eb7669c249194b41d624ca05654a8d7035b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 13:51:23 2012 +0100
sna: Once again look into assertions around sna_pixmap->cpu
Revert back to basics, and clear the CPU flag everytime we use the GPU,
rather than try to avoid clearing it along some paths.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c159f43..94ed933 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3016,10 +3016,8 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
priv->gpu_bo = NULL;
}
- if ((flags & MOVE_READ) == 0) {
+ if ((flags & MOVE_READ) == 0)
sna_damage_destroy(&priv->cpu_damage);
- priv->cpu = false;
- }
sna_damage_reduce(&priv->cpu_damage);
assert_pixmap_damage(pixmap);
@@ -3071,7 +3069,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
pixmap->drawable.height);
DBG(("%s: marking as all-damaged for GPU\n",
__FUNCTION__));
- assert(priv->cpu == false);
goto active;
}
}
@@ -3139,7 +3136,6 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
__sna_damage_destroy(DAMAGE_PTR(priv->cpu_damage));
priv->cpu_damage = NULL;
priv->undamaged = true;
- priv->cpu = false;
if (priv->shm) {
assert(!priv->flush);
@@ -3171,7 +3167,7 @@ done:
active:
if (flags & MOVE_WRITE)
priv->clear = false;
- assert(priv->cpu == false);
+ priv->cpu = false;
assert(!priv->gpu_bo->proxy || (flags & MOVE_WRITE) == 0);
return sna_pixmap_mark_active(sna, priv);
}
commit 4d3b849c57d78f0447334c5c0871d8bd78e0af24
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 12:14:47 2012 +0100
sna: Avoid using the gpu for uploads whilst wedged
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 5ebedad..c159f43 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1377,12 +1377,16 @@ static inline bool use_cpu_bo_for_download(struct sna *sna,
return true;
}
-static inline bool use_cpu_bo_for_upload(struct sna_pixmap *priv,
+static inline bool use_cpu_bo_for_upload(struct sna *sna,
+ struct sna_pixmap *priv,
unsigned flags)
{
if (DBG_NO_CPU_UPLOAD)
return false;
+ if (wedged(sna))
+ return false;
+
if (priv->cpu_bo == NULL)
return false;
@@ -2425,7 +2429,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
if (n) {
bool ok = false;
- if (use_cpu_bo_for_upload(priv, 0)) {
+ if (use_cpu_bo_for_upload(sna, priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -2469,7 +2473,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
} else if (DAMAGE_IS_ALL(priv->cpu_damage) ||
sna_damage_contains_box__no_reduce(priv->cpu_damage, box)) {
bool ok = false;
- if (use_cpu_bo_for_upload(priv, 0)) {
+ if (use_cpu_bo_for_upload(sna, priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -2504,7 +2508,7 @@ sna_pixmap_move_area_to_gpu(PixmapPtr pixmap, const BoxRec *box, unsigned int fl
box = REGION_RECTS(&i);
ok = false;
- if (use_cpu_bo_for_upload(priv, 0)) {
+ if (use_cpu_bo_for_upload(sna, priv, 0)) {
DBG(("%s: using CPU bo for upload to GPU, %d boxes\n", __FUNCTION__, n));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
@@ -3099,7 +3103,7 @@ sna_pixmap_move_to_gpu(PixmapPtr pixmap, unsigned flags)
flags |= MOVE_ASYNC_HINT;
ok = false;
- if (use_cpu_bo_for_upload(priv, flags)) {
+ if (use_cpu_bo_for_upload(sna, priv, flags)) {
DBG(("%s: using CPU bo for upload to GPU\n", __FUNCTION__));
ok = sna->render.copy_boxes(sna, GXcopy,
pixmap, priv->cpu_bo, 0, 0,
commit 4608e482615d4b4599608c7bc17ac8a9f9293840
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Oct 4 11:23:36 2012 +0100
sna/gen2: Allow fine damage tracking for render operations
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index cf3088b..b6b259e 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -1429,25 +1429,40 @@ gen2_composite_picture(struct sna *sna,
static bool
gen2_composite_set_target(struct sna *sna,
struct sna_composite_op *op,
- PicturePtr dst)
+ PicturePtr dst,
+ int x, int y, int w, int h)
{
- struct sna_pixmap *priv;
+ BoxRec box;
op->dst.pixmap = get_drawable_pixmap(dst->pDrawable);
op->dst.format = dst->format;
- op->dst.width = op->dst.pixmap->drawable.width;
+ op->dst.width = op->dst.pixmap->drawable.width;
op->dst.height = op->dst.pixmap->drawable.height;
- priv = sna_pixmap_force_to_gpu(op->dst.pixmap, MOVE_WRITE | MOVE_READ);
- if (priv == NULL)
+ if (w && h) {
+ box.x1 = x;
+ box.y1 = y;
+ box.x2 = x + w;
+ box.y2 = y + h;
+ } else
+ sna_render_picture_extents(dst, &box);
+
+ op->dst.bo = sna_drawable_use_bo (dst->pDrawable,
+ PREFER_GPU | FORCE_GPU | RENDER_GPU,
+ &box, &op->damage);
+ if (op->dst.bo == NULL)
return false;
- if (priv->gpu_bo->pitch < 8) {
+ if (op->dst.bo->pitch < 8) {
+ struct sna_pixmap *priv;
struct kgem_bo *bo;
- if (priv->pinned)
+ priv = sna_pixmap_move_to_gpu (op->dst.pixmap,
+ MOVE_READ | MOVE_WRITE);
+ if (priv == NULL || priv->pinned)
return false;
+ assert(op->dst.bo == priv->gpu_bo);
bo = kgem_replace_bo(&sna->kgem, priv->gpu_bo,
op->dst.width, op->dst.height, 8,
op->dst.pixmap->drawable.bitsPerPixel);
@@ -1456,15 +1471,26 @@ gen2_composite_set_target(struct sna *sna,
kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
priv->gpu_bo = bo;
- }
- op->dst.bo = priv->gpu_bo;
- op->damage = &priv->gpu_damage;
- if (sna_damage_is_all(&priv->gpu_damage, op->dst.width, op->dst.height))
- op->damage = NULL;
+ op->dst.bo = priv->gpu_bo;
+ op->damage = &priv->gpu_damage;
+ if (sna_damage_is_all(op->damage,
+ op->dst.width, op->dst.height))
+ op->damage = NULL;
+ }
get_drawable_deltas(dst->pDrawable, op->dst.pixmap,
&op->dst.x, &op->dst.y);
+
+ DBG(("%s: pixmap=%p, format=%08x, size=%dx%d, pitch=%d, delta=(%d,%d),damage=%p\n",
+ __FUNCTION__,
+ op->dst.pixmap, (int)op->dst.format,
+ op->dst.width, op->dst.height,
+ op->dst.bo->pitch,
+ op->dst.x, op->dst.y,
+ op->damage ? *op->damage : (void *)-1));
+
+ assert(op->dst.bo->proxy == NULL);
return true;
}
@@ -1778,14 +1804,13 @@ gen2_render_composite(struct sna *sna,
width, height,
tmp);
- if (!gen2_composite_set_target(sna, tmp, dst)) {
+ if (!gen2_composite_set_target(sna, tmp, dst,
+ dst_x, dst_y, width, height)) {
DBG(("%s: unable to set render target\n",
__FUNCTION__));
return false;
}
- sna_render_reduce_damage(tmp, dst_x, dst_y, width, height);
-
tmp->op = op;
if (too_large(tmp->dst.width, tmp->dst.height) ||
tmp->dst.bo->pitch > MAX_3D_PITCH) {
@@ -2263,12 +2288,12 @@ gen2_render_composite_spans(struct sna *sna,
width, height, flags, tmp);
}
- if (!gen2_composite_set_target(sna, &tmp->base, dst)) {
+ if (!gen2_composite_set_target(sna, &tmp->base, dst,
+ dst_x, dst_y, width, height)) {
DBG(("%s: unable to set render target\n",
__FUNCTION__));
return false;
}
- sna_render_reduce_damage(&tmp->base, dst_x, dst_y, width, height);
tmp->base.op = op;
if (too_large(tmp->base.dst.width, tmp->base.dst.height) ||
More information about the xorg-commit
mailing list