xf86-video-intel: 3 commits - src/sna/gen4_render.c src/sna/gen5_render.c src/sna/gen6_render.c src/sna/gen7_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_io.c
Chris Wilson
ickle at kemper.freedesktop.org
Sat Jan 12 09:33:34 PST 2013
src/sna/gen4_render.c | 7 +
src/sna/gen5_render.c | 6 +
src/sna/gen6_render.c | 14 ++-
src/sna/gen7_render.c | 14 ++-
src/sna/kgem.c | 44 ++++++---
src/sna/kgem.h | 1
src/sna/sna_accel.c | 225 ++++++++++++++++++++++----------------------------
src/sna/sna_blt.c | 28 +++---
src/sna/sna_io.c | 12 ++
9 files changed, 186 insertions(+), 165 deletions(-)
New commits:
commit ab01fd696e1137ddfb9a85ae68c15c05900f0e8e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Jan 12 09:17:03 2013 +0000
sna: Experiment with a CPU mapping for certain fallbacks
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 8f0dfff..8604488 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -4678,6 +4678,29 @@ void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo)
}
}
+void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write)
+{
+ assert(bo->proxy == NULL);
+ kgem_bo_submit(kgem, bo);
+
+ if (bo->domain != DOMAIN_CPU) {
+ struct drm_i915_gem_set_domain set_domain;
+
+ DBG(("%s: SYNC: needs_flush? %d, domain? %d, busy? %d\n", __FUNCTION__,
+ bo->needs_flush, bo->domain, kgem_busy(kgem, bo->handle)));
+
+ VG_CLEAR(set_domain);
+ set_domain.handle = bo->handle;
+ set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+ set_domain.write_domain = write ? I915_GEM_DOMAIN_CPU : 0;
+
+ if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain) == 0) {
+ kgem_bo_retire(kgem, bo);
+ bo->domain = write ? DOMAIN_CPU : DOMAIN_NONE;
+ }
+ }
+}
+
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo)
{
assert(bo->proxy == NULL);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 92fbaec..d2b89f5 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -454,6 +454,7 @@ void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
+void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b0f66c7..e9ecfaf 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1324,7 +1324,7 @@ static inline bool pixmap_inplace(struct sna *sna,
return false;
if (priv->mapped)
- return true;
+ return !IS_CPU_MAP(priv->gpu_bo->map);
return (pixmap->devKind * pixmap->drawable.height >> 12) >
sna->kgem.half_cpu_cache_pages;
@@ -1530,7 +1530,7 @@ skip_inplace_map:
sna_pixmap_move_to_gpu(pixmap, flags)) {
kgem_bo_submit(&sna->kgem, priv->gpu_bo);
- DBG(("%s: try to operate inplace\n", __FUNCTION__));
+ DBG(("%s: try to operate inplace (GTT)\n", __FUNCTION__));
assert(priv->cpu == false);
pixmap->devPrivate.ptr =
@@ -1550,7 +1550,7 @@ skip_inplace_map:
}
assert_pixmap_damage(pixmap);
- DBG(("%s: operate inplace\n", __FUNCTION__));
+ DBG(("%s: operate inplace (GTT)\n", __FUNCTION__));
return true;
}
@@ -1563,6 +1563,38 @@ skip_inplace_map:
priv->mapped = false;
}
+ if (priv->gpu_bo &&
+ priv->gpu_bo->tiling == I915_TILING_NONE &&
+ sna_pixmap_move_to_gpu(pixmap, flags)) {
+ kgem_bo_submit(&sna->kgem, priv->gpu_bo);
+ sna_pixmap_free_cpu(sna, priv);
+
+ DBG(("%s: try to operate inplace (CPU)\n", __FUNCTION__));
+
+ pixmap->devPrivate.ptr =
+ kgem_bo_map__cpu(&sna->kgem, priv->gpu_bo);
+ if (pixmap->devPrivate.ptr != NULL) {
+ priv->cpu = true;
+ priv->mapped = true;
+ pixmap->devKind = priv->gpu_bo->pitch;
+ if (flags & MOVE_WRITE) {
+ assert(priv->gpu_bo->proxy == NULL);
+ sna_damage_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height);
+ sna_damage_destroy(&priv->cpu_damage);
+ list_del(&priv->list);
+ priv->undamaged = false;
+ priv->clear = false;
+ }
+
+ kgem_bo_sync__cpu_full(&sna->kgem, priv->gpu_bo, flags & MOVE_WRITE);
+ assert_pixmap_damage(pixmap);
+ DBG(("%s: operate inplace (CPU)\n", __FUNCTION__));
+ return true;
+ }
+ }
+
if (priv->clear && priv->cpu_bo && !priv->cpu_bo->flush &&
__kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
assert(!priv->shm);
@@ -1750,7 +1782,7 @@ static inline bool region_inplace(struct sna *sna,
if (priv->mapped) {
DBG(("%s: yes, already mapped, continuing\n", __FUNCTION__));
- return true;
+ return !IS_CPU_MAP(priv->gpu_bo->map);
}
if (DAMAGE_IS_ALL(priv->gpu_damage)) {
@@ -3459,6 +3491,9 @@ static bool upload_inplace(struct sna *sna,
return false;
}
+ if (priv->mapped && IS_CPU_MAP(priv->gpu_bo->map))
+ return false;
+
if (priv->create & KGEM_CAN_CREATE_LARGE) {
if (priv->gpu_bo) {
DBG(("%s: yes, large buffer and already have GPU bo\n",
@@ -3539,6 +3574,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna *sna = to_sna_from_pixmap(pixmap);
struct sna_pixmap *priv = sna_pixmap(pixmap);
+ unsigned flags;
char *dst_bits;
int dst_stride;
BoxRec *box;
@@ -3598,110 +3634,13 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
return true;
}
- if (priv->gpu_bo && priv->gpu_bo->proxy) {
- DBG(("%s: discarding cached upload buffer\n", __FUNCTION__));
- kgem_bo_destroy(&sna->kgem, priv->gpu_bo);
- priv->gpu_bo = NULL;
- }
-
- if (priv->mapped) {
- assert(!priv->shm);
- pixmap->devPrivate.ptr = NULL;
- priv->mapped = false;
- }
-
- /* If the GPU is currently accessing the CPU pixmap, then
- * we will need to wait for that to finish before we can
- * modify the memory.
- *
- * However, we can queue some writes to the GPU bo to avoid
- * the wait. Or we can try to replace the CPU bo.
- */
- if (!priv->shm && priv->cpu_bo && __kgem_bo_is_busy(&sna->kgem, priv->cpu_bo)) {
- assert(!priv->cpu_bo->flush);
- DBG(("%s: cpu bo will stall, upload damage and discard\n",
- __FUNCTION__));
- if (priv->cpu_damage) {
- if (!region_subsumes_drawable(region, &pixmap->drawable)) {
- sna_damage_subtract(&priv->cpu_damage, region);
- if (!sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_ASYNC_HINT))
- return false;
- } else {
- sna_damage_destroy(&priv->cpu_damage);
- priv->undamaged = false;
- }
- }
- assert(priv->cpu_damage == NULL);
- assert(priv->gpu_bo->proxy == NULL);
- sna_damage_all(&priv->gpu_damage,
- pixmap->drawable.width,
- pixmap->drawable.height);
- sna_pixmap_free_cpu(sna, priv);
- assert(pixmap->devPrivate.ptr == NULL);
- }
-
- if (pixmap->devPrivate.ptr == NULL &&
- !sna_pixmap_alloc_cpu(sna, pixmap, priv, false))
- return true;
-
- if (priv->cpu_bo) {
- DBG(("%s: syncing CPU bo\n", __FUNCTION__));
- kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
- }
-
- if (priv->clear) {
- DBG(("%s: applying clear [%08x]\n",
- __FUNCTION__, priv->clear_color));
-
- if (priv->clear_color == 0) {
- memset(pixmap->devPrivate.ptr,
- 0, pixmap->devKind * pixmap->drawable.height);
- } else {
- pixman_fill(pixmap->devPrivate.ptr,
- pixmap->devKind/sizeof(uint32_t),
- pixmap->drawable.bitsPerPixel,
- 0, 0,
- pixmap->drawable.width,
- pixmap->drawable.height,
- priv->clear_color);
- }
-
- sna_damage_all(&priv->cpu_damage,
- pixmap->drawable.width,
- pixmap->drawable.height);
- sna_pixmap_free_gpu(sna, priv);
- priv->undamaged = false;
- }
+ flags = MOVE_WRITE;
+ flags |= MOVE_INPLACE_HINT;
+ if (w == drawable->width)
+ flags |= MOVE_WHOLE_HINT;
- if (!DAMAGE_IS_ALL(priv->cpu_damage)) {
- DBG(("%s: marking damage\n", __FUNCTION__));
- if (region_subsumes_drawable(region, &pixmap->drawable)) {
- DBG(("%s: replacing entire pixmap\n", __FUNCTION__));
- sna_damage_all(&priv->cpu_damage,
- pixmap->drawable.width,
- pixmap->drawable.height);
- sna_pixmap_free_gpu(sna, priv);
- priv->undamaged = false;
- assert(priv->gpu_damage == NULL);
- } else {
- sna_damage_subtract(&priv->gpu_damage, region);
- sna_damage_add(&priv->cpu_damage, region);
- if (priv->gpu_bo &&
- sna_damage_is_all(&priv->cpu_damage,
- pixmap->drawable.width,
- pixmap->drawable.height)) {
- DBG(("%s: replaced entire pixmap\n", __FUNCTION__));
- sna_pixmap_free_gpu(sna, priv);
- priv->undamaged = false;
- }
- }
- if (priv->flush) {
- assert(!priv->shm);
- sna_add_flush_pixmap(sna, priv, priv->gpu_bo);
- }
- }
- assert(!priv->flush || !list_is_empty(&priv->list));
- priv->cpu = true;
+ if (!sna_drawable_move_region_to_cpu(drawable, region, flags))
+ return false;
blt:
get_drawable_deltas(drawable, pixmap, &dx, &dy);
commit 03d392cd1d87e17129c42e4d822d3d1749edb02e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Jan 12 08:51:52 2013 +0000
sna: Tweak max object sizes to take account of aperture restrictions
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 4a28de0..8f0dfff 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1102,18 +1102,10 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
DBG(("%s: buffer size=%d [%d KiB]\n", __FUNCTION__,
kgem->buffer_size, kgem->buffer_size / 1024));
- kgem->max_object_size = 2 * aperture.aper_size / 3;
+ kgem->max_object_size = 2 * kgem->aperture_high / 3;
kgem->max_gpu_size = kgem->max_object_size;
if (!kgem->has_llc)
kgem->max_gpu_size = MAX_CACHE_SIZE;
- if (gen < 040) {
- /* If we have to use fences for blitting, we have to make
- * sure we can fit them into the aperture.
- */
- kgem->max_gpu_size = kgem->aperture_mappable / 2;
- if (kgem->max_gpu_size > kgem->aperture_low)
- kgem->max_gpu_size = kgem->aperture_low;
- }
totalram = total_ram_size();
if (totalram == 0) {
@@ -1128,10 +1120,7 @@ void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen)
kgem->max_gpu_size = totalram / 4;
half_gpu_max = kgem->max_gpu_size / 2;
- if (gen >= 040)
- kgem->max_cpu_size = half_gpu_max;
- else
- kgem->max_cpu_size = kgem->max_object_size;
+ kgem->max_cpu_size = half_gpu_max;
kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2;
if (kgem->max_copy_tile_size > half_gpu_max)
commit d111c464bfbae57bb7141872810c88b88f30c087
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Jan 12 08:15:13 2013 +0000
sna: After a size check, double check the batch before flushing
As we may fail the size check with an empty batch and a pair of large
bo, we need to check before submitting that batch in order to not run
afoul of our internal sanity checks.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen4_render.c b/src/sna/gen4_render.c
index c3e452d..6b3f864 100644
--- a/src/sna/gen4_render.c
+++ b/src/sna/gen4_render.c
@@ -2704,8 +2704,11 @@ gen4_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
tmp.u.gen4.sf = 0;
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo(&sna->kgem, bo, NULL));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
}
gen4_bind_surfaces(sna, &tmp);
diff --git a/src/sna/gen5_render.c b/src/sna/gen5_render.c
index c86cc24..eec2f83 100644
--- a/src/sna/gen5_render.c
+++ b/src/sna/gen5_render.c
@@ -2795,7 +2795,11 @@ gen5_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
tmp.u.gen5.ve_id = 1;
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
assert(kgem_check_bo(&sna->kgem, bo, NULL));
}
diff --git a/src/sna/gen6_render.c b/src/sna/gen6_render.c
index a4b5746..8b1ae3c 100644
--- a/src/sna/gen6_render.c
+++ b/src/sna/gen6_render.c
@@ -3236,8 +3236,11 @@ gen6_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo(&sna->kgem, bo, NULL));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
}
gen6_emit_fill_state(sna, &tmp);
@@ -3319,8 +3322,11 @@ gen6_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
assert(GEN6_VERTEX(tmp.u.gen6.flags) == FILL_VERTEX);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo(&sna->kgem, bo, NULL));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
}
gen6_emit_fill_state(sna, &tmp);
diff --git a/src/sna/gen7_render.c b/src/sna/gen7_render.c
index badce91..cd36c95 100644
--- a/src/sna/gen7_render.c
+++ b/src/sna/gen7_render.c
@@ -3318,8 +3318,11 @@ gen7_render_fill_one(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo,
kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo(&sna->kgem, bo, NULL));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
}
gen7_emit_fill_state(sna, &tmp);
@@ -3399,8 +3402,11 @@ gen7_render_clear(struct sna *sna, PixmapPtr dst, struct kgem_bo *bo)
kgem_set_mode(&sna->kgem, KGEM_RENDER, bo);
if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo(&sna->kgem, bo, NULL));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo(&sna->kgem, bo, NULL)) {
+ kgem_bo_destroy(&sna->kgem, tmp.src.bo);
+ return false;
+ }
}
gen7_emit_fill_state(sna, &tmp);
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 26581ad..4a28de0 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -5551,7 +5551,11 @@ kgem_replace_bo(struct kgem *kgem,
if (!kgem_check_batch(kgem, 8) ||
!kgem_check_reloc(kgem, 2) ||
!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
+ if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
+ kgem_bo_destroy(kgem, dst);
+ return NULL;
+ }
_kgem_set_mode(kgem, KGEM_BLT);
}
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index b828cbf..b0f66c7 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -3824,7 +3824,9 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -3952,7 +3954,9 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
if (!kgem_check_batch(&sna->kgem, 12) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -6187,7 +6191,9 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
!kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, arg->bo))
+ return; /* XXX fallback? */
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -6229,7 +6235,9 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, arg->bo))
+ return; /* XXX fallback? */
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -6348,7 +6356,9 @@ sna_copy_plane_blt(DrawablePtr source, DrawablePtr drawable, GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, arg->bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, arg->bo))
+ return; /* XXX fallback? */
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -9923,7 +9933,9 @@ sna_poly_fill_rect_tiled_8x8_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 8+2*3) ||
!kgem_check_reloc(&sna->kgem, 2) ||
!kgem_check_bo_fenced(&sna->kgem, bo)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -10557,7 +10569,9 @@ sna_poly_fill_rect_stippled_8x8_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 9 + 2*3) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -10867,7 +10881,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -10909,7 +10925,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11009,7 +11027,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11048,7 +11068,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11149,7 +11171,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11188,7 +11212,9 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11298,7 +11324,9 @@ sna_poly_fill_rect_stippled_n_box__imm(struct sna *sna,
if (!kgem_check_batch(&sna->kgem, 7+len) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return; /* XXX fallback? */
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -11402,7 +11430,9 @@ sna_poly_fill_rect_stippled_n_box(struct sna *sna,
if (!kgem_check_batch(&sna->kgem, 7+len) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return; /* XXX fallback? */
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -12315,7 +12345,9 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 16) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -12960,7 +12992,9 @@ sna_reversed_glyph_blt(DrawablePtr drawable, GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 16) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc(&sna->kgem, 1)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -13356,7 +13390,9 @@ sna_push_pixels_solid_blt(GCPtr gc,
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, bo) ||
!kgem_check_reloc_and_exec(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index fb560d5..5602579 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -148,8 +148,9 @@ static bool sna_blt_fill_init(struct sna *sna,
kgem_set_mode(kgem, KGEM_BLT, bo);
if (!kgem_check_batch(kgem, 12) ||
!kgem_check_bo_fenced(kgem, bo)) {
- _kgem_submit(kgem);
- assert(kgem_check_bo_fenced(kgem, bo));
+ kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(kgem, bo))
+ return false;
_kgem_set_mode(kgem, KGEM_BLT);
}
@@ -291,7 +292,7 @@ static bool sna_blt_copy_init(struct sna *sna,
kgem_set_mode(kgem, KGEM_BLT, dst);
if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
return false;
_kgem_set_mode(kgem, KGEM_BLT);
@@ -343,7 +344,7 @@ static bool sna_blt_alpha_fixup_init(struct sna *sna,
kgem_set_mode(kgem, KGEM_BLT, dst);
if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
if (!kgem_check_many_bo_fenced(kgem, src, dst, NULL))
return false;
_kgem_set_mode(kgem, KGEM_BLT);
@@ -980,8 +981,10 @@ begin_blt(struct sna *sna,
struct sna_composite_op *op)
{
if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo)) {
- _kgem_submit(&sna->kgem);
- assert(kgem_check_bo_fenced(&sna->kgem, op->dst.bo));
+ kgem_submit(&sna->kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, op->dst.bo))
+ return false;
+
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
@@ -1245,7 +1248,7 @@ prepare_blt_copy(struct sna *sna,
}
if (!kgem_check_many_bo_fenced(&sna->kgem, op->dst.bo, bo, NULL)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
if (!kgem_check_many_bo_fenced(&sna->kgem,
op->dst.bo, bo, NULL)) {
DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
@@ -2041,7 +2044,7 @@ sna_blt_composite__convert(struct sna *sna,
}
if (!kgem_check_many_bo_fenced(&sna->kgem, tmp->dst.bo, tmp->src.bo, NULL)) {
- _kgem_submit(&sna->kgem);
+ kgem_submit(&sna->kgem);
if (!kgem_check_many_bo_fenced(&sna->kgem,
tmp->dst.bo, tmp->src.bo, NULL)) {
DBG(("%s: fallback -- no room in aperture\n", __FUNCTION__));
@@ -2267,7 +2270,7 @@ static bool sna_blt_fill_box(struct sna *sna, uint8_t alu,
if (!kgem_check_batch(kgem, 6) ||
!kgem_check_reloc(kgem, 1) ||
!kgem_check_bo_fenced(kgem, bo)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
assert(kgem_check_bo_fenced(&sna->kgem, bo));
_kgem_set_mode(kgem, KGEM_BLT);
}
@@ -2342,8 +2345,9 @@ bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu,
kgem_set_mode(kgem, KGEM_BLT, bo);
if (!kgem_check_batch(kgem, 12) ||
!kgem_check_bo_fenced(kgem, bo)) {
- _kgem_submit(kgem);
- assert(kgem_check_bo_fenced(&sna->kgem, bo));
+ kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(&sna->kgem, bo))
+ return false;
_kgem_set_mode(kgem, KGEM_BLT);
}
@@ -2516,7 +2520,7 @@ bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu,
if (!kgem_check_batch(kgem, 8) ||
!kgem_check_reloc(kgem, 2) ||
!kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
if (!kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL))
return sna_tiling_blt_copy_boxes(sna, alu,
src_bo, src_dx, src_dy,
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 8b4c25e..368d880 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -382,7 +382,9 @@ fallback:
if (!kgem_check_batch(kgem, 8) ||
!kgem_check_reloc_and_exec(kgem, 2) ||
!kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
+ if (!kgem_check_many_bo_fenced(kgem, dst_bo, src_bo, NULL))
+ goto fallback;
_kgem_set_mode(kgem, KGEM_BLT);
}
@@ -837,7 +839,9 @@ tile:
if (!kgem_check_batch(kgem, 8) ||
!kgem_check_reloc_and_exec(kgem, 2) ||
!kgem_check_bo_fenced(kgem, dst_bo)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(kgem, dst_bo))
+ goto fallback;
_kgem_set_mode(kgem, KGEM_BLT);
}
@@ -1206,7 +1210,9 @@ tile:
if (!kgem_check_batch(kgem, 8) ||
!kgem_check_reloc_and_exec(kgem, 2) ||
!kgem_check_bo_fenced(kgem, dst_bo)) {
- _kgem_submit(kgem);
+ kgem_submit(kgem);
+ if (!kgem_check_bo_fenced(kgem, dst_bo))
+ goto fallback;
_kgem_set_mode(kgem, KGEM_BLT);
}
More information about the xorg-commit
mailing list