xf86-video-intel: 10 commits - src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c
Chris Wilson
ickle at kemper.freedesktop.org
Thu Nov 3 06:16:30 PDT 2011
src/sna/kgem.c | 4
src/sna/kgem.h | 5
src/sna/sna_accel.c | 1413 ++++++++++++++++++++++++++++++++--------------------
3 files changed, 897 insertions(+), 525 deletions(-)
New commits:
commit bc032c9be1bab7477fbf4b2a64fb7aca6d75ac18
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 11:47:18 2011 +0000
sna: Coalesce reduction of cpu damage
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 4812c2c..ede994f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -733,34 +733,6 @@ done:
list_del(&priv->list);
}
-static struct sna_damage **
-reduce_damage(DrawablePtr drawable,
- struct sna_damage **damage,
- const BoxRec *box)
-{
- PixmapPtr pixmap = get_drawable_pixmap(drawable);
- int16_t dx, dy;
- BoxRec r;
-
- if (*damage == NULL)
- return damage;
-
- if (sna_damage_is_all(damage,
- pixmap->drawable.width,
- pixmap->drawable.height))
- return NULL;
-
- get_drawable_deltas(drawable, pixmap, &dx, &dy);
-
- r = *box;
- r.x1 += dx; r.x2 += dx;
- r.y1 += dy; r.y2 += dy;
- if (sna_damage_contains_box(*damage, &r) == PIXMAN_REGION_IN)
- return NULL;
- else
- return damage;
-}
-
static inline Bool
_sna_drawable_use_gpu_bo(DrawablePtr drawable,
const BoxRec *box,
@@ -828,7 +800,9 @@ sna_drawable_use_gpu_bo(DrawablePtr drawable,
}
static inline Bool
-_sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
+_sna_drawable_use_cpu_bo(DrawablePtr drawable,
+ const BoxRec *box,
+ struct sna_damage ***damage)
{
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
@@ -840,9 +814,6 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
if (priv->cpu_bo == NULL)
return FALSE;
- if (priv->gpu_damage == NULL)
- return TRUE;
-
get_drawable_deltas(drawable, pixmap, &dx, &dy);
extents = *box;
@@ -851,14 +822,34 @@ _sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
extents.y1 += dy;
extents.y2 += dy;
- return sna_damage_contains_box(priv->gpu_damage,
- &extents) == PIXMAN_REGION_OUT;
+ if (priv->gpu_damage == NULL)
+ goto done;
+
+ if (sna_damage_contains_box(priv->gpu_damage,
+ &extents) != PIXMAN_REGION_OUT)
+ return FALSE;
+
+done:
+ if (damage) {
+ if (!sna_damage_is_all(&priv->cpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height) &&
+ sna_damage_contains_box(priv->cpu_damage,
+ &extents) != PIXMAN_REGION_IN)
+ *damage = &priv->cpu_damage;
+ else
+ *damage = NULL;
+ }
+
+ return TRUE;
}
static inline Bool
-sna_drawable_use_cpu_bo(DrawablePtr drawable, const BoxRec *box)
+sna_drawable_use_cpu_bo(DrawablePtr drawable,
+ const BoxRec *box,
+ struct sna_damage ***damage)
{
- Bool ret = _sna_drawable_use_cpu_bo(drawable, box);
+ Bool ret = _sna_drawable_use_cpu_bo(drawable, box, damage);
DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
box->x1, box->y1, box->x2, box->y2, ret));
return ret;
@@ -2671,10 +2662,9 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
&region.extents, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
sna_fill_spans_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, n, pt, width, sorted,
&region.extents, flags & 2))
return;
@@ -3334,10 +3324,9 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
gc, mode, n, pt, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
sna_poly_point_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, mode, n, pt, flags & 2))
return;
}
@@ -4067,10 +4056,11 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
&region.extents, flags & 4))
return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(drawable,
+ &region.extents,
+ &damage) &&
sna_poly_line_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, mode, n, pt,
&region.extents, flags & 4))
return;
@@ -4839,10 +4829,11 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
&region.extents, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(drawable,
+ &region.extents,
+ &damage) &&
sna_poly_segment_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, n, seg,
&region.extents, flags & 2))
return;
@@ -5414,9 +5405,8 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
gc, n, r, &region.extents, flags&2))
return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
- sna_poly_rectangle_blt(drawable, priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ if (sna_drawable_use_cpu_bo(drawable, &region.extents, &damage) &&
+ sna_poly_rectangle_blt(drawable, priv->cpu_bo, damage,
gc, n, r, &region.extents, flags&2))
return;
}
@@ -6708,10 +6698,9 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
&region.extents, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_blt(draw,
- priv->cpu_bo,
- reduce_damage(draw, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, color, n, rect,
&region.extents, flags & 2))
return;
@@ -6728,10 +6717,9 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
&region.extents, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_tiled(draw,
- priv->cpu_bo,
- reduce_damage(draw, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, n, rect,
&region.extents, flags & 2))
return;
@@ -6748,10 +6736,9 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
&region.extents, flags & 2))
return;
- if (sna_drawable_use_cpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_cpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_stippled_blt(draw,
- priv->cpu_bo,
- reduce_damage(draw, &priv->cpu_damage, &region.extents),
+ priv->cpu_bo, damage,
gc, n, rect,
&region.extents, flags & 2))
return;
commit c92671b33ef30a8eb723eb366692ae3a5878b8f9
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 11:47:18 2011 +0000
sna: Coalesce reduction of gpu damage
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9935d5d..4812c2c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -733,8 +733,38 @@ done:
list_del(&priv->list);
}
+static struct sna_damage **
+reduce_damage(DrawablePtr drawable,
+ struct sna_damage **damage,
+ const BoxRec *box)
+{
+ PixmapPtr pixmap = get_drawable_pixmap(drawable);
+ int16_t dx, dy;
+ BoxRec r;
+
+ if (*damage == NULL)
+ return damage;
+
+ if (sna_damage_is_all(damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height))
+ return NULL;
+
+ get_drawable_deltas(drawable, pixmap, &dx, &dy);
+
+ r = *box;
+ r.x1 += dx; r.x2 += dx;
+ r.y1 += dy; r.y2 += dy;
+ if (sna_damage_contains_box(*damage, &r) == PIXMAN_REGION_IN)
+ return NULL;
+ else
+ return damage;
+}
+
static inline Bool
-_sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
+_sna_drawable_use_gpu_bo(DrawablePtr drawable,
+ const BoxRec *box,
+ struct sna_damage ***damage)
{
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
@@ -746,10 +776,6 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
if (priv->gpu_bo == NULL)
return FALSE;
- if (priv->cpu_damage == NULL)
- return TRUE;
-
- assert(!priv->gpu_only);
get_drawable_deltas(drawable, pixmap, &dx, &dy);
extents = *box;
@@ -758,9 +784,14 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
extents.y1 += dy;
extents.y2 += dy;
+ if (priv->cpu_damage == NULL)
+ goto done;
+
+ assert(!priv->gpu_only);
+
if (sna_damage_contains_box(priv->cpu_damage,
&extents) == PIXMAN_REGION_OUT)
- return TRUE;
+ goto done;
if (!priv->gpu || priv->gpu_damage == NULL)
return FALSE;
@@ -770,13 +801,27 @@ _sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
return FALSE;
sna_pixmap_move_area_to_gpu(pixmap, &extents);
+done:
+ if (damage) {
+ if (!sna_damage_is_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height) &&
+ sna_damage_contains_box(priv->gpu_damage,
+ &extents) != PIXMAN_REGION_IN)
+ *damage = &priv->gpu_damage;
+ else
+ *damage = NULL;
+ }
+
return TRUE;
}
static inline Bool
-sna_drawable_use_gpu_bo(DrawablePtr drawable, const BoxRec *box)
+sna_drawable_use_gpu_bo(DrawablePtr drawable,
+ const BoxRec *box,
+ struct sna_damage ***damage)
{
- Bool ret = _sna_drawable_use_gpu_bo(drawable, box);
+ Bool ret = _sna_drawable_use_gpu_bo(drawable, box, damage);
DBG(("%s((%d, %d), (%d, %d)) = %d\n", __FUNCTION__,
box->x1, box->y1, box->x2, box->y2, ret));
return ret;
@@ -1366,19 +1411,22 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
struct sna *sna = to_sna_from_drawable(drawable);
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
+ struct sna_damage **damage;
BoxRec *box;
int16_t dx, dy;
int n;
uint8_t rop = copy_ROP[gc->alu];
- if (!sna_drawable_use_gpu_bo(&pixmap->drawable, &region->extents))
+ if (!sna_drawable_use_gpu_bo(&pixmap->drawable,
+ &region->extents,
+ &damage))
return false;
if (priv->gpu_bo->tiling == I915_TILING_Y)
return false;
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -1478,6 +1526,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
struct sna *sna = to_sna_from_drawable(drawable);
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
+ struct sna_damage **damage;
struct kgem_bo *bo = priv->gpu_bo;
int16_t dx, dy;
unsigned i, skip;
@@ -1485,14 +1534,16 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
if (gc->alu != GXcopy)
return false;
- if (!sna_drawable_use_gpu_bo(&pixmap->drawable, &region->extents))
+ if (!sna_drawable_use_gpu_bo(&pixmap->drawable,
+ &region->extents,
+ &damage))
return false;
if (bo->tiling == I915_TILING_Y)
return false;
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -2553,34 +2604,6 @@ sna_spans_extents(DrawablePtr drawable, GCPtr gc,
return 1 | clipped << 1;
}
-static struct sna_damage **
-reduce_damage(DrawablePtr drawable,
- struct sna_damage **damage,
- const BoxRec *box)
-{
- PixmapPtr pixmap = get_drawable_pixmap(drawable);
- int16_t dx, dy;
- BoxRec r;
-
- if (*damage == NULL)
- return damage;
-
- if (sna_damage_is_all(damage,
- pixmap->drawable.width,
- pixmap->drawable.height))
- return NULL;
-
- get_drawable_deltas(drawable, pixmap, &dx, &dy);
-
- r = *box;
- r.x1 += dx; r.x2 += dx;
- r.y1 += dy; r.y2 += dy;
- if (sna_damage_contains_box(*damage, &r) == PIXMAN_REGION_IN)
- return NULL;
- else
- return damage;
-}
-
static Bool
sna_poly_fill_rect_tiled(DrawablePtr drawable,
struct kgem_bo *bo,
@@ -2636,14 +2659,14 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
if (gc->fillStyle == FillSolid) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
DBG(("%s: trying solid fill [alu=%d, pixel=%08lx] blt paths\n",
__FUNCTION__, gc->alu, gc->fgPixel));
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
sna_fill_spans_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, n, pt, width, sorted,
&region.extents, flags & 2))
return;
@@ -2656,10 +2679,11 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
&region.extents, flags & 2))
return;
} else if (gc->fillStyle == FillTiled) {
- /* Try converting these to a set of rectangles instead */
+ struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
- if (sna_drawable_use_gpu_bo(drawable, &region.extents)) {
- struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ /* Try converting these to a set of rectangles instead */
+ if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage)) {
xRectangle *rect;
int i;
@@ -2677,8 +2701,7 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
}
i = sna_poly_fill_rect_tiled(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, n, rect,
&region.extents, flags & 2);
free (rect);
@@ -3105,6 +3128,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
unsigned long bit)
{
RegionRec region;
+ struct sna_damage **damage;
DBG(("%s: src=(%d, %d), dst=(%d, %d), size=%dx%d\n", __FUNCTION__,
src_x, src_y, dst_x, dst_y, w, h));
@@ -3124,7 +3148,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
if (!RegionNotEmpty(&region))
return NULL;
- if (sna_drawable_use_gpu_bo(dst, &region.extents)) {
+ if (sna_drawable_use_gpu_bo(dst, &region.extents, &damage)) {
struct sna_pixmap *priv = sna_pixmap(get_drawable_pixmap(dst));
if (priv->gpu_bo->tiling != I915_TILING_Y) {
RegionUninit(&region);
@@ -3133,8 +3157,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
w, h,
dst_x, dst_y,
src->depth == 1 ? sna_copy_bitmap_blt :sna_copy_plane_blt,
- (Pixel)bit,
- reduce_damage(dst, &priv->gpu_damage, &region.extents));
+ (Pixel)bit, damage);
}
}
@@ -3300,14 +3323,14 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
if (gc->fillStyle == FillSolid &&
PM_IS_SOLID(drawable, gc->planemask)) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
DBG(("%s: trying solid fill [%08lx] blt paths\n",
__FUNCTION__, gc->fgPixel));
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
+ if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
sna_poly_point_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, mode, n, pt, flags & 2))
return;
@@ -4029,41 +4052,45 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
gc->lineWidth <= 1 &&
PM_IS_SOLID(drawable, gc->planemask)) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
DBG(("%s: trying solid fill [%08lx]\n",
__FUNCTION__, gc->fgPixel));
- if (flags & 2) {
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_poly_line_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
- gc, mode, n, pt,
- &region.extents, flags & 4))
- return;
+ if (flags & 2) {
+ if (sna_drawable_use_gpu_bo(drawable,
+ &region.extents,
+ &damage) &&
+ sna_poly_line_blt(drawable,
+ priv->gpu_bo, damage,
+ gc, mode, n, pt,
+ &region.extents, flags & 4))
+ return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
- sna_poly_line_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
- gc, mode, n, pt,
- &region.extents, flags & 4))
- return;
- } else { /* !rectilinear */
- if (USE_ZERO_SPANS &&
- sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_poly_zero_line_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
- gc, mode, n, pt,
- &region.extents, flags & 4))
- return;
+ if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ sna_poly_line_blt(drawable,
+ priv->cpu_bo,
+ reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ gc, mode, n, pt,
+ &region.extents, flags & 4))
+ return;
+ } else { /* !rectilinear */
+ if (USE_ZERO_SPANS &&
+ sna_drawable_use_gpu_bo(drawable,
+ &region.extents,
+ &damage) &&
+ sna_poly_zero_line_blt(drawable,
+ priv->gpu_bo, damage,
+ gc, mode, n, pt,
+ &region.extents, flags & 4))
+ return;
- }
+ }
}
- if (USE_SPANS && can_fill_spans(drawable, gc) &&
- sna_drawable_use_gpu_bo(drawable, &region.extents)) {
+ if (USE_SPANS &&
+ can_fill_spans(drawable, gc) &&
+ sna_drawable_use_gpu_bo(drawable, &region.extents, NULL)) {
DBG(("%s: converting line into spans\n", __FUNCTION__));
switch (gc->lineStyle) {
case LineSolid:
@@ -4797,40 +4824,44 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
gc->lineWidth <= 1 &&
PM_IS_SOLID(drawable, gc->planemask)) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
DBG(("%s: trying blt solid fill [%08lx] paths\n",
__FUNCTION__, gc->fgPixel));
- if (flags & 4) {
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_poly_segment_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
- gc, n, seg,
- &region.extents, flags & 2))
- return;
+ if (flags & 4) {
+ if (sna_drawable_use_gpu_bo(drawable,
+ &region.extents,
+ &damage) &&
+ sna_poly_segment_blt(drawable,
+ priv->gpu_bo, damage,
+ gc, n, seg,
+ &region.extents, flags & 2))
+ return;
- if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
- sna_poly_segment_blt(drawable,
- priv->cpu_bo,
- reduce_damage(drawable, &priv->cpu_damage, &region.extents),
- gc, n, seg,
- &region.extents, flags & 2))
- return;
- } else {
- if (USE_ZERO_SPANS &&
- sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_poly_zero_segment_blt(drawable,
- priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
- gc, n, seg, &region.extents, flags & 2))
- return;
- }
+ if (sna_drawable_use_cpu_bo(drawable, &region.extents) &&
+ sna_poly_segment_blt(drawable,
+ priv->cpu_bo,
+ reduce_damage(drawable, &priv->cpu_damage, &region.extents),
+ gc, n, seg,
+ &region.extents, flags & 2))
+ return;
+ } else {
+ if (USE_ZERO_SPANS &&
+ sna_drawable_use_gpu_bo(drawable,
+ &region.extents,
+ &damage) &&
+ sna_poly_zero_segment_blt(drawable,
+ priv->gpu_bo, damage,
+ gc, n, seg, &region.extents, flags & 2))
+ return;
+ }
}
/* XXX Do we really want to base this decision on the amalgam ? */
- if (USE_SPANS && can_fill_spans(drawable, gc) &&
- sna_drawable_use_gpu_bo(drawable, &region.extents)) {
+ if (USE_SPANS &&
+ can_fill_spans(drawable, gc) &&
+ sna_drawable_use_gpu_bo(drawable, &region.extents, NULL)) {
void (*line)(DrawablePtr, GCPtr, int, int, DDXPointPtr);
int i;
@@ -5373,13 +5404,13 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
gc->joinStyle == JoinMiter &&
PM_IS_SOLID(drawable, gc->planemask)) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(drawable);
+ struct sna_damage **damage;
DBG(("%s: trying blt solid fill [%08lx] paths\n",
__FUNCTION__, gc->fgPixel));
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_poly_rectangle_blt(drawable, priv->gpu_bo,
- reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ if (sna_drawable_use_gpu_bo(drawable, &region.extents, &damage) &&
+ sna_poly_rectangle_blt(drawable, priv->gpu_bo, damage,
gc, n, r, &region.extents, flags&2))
return;
@@ -5393,7 +5424,7 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
/* Not a trivial outline, but we still maybe able to break it
* down into simpler operations that we can accelerate.
*/
- if (sna_drawable_use_gpu_bo(drawable, &region.extents)) {
+ if (sna_drawable_use_gpu_bo(drawable, &region.extents, NULL)) {
miPolyRectangle(drawable, gc, n, r);
return;
}
@@ -5500,7 +5531,7 @@ sna_poly_arc(DrawablePtr drawable, GCPtr gc, int n, xArc *arc)
/* For "simple" cases use the miPolyArc to spans path */
if (USE_SPANS && arc_to_spans(gc, n) && can_fill_spans(drawable, gc) &&
- sna_drawable_use_gpu_bo(drawable, &region.extents)) {
+ sna_drawable_use_gpu_bo(drawable, &region.extents, NULL)) {
DBG(("%s: converting arcs into spans\n", __FUNCTION__));
/* XXX still around 10x slower for x11perf -ellipse */
if (gc->lineWidth == 0)
@@ -6664,15 +6695,15 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
(gc->fillStyle == FillTiled && gc->tileIsPixel) ||
(gc->fillStyle == FillOpaqueStippled && gc->bgPixel == gc->fgPixel)) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(draw);
+ struct sna_damage **damage;
uint32_t color = gc->fillStyle == FillTiled ? gc->tile.pixel : gc->fgPixel;
DBG(("%s: solid fill [%08x], testing for blt\n",
__FUNCTION__, color));
- if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_blt(draw,
- priv->gpu_bo,
- reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, color, n, rect,
&region.extents, flags & 2))
return;
@@ -6686,13 +6717,13 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
return;
} else if (gc->fillStyle == FillTiled) {
struct sna_pixmap *priv = sna_pixmap_from_drawable(draw);
+ struct sna_damage **damage;
DBG(("%s: tiled fill, testing for blt\n", __FUNCTION__));
- if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_tiled(draw,
- priv->gpu_bo,
- reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, n, rect,
&region.extents, flags & 2))
return;
@@ -6706,13 +6737,13 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
return;
} else {
struct sna_pixmap *priv = sna_pixmap_from_drawable(draw);
+ struct sna_damage **damage;
DBG(("%s: stippled fill, testing for blt\n", __FUNCTION__));
- if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
+ if (sna_drawable_use_gpu_bo(draw, &region.extents, &damage) &&
sna_poly_fill_rect_stippled_blt(draw,
- priv->gpu_bo,
- reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ priv->gpu_bo, damage,
gc, n, rect,
&region.extents, flags & 2))
return;
@@ -6801,7 +6832,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
- if (!sna_drawable_use_gpu_bo(drawable, &clip->extents))
+ if (!sna_drawable_use_gpu_bo(drawable, &clip->extents, &damage))
return false;
bo = priv->gpu_bo;
@@ -6810,8 +6841,6 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
return false;
}
- damage = reduce_damage(drawable, &priv->gpu_damage, &clip->extents),
-
get_drawable_deltas(drawable, pixmap, &dx, &dy);
_x += drawable->x + dx;
_y += drawable->y + dy;
@@ -7318,6 +7347,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
struct sna *sna = to_sna_from_drawable(drawable);
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
+ struct sna_damage **damage;
BoxRec *box;
int16_t dx, dy;
int n;
@@ -7326,14 +7356,14 @@ sna_push_pixels_solid_blt(GCPtr gc,
if (priv->gpu_bo->tiling == I915_TILING_Y)
return false;
- if (!sna_drawable_use_gpu_bo(drawable, &region->extents))
+ if (!sna_drawable_use_gpu_bo(drawable, &region->extents, &damage))
return false;
get_drawable_deltas(drawable, pixmap, &dx, &dy);
RegionTranslate(region, dx, dy);
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__,
region->extents.x1, region->extents.y1,
commit 3b4a508eb022e9ade5929d3ebc97e0f9216a80b1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 00:32:29 2011 +0000
sna: Translate glyphs into MSBFirst upon initial load
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9126da9..9935d5d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -65,6 +65,7 @@
DevPrivateKeyRec sna_pixmap_index;
DevPrivateKeyRec sna_gc_index;
DevPrivateKey sna_window_key;
+static int sna_font_key;
static const uint8_t copy_ROP[] = {
ROP_0, /* GXclear */
@@ -6744,64 +6745,100 @@ fallback:
fbPolyFillRect(draw, gc, n, rect);
}
+struct sna_font {
+ CharInfoRec glyphs8[256];
+ CharInfoRec *glyphs16[256];
+};
+
+static Bool
+sna_realize_font(ScreenPtr screen, FontPtr font)
+{
+ struct sna_font *priv;
+
+ priv = calloc(1, sizeof(struct sna_font));
+ if (priv == NULL)
+ return FALSE;
+
+ if (!FontSetPrivate(font, sna_font_key, priv)) {
+ free(priv);
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+static Bool
+sna_unrealize_font(ScreenPtr screen, FontPtr font)
+{
+ struct sna_font *priv = FontGetPrivate(font, sna_font_key);
+ int n;
+
+ if (priv) {
+ for (n = 0; n < 256; n++)
+ free(priv->glyphs16[n]);
+ free(priv);
+ }
+
+ return TRUE;
+}
+
static bool
sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
int _x, int _y, unsigned int _n,
- CharInfoPtr *_info, pointer _base,
- bool transparent,
- const BoxRec *extents)
+ CharInfoPtr *_info,
+ RegionRec *clip,
+ bool transparent)
{
struct sna *sna = to_sna_from_drawable(drawable);
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
+ struct kgem_bo *bo;
struct sna_damage **damage;
- const BoxRec *last_extents;
+ const BoxRec *extents, *last_extents;
uint32_t *b;
int16_t dx, dy;
+ uint32_t br00;
uint8_t rop = transparent ? copy_ROP[gc->alu] : ROP_S;
- RegionRec clip;
- if (priv->gpu_bo->tiling == I915_TILING_Y) {
+ if (!sna_drawable_use_gpu_bo(drawable, &clip->extents))
+ return false;
+
+ bo = priv->gpu_bo;
+ if (bo->tiling == I915_TILING_Y) {
DBG(("%s -- fallback, dst uses Y-tiling\n", __FUNCTION__));
return false;
}
- region_set(&clip, extents);
- region_maybe_clip(&clip, gc->pCompositeClip);
- if (!RegionNotEmpty(&clip))
- return true;
-
- damage = reduce_damage(drawable, &priv->gpu_damage, extents),
+ damage = reduce_damage(drawable, &priv->gpu_damage, &clip->extents),
get_drawable_deltas(drawable, pixmap, &dx, &dy);
_x += drawable->x + dx;
_y += drawable->y + dy;
- RegionTranslate(&clip, dx, dy);
- extents = REGION_RECTS(&clip);
- last_extents = extents + REGION_NUM_RECTS(&clip);
+ RegionTranslate(clip, dx, dy);
+ extents = REGION_RECTS(clip);
+ last_extents = extents + REGION_NUM_RECTS(clip);
kgem_set_mode(&sna->kgem, KGEM_BLT);
if (!kgem_check_batch(&sna->kgem, 16) ||
- !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_bo_fenced(&sna->kgem, bo, NULL) ||
!kgem_check_reloc(&sna->kgem, 1)) {
_kgem_submit(&sna->kgem);
_kgem_set_mode(&sna->kgem, KGEM_BLT);
}
b = sna->kgem.batch + sna->kgem.nbatch;
b[0] = XY_SETUP_BLT | 1 << 20;
- b[1] = priv->gpu_bo->pitch;
+ b[1] = bo->pitch;
if (sna->kgem.gen >= 40) {
- if (priv->gpu_bo->tiling)
+ if (bo->tiling)
b[0] |= BLT_DST_TILED;
b[1] >>= 2;
}
b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
b[2] = extents->y1 << 16 | extents->x1;
b[3] = extents->y2 << 16 | extents->x2;
- b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
- priv->gpu_bo,
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
I915_GEM_DOMAIN_RENDER << 16 |
I915_GEM_DOMAIN_RENDER |
KGEM_RELOC_FENCED,
@@ -6811,6 +6848,10 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
b[7] = 0;
sna->kgem.nbatch += 8;
+ br00 = XY_TEXT_IMMEDIATE_BLT;
+ if (bo->tiling && sna->kgem.gen >= 40)
+ br00 |= BLT_DST_TILED;
+
do {
CharInfoPtr *info = _info;
int x = _x, y = _y, n = _n;
@@ -6820,10 +6861,8 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
uint8_t *glyph = FONTGLYPHBITS(base, c);
int w = GLYPHWIDTHPIXELS(c);
int h = GLYPHHEIGHTPIXELS(c);
- int stride = GLYPHWIDTHBYTESPADDED(c);
int w8 = (w + 7) >> 3;
- int x1, y1, len, i;
- uint8_t *byte;
+ int x1, y1, len;
if (w == 0 || h == 0)
goto skip;
@@ -6846,17 +6885,16 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
b = sna->kgem.batch + sna->kgem.nbatch;
b[0] = XY_SETUP_BLT | 1 << 20;
- b[1] = priv->gpu_bo->pitch;
+ b[1] = bo->pitch;
if (sna->kgem.gen >= 40) {
- if (priv->gpu_bo->tiling)
+ if (bo->tiling)
b[0] |= BLT_DST_TILED;
b[1] >>= 2;
}
b[1] |= 1 << 30 | transparent << 29 | blt_depth(drawable->depth) << 24 | rop << 16;
b[2] = extents->y1 << 16 | extents->x1;
b[3] = extents->y2 << 16 | extents->x2;
- b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
- priv->gpu_bo,
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4, bo,
I915_GEM_DOMAIN_RENDER << 16 |
I915_GEM_DOMAIN_RENDER |
KGEM_RELOC_FENCED,
@@ -6870,24 +6908,10 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
b = sna->kgem.batch + sna->kgem.nbatch;
sna->kgem.nbatch += 3 + len;
- b[0] = XY_TEXT_IMMEDIATE_BLT | (1 + len);
- if (priv->gpu_bo->tiling && sna->kgem.gen >= 40)
- b[0] |= BLT_DST_TILED;
+ b[0] = br00 | (1 + len);
b[1] = (uint16_t)y1 << 16 | (uint16_t)x1;
b[2] = (uint16_t)(y1+h) << 16 | (uint16_t)(x1+w);
-
- byte = (uint8_t *)&b[3];
- stride -= w8;
- do {
- i = w8;
- do {
- *byte++ = byte_reverse(*glyph++);
- } while (--i);
- glyph += stride;
- } while (--h);
- while ((byte - (uint8_t *)&b[3]) & 7)
- *byte++ = 0;
- assert((uint32_t *)byte == sna->kgem.batch + sna->kgem.nbatch);
+ memcpy(b+3, glyph, w8*h);
if (damage) {
BoxRec r;
@@ -6916,24 +6940,143 @@ skip:
}
} while (1);
- RegionUninit(&clip);
sna->blt_state.fill_bo = 0;
return true;
}
static void
-sna_image_glyph(DrawablePtr drawable, GCPtr gc,
- int x, int y, unsigned int n,
- CharInfoPtr *info, pointer base)
+sna_glyph_extents(FontPtr font,
+ CharInfoPtr *info,
+ unsigned long count,
+ ExtentInfoRec *extents)
{
- struct sna *sna = to_sna_from_drawable(drawable);
+ extents->drawDirection = font->info.drawDirection;
+ extents->fontAscent = font->info.fontAscent;
+ extents->fontDescent = font->info.fontDescent;
+
+ extents->overallAscent = info[0]->metrics.ascent;
+ extents->overallDescent = info[0]->metrics.descent;
+ extents->overallLeft = info[0]->metrics.leftSideBearing;
+ extents->overallRight = info[0]->metrics.rightSideBearing;
+ extents->overallWidth = info[0]->metrics.characterWidth;
+
+ while (--count) {
+ CharInfoPtr p =*++info;
+ int v;
+
+ if (p->metrics.ascent > extents->overallAscent)
+ extents->overallAscent = p->metrics.ascent;
+ if (p->metrics.descent > extents->overallDescent)
+ extents->overallDescent = p->metrics.descent;
+
+ v = extents->overallWidth + p->metrics.leftSideBearing;
+ if (v < extents->overallLeft)
+ extents->overallLeft = v;
+
+ v = extents->overallWidth + p->metrics.rightSideBearing;
+ if (v > extents->overallRight)
+ extents->overallRight = v;
+
+ extents->overallWidth += p->metrics.characterWidth;
+ }
+}
+
+static bool sna_set_glyph(CharInfoPtr in, CharInfoPtr out)
+{
+ int w = GLYPHWIDTHPIXELS(in);
+ int h = GLYPHHEIGHTPIXELS(in);
+ int stride = GLYPHWIDTHBYTESPADDED(in);
+ uint8_t *dst, *src;
+
+ w = (w + 7) >> 3;
+
+ out->metrics = in->metrics;
+ out->bits = malloc(w*h);
+
+ src = (uint8_t *)in->bits;
+ dst = (uint8_t *)out->bits;
+ stride -= w;
+ do {
+ int i = w;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ src += stride;
+ } while (--h);
+
+ return true;
+}
+
+inline static bool sna_get_glyph8(FontPtr font, struct sna_font *priv,
+ uint8_t g, CharInfoPtr *out)
+{
+ unsigned long n;
+ CharInfoPtr p, ret;
+
+ p = &priv->glyphs8[g];
+ if (p->bits) {
+ *out = p;
+ return p->bits != (void*)-1;
+ }
+
+ font->get_glyphs(font, 1, &g, Linear8Bit, &n, &ret);
+ if (n == 0) {
+ p->bits = (void*)-1;
+ return false;
+ }
+
+ return sna_set_glyph(ret, *out = p);
+}
+
+inline static bool sna_get_glyph16(FontPtr font, struct sna_font *priv,
+ uint16_t g, CharInfoPtr *out)
+{
+ unsigned long n;
+ CharInfoPtr page, p, ret;
+
+ page = priv->glyphs16[g>>8];
+ if (page == NULL)
+ page = priv->glyphs16[g>>8] = calloc(256, sizeof(CharInfoRec));
+
+ p = &page[g&0xff];
+ if (p->bits) {
+ *out = p;
+ return p->bits != (void*)-1;
+ }
+
+ font->get_glyphs(font, 1, (unsigned char *)&g,
+ FONTLASTROW(font) ? TwoD16Bit : Linear16Bit,
+ &n, &ret);
+ if (n == 0) {
+ p->bits = (void*)-1;
+ return false;
+ }
+
+ return sna_set_glyph(ret, *out = p);
+}
+
+static int
+sna_poly_text8(DrawablePtr drawable, GCPtr gc,
+ int x, int y,
+ int count, char *chars)
+{
+ struct sna_font *priv = gc->font->devPrivates[sna_font_key];
+ CharInfoPtr info[255];
ExtentInfoRec extents;
RegionRec region;
+ long unsigned i, n;
+
+ if (drawable->depth < 8)
+ goto fallback;
+ for (i = n = 0; i < count; i++) {
+ if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
+ n++;
+ }
if (n == 0)
- return;
+ return x;
- QueryGlyphExtents(gc->font, info, n, &extents);
+ sna_glyph_extents(gc->font, info, n, &extents);
region.extents.x1 = x + extents.overallLeft;
region.extents.y1 = y - extents.overallAscent;
region.extents.x2 = x + extents.overallRight;
@@ -6942,51 +7085,134 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
translate_box(&region.extents, drawable);
clip_box(&region.extents, gc);
if (box_empty(&region.extents))
- return;
+ return x + extents.overallRight;
- DBG(("%s: extents(%d, %d), (%d, %d)\n", __FUNCTION__,
- region.extents.x1, region.extents.y1,
- region.extents.x2, region.extents.y2));
+ region.data = NULL;
+ region_maybe_clip(&region, gc->pCompositeClip);
+ if (!RegionNotEmpty(&region))
+ return x + extents.overallRight;
- if (FORCE_FALLBACK)
- goto fallback;
+ if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+ DBG(("%s: fallback\n", __FUNCTION__));
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ Linear8Bit, &n, info);
- if (wedged(sna)) {
- DBG(("%s: fallback -- wedged\n", __FUNCTION__));
- goto fallback;
+ sna_gc_move_to_cpu(gc, drawable);
+ sna_drawable_move_region_to_cpu(drawable, &region, true);
+
+ DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
+ fbPolyGlyphBlt(drawable, gc, x, y, n,
+ info, FONTGLYPHS(gc->font));
}
+ RegionUninit(&region);
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_glyph_blt(drawable, gc, x, y, n, info, base, false, &region.extents))
- return;
+ return x + extents.overallRight;
fallback:
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ Linear8Bit, &n, info);
+ if (n == 0)
+ return x;
+
+ extents.overallWidth = x;
+ for (i = 0; i < n; i++)
+ extents.overallWidth += info[i]->metrics.characterWidth;
+ fbPolyGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
+
+ return extents.overallWidth;
+}
+
+static int
+sna_poly_text16(DrawablePtr drawable, GCPtr gc,
+ int x, int y,
+ int count, unsigned short *chars)
+{
+ struct sna_font *priv = gc->font->devPrivates[sna_font_key];
+ CharInfoPtr info[255];
+ ExtentInfoRec extents;
+ RegionRec region;
+ long unsigned i, n;
+
+ if (drawable->depth < 8)
+ goto fallback;
+
+ for (i = n = 0; i < count; i++) {
+ if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
+ n++;
+ }
+ if (n == 0)
+ return x;
+
+ sna_glyph_extents(gc->font, info, n, &extents);
+ region.extents.x1 = x + extents.overallLeft;
+ region.extents.y1 = y - extents.overallAscent;
+ region.extents.x2 = x + extents.overallRight;
+ region.extents.y2 = y + extents.overallDescent;
+
+ translate_box(&region.extents, drawable);
+ clip_box(&region.extents, gc);
+ if (box_empty(&region.extents))
+ return x + extents.overallRight;
+
region.data = NULL;
region_maybe_clip(&region, gc->pCompositeClip);
if (!RegionNotEmpty(&region))
- return;
+ return x + extents.overallRight;
- sna_gc_move_to_cpu(gc, drawable);
- sna_drawable_move_region_to_cpu(drawable, &region, true);
+ if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, true)) {
+ DBG(("%s: fallback\n", __FUNCTION__));
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
+ &n, info);
+
+ sna_gc_move_to_cpu(gc, drawable);
+ sna_drawable_move_region_to_cpu(drawable, &region, true);
+
+ DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
+ fbPolyGlyphBlt(drawable, gc, x, y, n,
+ info, FONTGLYPHS(gc->font));
+ }
RegionUninit(&region);
- DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
- fbImageGlyphBlt(drawable, gc, x, y, n, info, base);
+ return x + extents.overallRight;
+
+fallback:
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
+ &n, info);
+ if (n == 0)
+ return x;
+
+ extents.overallWidth = x;
+ for (i = 0; i < n; i++)
+ extents.overallWidth += info[i]->metrics.characterWidth;
+ fbPolyGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
+
+ return extents.overallWidth;
}
static void
-sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
- int x, int y, unsigned int n,
- CharInfoPtr *info, pointer base)
+sna_image_text8(DrawablePtr drawable, GCPtr gc,
+ int x, int y,
+ int count, char *chars)
{
- struct sna *sna = to_sna_from_drawable(drawable);
+ struct sna_font *priv = gc->font->devPrivates[sna_font_key];
+ CharInfoPtr info[255];
ExtentInfoRec extents;
RegionRec region;
+ long unsigned i, n;
+
+ if (drawable->depth < 8)
+ goto fallback;
+ for (i = n = 0; i < count; i++) {
+ if (sna_get_glyph8(gc->font, priv, chars[i], &info[n]))
+ n++;
+ }
if (n == 0)
return;
- QueryGlyphExtents(gc->font, info, n, &extents);
+ sna_glyph_extents(gc->font, info, n, &extents);
region.extents.x1 = x + extents.overallLeft;
region.extents.y1 = y - extents.overallAscent;
region.extents.x2 = x + extents.overallRight;
@@ -6997,34 +7223,90 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
if (box_empty(&region.extents))
return;
- DBG(("%s: extents(%d, %d), (%d, %d)\n", __FUNCTION__,
- region.extents.x1, region.extents.y1,
- region.extents.x2, region.extents.y2));
+ region.data = NULL;
+ region_maybe_clip(&region, gc->pCompositeClip);
+ if (!RegionNotEmpty(&region))
+ return;
- if (FORCE_FALLBACK)
- goto fallback;
+ if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+ DBG(("%s: fallback\n", __FUNCTION__));
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ Linear8Bit, &n, info);
- if (wedged(sna)) {
- DBG(("%s: fallback -- wedged\n", __FUNCTION__));
+ sna_gc_move_to_cpu(gc, drawable);
+ sna_drawable_move_region_to_cpu(drawable, &region, true);
+
+ DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
+ fbImageGlyphBlt(drawable, gc, x, y, n,
+ info, FONTGLYPHS(gc->font));
+ }
+ RegionUninit(&region);
+
+fallback:
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ Linear8Bit, &n, info);
+ if (n)
+ fbImageGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
+}
+
+static void
+sna_image_text16(DrawablePtr drawable, GCPtr gc,
+ int x, int y,
+ int count, unsigned short *chars)
+{
+ struct sna_font *priv = gc->font->devPrivates[sna_font_key];
+ CharInfoPtr info[255];
+ ExtentInfoRec extents;
+ RegionRec region;
+ long unsigned i, n;
+
+ if (drawable->depth < 8)
goto fallback;
+
+ for (i = n = 0; i < count; i++) {
+ if (sna_get_glyph16(gc->font, priv, chars[i], &info[n]))
+ n++;
}
+ if (n == 0)
+ return;
- if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
- sna_glyph_blt(drawable, gc, x, y, n, info, base, true, &region.extents))
+ sna_glyph_extents(gc->font, info, n, &extents);
+ region.extents.x1 = x + extents.overallLeft;
+ region.extents.y1 = y - extents.overallAscent;
+ region.extents.x2 = x + extents.overallRight;
+ region.extents.y2 = y + extents.overallDescent;
+
+ translate_box(&region.extents, drawable);
+ clip_box(&region.extents, gc);
+ if (box_empty(&region.extents))
return;
-fallback:
region.data = NULL;
region_maybe_clip(&region, gc->pCompositeClip);
if (!RegionNotEmpty(&region))
return;
- sna_gc_move_to_cpu(gc, drawable);
- sna_drawable_move_region_to_cpu(drawable, &region, true);
+ if (!sna_glyph_blt(drawable, gc, x, y, n, info, &region, false)) {
+ DBG(("%s: fallback\n", __FUNCTION__));
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
+ &n, info);
+
+ sna_gc_move_to_cpu(gc, drawable);
+ sna_drawable_move_region_to_cpu(drawable, &region, true);
+
+ DBG(("%s: fallback -- fbImageGlyphBlt\n", __FUNCTION__));
+ fbImageGlyphBlt(drawable, gc, x, y, n,
+ info, FONTGLYPHS(gc->font));
+ }
RegionUninit(&region);
- DBG(("%s: fallback -- fbPolyGlyphBlt\n", __FUNCTION__));
- fbPolyGlyphBlt(drawable, gc, x, y, n, info, base);
+fallback:
+ gc->font->get_glyphs(gc->font, count, (unsigned char *)chars,
+ FONTLASTROW(gc->font) ? TwoD16Bit : Linear16Bit,
+ &n, info);
+ if (n)
+ fbImageGlyphBlt(drawable, gc, x, y, n, info, FONTGLYPHS(gc->font));
}
static bool
@@ -7208,12 +7490,12 @@ static const GCOps sna_gc_ops = {
miFillPolygon,
sna_poly_fill_rect,
miPolyFillArc,
- miPolyText8,
- miPolyText16,
- miImageText8,
- miImageText16,
- sna_image_glyph,
- sna_poly_glyph,
+ sna_poly_text8,
+ sna_poly_text16,
+ sna_image_text8,
+ sna_image_text16,
+ NULL,
+ NULL,
sna_push_pixels,
};
@@ -7555,6 +7837,10 @@ Bool sna_accel_init(ScreenPtr screen, struct sna *sna)
sna_window_key = fbGetWinPrivateKey();
+ sna_font_key = AllocateFontPrivateIndex();
+ screen->RealizeFont = sna_realize_font;
+ screen->UnrealizeFont = sna_unrealize_font;
+
list_init(&sna->dirty_pixmaps);
list_init(&sna->deferred_free);
commit 8a259e34d3295af3be864e4fae96e6c3a9a632c7
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 01:05:52 2011 +0000
sna: gc->miTranslate is always 1
So we can perform some constant folding and eliminate dead code.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c628ff2..9126da9 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2224,11 +2224,6 @@ sna_fill_spans_blt(DrawablePtr drawable,
struct sna_fill_op fill;
BoxRec box[512], *b = box, *const last_box = box + ARRAY_SIZE(box);
static void * const jump[] = {
- &&no_damage_translate,
- &&damage_translate,
- &&no_damage_clipped_translate,
- &&damage_clipped_translate,
-
&&no_damage,
&&damage,
&&no_damage_clipped,
@@ -2244,12 +2239,9 @@ sna_fill_spans_blt(DrawablePtr drawable,
get_drawable_deltas(drawable, pixmap, &dx, &dy);
- v = (damage != NULL) | clipped | gc->miTranslate << 2;
+ v = (damage != NULL) | clipped;
goto *jump[v];
-no_damage_translate:
- dx += drawable->x;
- dy += drawable->y;
no_damage:
if (dx|dy) {
do {
@@ -2286,9 +2278,6 @@ no_damage:
}
goto done;
-damage_translate:
- dx += drawable->x;
- dy += drawable->y;
damage:
do {
*(DDXPointRec *)b = *pt++;
@@ -2309,18 +2298,10 @@ damage:
}
goto done;
+no_damage_clipped:
{
RegionRec clip;
- int i;
-
-no_damage_clipped_translate:
- for (i = 0; i < n; i++) {
- /* XXX overflow? */
- pt->x += drawable->x;
- pt->y += drawable->y;
- }
-no_damage_clipped:
region_set(&clip, extents);
region_maybe_clip(&clip, gc->pCompositeClip);
if (!RegionNotEmpty(&clip))
@@ -2420,18 +2401,10 @@ no_damage_clipped:
goto done;
}
+damage_clipped:
{
RegionRec clip;
- int i;
-
-damage_clipped_translate:
- for (i = 0; i < n; i++) {
- /* XXX overflow? */
- pt->x += drawable->x;
- pt->y += drawable->y;
- }
-damage_clipped:
region_set(&clip, extents);
region_maybe_clip(&clip, gc->pCompositeClip);
if (!RegionNotEmpty(&clip))
@@ -2570,11 +2543,8 @@ sna_spans_extents(DrawablePtr drawable, GCPtr gc,
}
box.y2++;
- if (gc) {
- if (!gc->miTranslate)
- translate_box(&box, drawable);
+ if (gc)
clipped = clip_box(&box, gc);
- }
if (box_empty(&box))
return 0;
@@ -7188,10 +7158,6 @@ sna_push_pixels(GCPtr gc, PixmapPtr bitmap, DrawablePtr drawable,
region.extents.x1 = x;
region.extents.y1 = y;
- if (!gc->miTranslate) {
- region.extents.x1 += drawable->x;
- region.extents.y1 += drawable->y;
- }
region.extents.x2 = region.extents.x1 + w;
region.extents.y2 = region.extents.y1 + h;
commit 8f68f9e5f8341e20c4e1e46044f79806a44ecd03
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 00:59:46 2011 +0000
sna: Trimming is redundant given that we always have a CompositeClip
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 3185811..c628ff2 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1058,23 +1058,6 @@ static void sna_gc_move_to_cpu(GCPtr gc, DrawablePtr drawable)
sna_drawable_move_to_cpu(&gc->tile.pixmap->drawable, false);
}
-static inline bool trim_box(BoxPtr box, DrawablePtr d)
-{
- bool clipped = false;
-
- if (box->x1 < 0)
- box->x1 = 0, clipped = true;
- if (box->x2 > d->width)
- box->x2 = d->width, clipped = true;
-
- if (box->y1 < 0)
- box->y1 = 0, clipped = true;
- if (box->y2 > d->height)
- box->y2 = d->height, clipped = true;
-
- return clipped;
-}
-
static inline bool clip_box(BoxPtr box, GCPtr gc)
{
const BoxRec *clip;
@@ -1107,27 +1090,8 @@ static inline void translate_box(BoxPtr box, DrawablePtr d)
static inline bool trim_and_translate_box(BoxPtr box, DrawablePtr d, GCPtr gc)
{
- bool clipped = trim_box(box, d);
translate_box(box, d);
- clipped |= clip_box(box, gc);
- return clipped;
-}
-
-static inline bool box32_trim(Box32Rec *box, DrawablePtr d)
-{
- bool clipped = false;
-
- if (box->x1 < 0)
- box->x1 = 0, clipped = true;
- if (box->x2 > d->width)
- box->x2 = d->width, clipped = true;
-
- if (box->y1 < 0)
- box->y1 = 0, clipped = true;
- if (box->y2 > d->height)
- box->y2 = d->height, clipped = true;
-
- return clipped;
+ return clip_box(box, gc);
}
static inline bool box32_clip(Box32Rec *box, GCPtr gc)
@@ -1159,17 +1123,8 @@ static inline void box32_translate(Box32Rec *box, DrawablePtr d)
static inline bool box32_trim_and_translate(Box32Rec *box, DrawablePtr d, GCPtr gc)
{
- bool clipped;
-
- if (likely (gc->pCompositeClip)) {
- box32_translate(box, d);
- clipped = box32_clip(box, gc);
- } else {
- clipped = box32_trim(box, d);
- box32_translate(box, d);
- }
-
- return clipped;
+ box32_translate(box, d);
+ return box32_clip(box, gc);
}
static inline void box_add_pt(BoxPtr box, int16_t x, int16_t y)
@@ -1648,7 +1603,7 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
{
PixmapPtr pixmap = get_drawable_pixmap(drawable);
struct sna_pixmap *priv = sna_pixmap(pixmap);
- RegionRec region, *clip;
+ RegionRec region;
int16_t dx, dy;
DBG(("%s((%d, %d)x(%d, %d), depth=%d, format=%d)\n",
@@ -1667,26 +1622,18 @@ sna_put_image(DrawablePtr drawable, GCPtr gc, int depth,
get_drawable_deltas(drawable, pixmap, &dx, &dy);
- region.extents.x1 = x + drawable->x + dx;
- region.extents.y1 = y + drawable->y + dy;
+ region.extents.x1 = x + drawable->x;
+ region.extents.y1 = y + drawable->y;
region.extents.x2 = region.extents.x1 + w;
region.extents.y2 = region.extents.y1 + h;
-
- trim_box(&region.extents, &pixmap->drawable);
- if (box_empty(&region.extents))
- return;
-
region.data = NULL;
- clip = fbGetCompositeClip(gc);
- if (clip) {
- RegionTranslate(clip, dx, dy);
- RegionIntersect(&region, &region, clip);
- RegionTranslate(clip, -dx, -dy);
- }
+ RegionIntersect(&region, &region, gc->pCompositeClip);
if (!RegionNotEmpty(&region))
return;
+ RegionTranslate(&region, dx, dy);
+
switch (format) {
case ZPixmap:
if (!PM_IS_SOLID(drawable, gc->planemask))
@@ -7022,7 +6969,6 @@ sna_image_glyph(DrawablePtr drawable, GCPtr gc,
region.extents.x2 = x + extents.overallRight;
region.extents.y2 = y + extents.overallDescent;
- trim_box(&region.extents, drawable);
translate_box(&region.extents, drawable);
clip_box(&region.extents, gc);
if (box_empty(&region.extents))
@@ -7076,7 +7022,6 @@ sna_poly_glyph(DrawablePtr drawable, GCPtr gc,
region.extents.x2 = x + extents.overallRight;
region.extents.y2 = y + extents.overallDescent;
- trim_box(&region.extents, drawable);
translate_box(&region.extents, drawable);
clip_box(&region.extents, gc);
if (box_empty(&region.extents))
commit f4bdd84b846e6e778b7c496e7ed0d51e77310f36
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Thu Nov 3 00:48:10 2011 +0000
sna: Simplify the uncommon check for gpu-only damage by using damage-all
The use of a gpu-only scratch bo is uncommon with the core acceleration
routines, and we can eliminate the check for not incrementing the damage
by allocating a damage-all and using the common optimisation of
reduce_damage().
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 6bb6cef..3185811 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -328,6 +328,7 @@ sna_pixmap_create_scratch(ScreenPtr screen,
}
priv->gpu_only = 1;
+ sna_damage_all(&priv->gpu_damage, width, height);
miModifyPixmapHeader(pixmap,
width, height, depth, bpp,
@@ -1421,8 +1422,7 @@ sna_put_xybitmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
return false;
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- if (!priv->gpu_only)
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(&priv->gpu_damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -1536,8 +1536,7 @@ sna_put_xypixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
return false;
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- if (!priv->gpu_only)
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(&priv->gpu_damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__, x, y, w, h));
@@ -1793,8 +1792,7 @@ sna_self_copy_boxes(DrawablePtr src, DrawablePtr dst, GCPtr gc,
goto fallback;
}
- if (!priv->gpu_only)
- sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty);
+ sna_damage_add_boxes(&priv->gpu_damage, box, n, tx, ty);
} else {
FbBits *dst_bits, *src_bits;
int stride, bpp;
@@ -2727,7 +2725,7 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_fill_spans_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, n, pt, width, sorted,
&region.extents, flags & 2))
return;
@@ -2762,7 +2760,7 @@ sna_fill_spans(DrawablePtr drawable, GCPtr gc, int n,
i = sna_poly_fill_rect_tiled(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, n, rect,
&region.extents, flags & 2);
free (rect);
@@ -3218,7 +3216,7 @@ sna_copy_plane(DrawablePtr src, DrawablePtr dst, GCPtr gc,
dst_x, dst_y,
src->depth == 1 ? sna_copy_bitmap_blt :sna_copy_plane_blt,
(Pixel)bit,
- priv->gpu_only ? NULL : reduce_damage(dst, &priv->gpu_damage, &region.extents));
+ reduce_damage(dst, &priv->gpu_damage, &region.extents));
}
}
@@ -3391,7 +3389,7 @@ sna_poly_point(DrawablePtr drawable, GCPtr gc,
if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_point_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, mode, n, pt, flags & 2))
return;
@@ -4121,7 +4119,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_line_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, mode, n, pt,
&region.extents, flags & 4))
return;
@@ -4138,7 +4136,7 @@ sna_poly_line(DrawablePtr drawable, GCPtr gc,
sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_zero_line_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, mode, n, pt,
&region.extents, flags & 4))
return;
@@ -4889,7 +4887,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_segment_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, n, seg,
&region.extents, flags & 2))
return;
@@ -4906,7 +4904,7 @@ sna_poly_segment(DrawablePtr drawable, GCPtr gc, int n, xSegment *seg)
sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_zero_segment_blt(drawable,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, n, seg, &region.extents, flags & 2))
return;
}
@@ -5463,7 +5461,7 @@ sna_poly_rectangle(DrawablePtr drawable, GCPtr gc, int n, xRectangle *r)
if (sna_drawable_use_gpu_bo(drawable, &region.extents) &&
sna_poly_rectangle_blt(drawable, priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(drawable, &priv->gpu_damage, &region.extents),
+ reduce_damage(drawable, &priv->gpu_damage, &region.extents),
gc, n, r, &region.extents, flags&2))
return;
@@ -6756,7 +6754,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
sna_poly_fill_rect_blt(draw,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ reduce_damage(draw, &priv->gpu_damage, &region.extents),
gc, color, n, rect,
&region.extents, flags & 2))
return;
@@ -6776,7 +6774,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
sna_poly_fill_rect_tiled(draw,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ reduce_damage(draw, &priv->gpu_damage, &region.extents),
gc, n, rect,
&region.extents, flags & 2))
return;
@@ -6796,7 +6794,7 @@ sna_poly_fill_rect(DrawablePtr draw, GCPtr gc, int n, xRectangle *rect)
if (sna_drawable_use_gpu_bo(draw, &region.extents) &&
sna_poly_fill_rect_stippled_blt(draw,
priv->gpu_bo,
- priv->gpu_only ? NULL : reduce_damage(draw, &priv->gpu_damage, &region.extents),
+ reduce_damage(draw, &priv->gpu_damage, &region.extents),
gc, n, rect,
&region.extents, flags & 2))
return;
@@ -6857,8 +6855,7 @@ sna_glyph_blt(DrawablePtr drawable, GCPtr gc,
if (!RegionNotEmpty(&clip))
return true;
- damage = priv->gpu_only ? NULL :
- reduce_damage(drawable, &priv->gpu_damage, extents),
+ damage = reduce_damage(drawable, &priv->gpu_damage, extents),
get_drawable_deltas(drawable, pixmap, &dx, &dy);
_x += drawable->x + dx;
@@ -7139,8 +7136,7 @@ sna_push_pixels_solid_blt(GCPtr gc,
RegionTranslate(region, dx, dy);
assert_pixmap_contains_box(pixmap, RegionExtents(region));
- if (!priv->gpu_only)
- sna_damage_add(&priv->gpu_damage, region);
+ sna_damage_add(&priv->gpu_damage, region);
DBG(("%s: upload(%d, %d, %d, %d)\n", __FUNCTION__,
region->extents.x1, region->extents.y1,
commit 353fa4218cd904857fe95ef140e7f38aefe07578
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Wed Nov 2 22:53:51 2011 +0000
sna: Don't call into retire unless there are outstanding requests
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index e470e4d..37e6035 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -671,7 +671,8 @@ bool kgem_retire(struct kgem *kgem)
free(rq);
}
- if (kgem->ring && list_is_empty(&kgem->requests))
+ kgem->need_retire = !list_is_empty(&kgem->requests);
+ if (!kgem->need_retire && kgem->ring)
kgem->ring = kgem->mode;
return retired;
@@ -712,6 +713,7 @@ destroy:
list_add_tail(&rq->list, &kgem->requests);
kgem->next_request = __kgem_request_alloc();
+ kgem->need_retire = 1;
}
static void kgem_close_list(struct kgem *kgem, struct list *head)
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 26eb784..4b260e2 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -107,6 +107,7 @@ struct kgem {
uint32_t flush:1;
uint32_t need_expire:1;
uint32_t need_purge:1;
+ uint32_t need_retire:1;
uint32_t busy:1;
uint32_t has_vmap :1;
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 529bb23..6bb6cef 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -7772,7 +7772,9 @@ void sna_accel_block_handler(struct sna *sna)
void sna_accel_wakeup_handler(struct sna *sna)
{
- kgem_retire(&sna->kgem);
+ if (sna->kgem.need_retire)
+ kgem_retire(&sna->kgem);
+
sna_deferred_free(sna);
if (sna->kgem.need_purge)
commit 6fdd0f254d2e58d425d6d23023e9db3d2924c6be
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Wed Nov 2 22:23:16 2011 +0000
sna: Use an integer value for the sentinel
clang complains otherwise.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 377670e..26eb784 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -271,8 +271,8 @@ static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords)
kgem->nbatch += num_dwords;
}
-bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(NULL)));
-bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(NULL)));
+bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
+bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));
void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo)
commit 1073c78f6cebfd6380b53dd891b7a72e50f398d6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Wed Nov 2 22:14:40 2011 +0000
sna: Pack small 1-bpp uploads into immediate buffers
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index f88222f..529bb23 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -2844,8 +2844,6 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
int src_stride;
uint8_t *dst, *src;
uint32_t *b;
- struct kgem_bo *upload;
- void *ptr;
DBG(("%s: box(%d, %d), (%d, %d), sx=(%d,%d) bx=[%d, %d]\n",
__FUNCTION__,
@@ -2853,67 +2851,123 @@ sna_copy_bitmap_blt(DrawablePtr _bitmap, DrawablePtr drawable, GCPtr gc,
box->x2, box->y2,
sx, sy, bx1, bx2));
- if (!kgem_check_batch(&sna->kgem, 8) ||
- !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
- !kgem_check_reloc(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
- _kgem_set_mode(&sna->kgem, KGEM_BLT);
- }
+ src_stride = bstride*bh;
+ if (src_stride <= 128) {
+ src_stride = ALIGN(src_stride, 8) / 4;
+ if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
+ !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_reloc(&sna->kgem, 1)) {
+ _kgem_submit(&sna->kgem);
+ _kgem_set_mode(&sna->kgem, KGEM_BLT);
+ }
- upload = kgem_create_buffer(&sna->kgem,
- bstride*bh,
- KGEM_BUFFER_WRITE,
- &ptr);
- if (!upload)
- break;
+ b = sna->kgem.batch + sna->kgem.nbatch;
+ b[0] = XY_MONO_SRC_COPY_IMM | (5 + src_stride);
+ if (drawable->bitsPerPixel == 32)
+ b[0] |= 3 << 20;
+ b[0] |= ((box->x1 + sx) & 7) << 17;
+ b[1] = priv->gpu_bo->pitch;
+ if (sna->kgem.gen >= 40) {
+ if (priv->gpu_bo->tiling)
+ b[0] |= BLT_DST_TILED;
+ b[1] >>= 2;
+ }
+ b[1] |= blt_depth(drawable->depth) << 24;
+ b[1] |= rop << 16;
+ b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
+ b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+ priv->gpu_bo,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ I915_GEM_DOMAIN_RENDER |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[5] = gc->bgPixel;
+ b[6] = gc->fgPixel;
- dst = ptr;
- bstride -= bw;
+ sna->kgem.nbatch += 7 + src_stride;
- src_stride = bitmap->devKind;
- src = (uint8_t*)bitmap->devPrivate.ptr;
- src += (box->y1 + sy) * src_stride + bx1/8;
- src_stride -= bw;
- do {
- int i = bw;
+ dst = (uint8_t *)&b[7];
+ bstride -= bw;
+
+ src_stride = bitmap->devKind;
+ src = (uint8_t*)bitmap->devPrivate.ptr;
+ src += (box->y1 + sy) * src_stride + bx1/8;
+ src_stride -= bw;
do {
- *dst++ = byte_reverse(*src++);
- } while (--i);
- dst += bstride;
- src += src_stride;
- } while (--bh);
+ int i = bw;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ dst += bstride;
+ src += src_stride;
+ } while (--bh);
+ } else {
+ struct kgem_bo *upload;
+ void *ptr;
- b = sna->kgem.batch + sna->kgem.nbatch;
- b[0] = XY_MONO_SRC_COPY;
- if (drawable->bitsPerPixel == 32)
- b[0] |= 3 << 20;
- b[0] |= ((box->x1 + sx) & 7) << 17;
- b[1] = priv->gpu_bo->pitch;
- if (sna->kgem.gen >= 40) {
- if (priv->gpu_bo->tiling)
- b[0] |= BLT_DST_TILED;
- b[1] >>= 2;
- }
- b[1] |= blt_depth(drawable->depth) << 24;
- b[1] |= rop << 16;
- b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
- b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
- b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
- priv->gpu_bo,
- I915_GEM_DOMAIN_RENDER << 16 |
- I915_GEM_DOMAIN_RENDER |
- KGEM_RELOC_FENCED,
- 0);
- b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5,
- upload,
- I915_GEM_DOMAIN_RENDER << 16 |
- KGEM_RELOC_FENCED,
- 0);
- b[6] = gc->bgPixel;
- b[7] = gc->fgPixel;
+ if (!kgem_check_batch(&sna->kgem, 8) ||
+ !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_reloc(&sna->kgem, 2)) {
+ _kgem_submit(&sna->kgem);
+ _kgem_set_mode(&sna->kgem, KGEM_BLT);
+ }
- sna->kgem.nbatch += 8;
- kgem_bo_destroy(&sna->kgem, upload);
+ upload = kgem_create_buffer(&sna->kgem,
+ bstride*bh,
+ KGEM_BUFFER_WRITE,
+ &ptr);
+ if (!upload)
+ break;
+
+ dst = ptr;
+ bstride -= bw;
+
+ src_stride = bitmap->devKind;
+ src = (uint8_t*)bitmap->devPrivate.ptr;
+ src += (box->y1 + sy) * src_stride + bx1/8;
+ src_stride -= bw;
+ do {
+ int i = bw;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ dst += bstride;
+ src += src_stride;
+ } while (--bh);
+
+ b = sna->kgem.batch + sna->kgem.nbatch;
+ b[0] = XY_MONO_SRC_COPY;
+ if (drawable->bitsPerPixel == 32)
+ b[0] |= 3 << 20;
+ b[0] |= ((box->x1 + sx) & 7) << 17;
+ b[1] = priv->gpu_bo->pitch;
+ if (sna->kgem.gen >= 40) {
+ if (priv->gpu_bo->tiling)
+ b[0] |= BLT_DST_TILED;
+ b[1] >>= 2;
+ }
+ b[1] |= blt_depth(drawable->depth) << 24;
+ b[1] |= rop << 16;
+ b[2] = (box->y1 + dy) << 16 | (box->x1 + dx);
+ b[3] = (box->y2 + dy) << 16 | (box->x2 + dx);
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+ priv->gpu_bo,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ I915_GEM_DOMAIN_RENDER |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5,
+ upload,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[6] = gc->bgPixel;
+ b[7] = gc->fgPixel;
+
+ sna->kgem.nbatch += 8;
+ kgem_bo_destroy(&sna->kgem, upload);
+ }
box++;
} while (--n);
@@ -6183,122 +6237,68 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
int src_stride;
uint8_t *dst, *src;
uint32_t *b;
- struct kgem_bo *upload;
- void *ptr;
DBG(("%s: rect (%d, %d)x(%d, %d) stipple [%d,%d]\n",
__FUNCTION__,
r->x, r->y, r->width, r->height,
bx1, bx2));
- if (!kgem_check_batch(&sna->kgem, 8) ||
- !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
- !kgem_check_reloc(&sna->kgem, 2)) {
- _kgem_submit(&sna->kgem);
- _kgem_set_mode(&sna->kgem, KGEM_BLT);
- }
-
- upload = kgem_create_buffer(&sna->kgem,
- bstride*bh,
- KGEM_BUFFER_WRITE,
- &ptr);
- if (!upload)
- break;
-
- dst = ptr;
- bstride -= bw;
-
- src_stride = stipple->devKind;
- src = (uint8_t*)stipple->devPrivate.ptr;
- src += (r->y - origin->y) * src_stride + bx1/8;
- src_stride -= bw;
- do {
- int i = bw;
- do {
- *dst++ = byte_reverse(*src++);
- } while (--i);
- dst += bstride;
- src += src_stride;
- } while (--bh);
-
- b = sna->kgem.batch + sna->kgem.nbatch;
- b[0] = XY_MONO_SRC_COPY;
- if (drawable->bitsPerPixel == 32)
- b[0] |= 3 << 20;
- b[0] |= ((r->x - origin->x) & 7) << 17;
- b[1] = priv->gpu_bo->pitch;
- if (sna->kgem.gen >= 40) {
- if (priv->gpu_bo->tiling)
- b[0] |= BLT_DST_TILED;
- b[1] >>= 2;
- }
- b[1] |= (gc->fillStyle == FillStippled) << 29;
- b[1] |= blt_depth(drawable->depth) << 24;
- b[1] |= rop << 16;
- b[2] = (r->y + dy) << 16 | (r->x + dx);
- b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
- b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
- priv->gpu_bo,
- I915_GEM_DOMAIN_RENDER << 16 |
- I915_GEM_DOMAIN_RENDER |
- KGEM_RELOC_FENCED,
- 0);
- b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5,
- upload,
- I915_GEM_DOMAIN_RENDER << 16 |
- KGEM_RELOC_FENCED,
- 0);
- b[6] = gc->bgPixel;
- b[7] = gc->fgPixel;
-
- sna->kgem.nbatch += 8;
- kgem_bo_destroy(&sna->kgem, upload);
+ src_stride = bstride*bh;
+ if (src_stride <= 128) {
+ src_stride = ALIGN(src_stride, 8) / 4;
+ if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
+ !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_reloc(&sna->kgem, 1)) {
+ _kgem_submit(&sna->kgem);
+ _kgem_set_mode(&sna->kgem, KGEM_BLT);
+ }
- r++;
- } while (--n);
- } else {
- RegionRec clip;
- DDXPointRec pat;
+ b = sna->kgem.batch + sna->kgem.nbatch;
+ b[0] = XY_MONO_SRC_COPY_IMM | (5 + src_stride);
+ if (drawable->bitsPerPixel == 32)
+ b[0] |= 3 << 20;
+ b[0] |= ((r->x - origin->x) & 7) << 17;
+ b[1] = priv->gpu_bo->pitch;
+ if (sna->kgem.gen >= 40) {
+ if (priv->gpu_bo->tiling)
+ b[0] |= BLT_DST_TILED;
+ b[1] >>= 2;
+ }
+ b[1] |= (gc->fillStyle == FillStippled) << 29;
+ b[1] |= blt_depth(drawable->depth) << 24;
+ b[1] |= rop << 16;
+ b[2] = (r->y + dy) << 16 | (r->x + dx);
+ b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+ priv->gpu_bo,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ I915_GEM_DOMAIN_RENDER |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[5] = gc->bgPixel;
+ b[6] = gc->fgPixel;
- region_set(&clip, extents);
- region_maybe_clip(&clip, gc->pCompositeClip);
- if (!RegionNotEmpty(&clip))
- return true;
+ sna->kgem.nbatch += 7 + src_stride;
- pat.x = origin->x + drawable->x;
- pat.y = origin->y + drawable->y;
+ dst = (uint8_t *)&b[7];
+ bstride -= bw;
- if (clip.data == NULL) {
- do {
- BoxRec box;
- int bx1, bx2, bw, bh, bstride;
- int src_stride;
- uint8_t *dst, *src;
- uint32_t *b;
+ src_stride = stipple->devKind;
+ src = (uint8_t*)stipple->devPrivate.ptr;
+ src += (r->y - origin->y) * src_stride + bx1/8;
+ src_stride -= bw;
+ do {
+ int i = bw;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ dst += bstride;
+ src += src_stride;
+ } while (--bh);
+ } else {
struct kgem_bo *upload;
void *ptr;
- box.x1 = r->x + drawable->x;
- box.x2 = bound(r->x, r->width);
- box.y1 = r->y + drawable->y;
- box.y2 = bound(r->y, r->height);
- r++;
-
- if (!box_intersect(&box, &clip.extents))
- continue;
-
- bx1 = (box.x1 - pat.x) & ~7;
- bx2 = (box.x2 - pat.x + 7) & ~7;
- bw = (bx2 - bx1)/8;
- bh = box.y2 - box.y1;
- bstride = ALIGN(bw, 8);
-
- DBG(("%s: rect (%d, %d)x(%d, %d), box (%d,%d),(%d,%d) stipple [%d,%d], pitch=%d, stride=%d\n",
- __FUNCTION__,
- r->x, r->y, r->width, r->height,
- box.x1, box.y1, box.x2, box.y2,
- bx1, bx2, bw, bstride));
-
if (!kgem_check_batch(&sna->kgem, 8) ||
!kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
!kgem_check_reloc(&sna->kgem, 2)) {
@@ -6318,7 +6318,7 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
src_stride = stipple->devKind;
src = (uint8_t*)stipple->devPrivate.ptr;
- src += (box.y1 - pat.y) * src_stride + bx1/8;
+ src += (r->y - origin->y) * src_stride + bx1/8;
src_stride -= bw;
do {
int i = bw;
@@ -6328,12 +6328,11 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
dst += bstride;
src += src_stride;
} while (--bh);
-
b = sna->kgem.batch + sna->kgem.nbatch;
b[0] = XY_MONO_SRC_COPY;
if (drawable->bitsPerPixel == 32)
b[0] |= 3 << 20;
- b[0] |= ((box.x1 - pat.x) & 7) << 17;
+ b[0] |= ((r->x - origin->x) & 7) << 17;
b[1] = priv->gpu_bo->pitch;
if (sna->kgem.gen >= 40) {
if (priv->gpu_bo->tiling)
@@ -6343,8 +6342,8 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
b[1] |= (gc->fillStyle == FillStippled) << 29;
b[1] |= blt_depth(drawable->depth) << 24;
b[1] |= rop << 16;
- b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
- b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
+ b[2] = (r->y + dy) << 16 | (r->x + dx);
+ b[3] = (r->y + r->height + dy) << 16 | (r->x + r->width + dx);
b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
priv->gpu_bo,
I915_GEM_DOMAIN_RENDER << 16 |
@@ -6361,6 +6360,168 @@ sna_poly_fill_rect_stippled_1_blt(DrawablePtr drawable,
sna->kgem.nbatch += 8;
kgem_bo_destroy(&sna->kgem, upload);
+ }
+
+ r++;
+ } while (--n);
+ } else {
+ RegionRec clip;
+ DDXPointRec pat;
+
+ region_set(&clip, extents);
+ region_maybe_clip(&clip, gc->pCompositeClip);
+ if (!RegionNotEmpty(&clip))
+ return true;
+
+ pat.x = origin->x + drawable->x;
+ pat.y = origin->y + drawable->y;
+
+ if (clip.data == NULL) {
+ do {
+ BoxRec box;
+ int bx1, bx2, bw, bh, bstride;
+ int src_stride;
+ uint8_t *dst, *src;
+ uint32_t *b;
+ struct kgem_bo *upload;
+ void *ptr;
+
+ box.x1 = r->x + drawable->x;
+ box.x2 = bound(r->x, r->width);
+ box.y1 = r->y + drawable->y;
+ box.y2 = bound(r->y, r->height);
+ r++;
+
+ if (!box_intersect(&box, &clip.extents))
+ continue;
+
+ bx1 = (box.x1 - pat.x) & ~7;
+ bx2 = (box.x2 - pat.x + 7) & ~7;
+ bw = (bx2 - bx1)/8;
+ bh = box.y2 - box.y1;
+ bstride = ALIGN(bw, 8);
+
+ DBG(("%s: rect (%d, %d)x(%d, %d), box (%d,%d),(%d,%d) stipple [%d,%d], pitch=%d, stride=%d\n",
+ __FUNCTION__,
+ r->x, r->y, r->width, r->height,
+ box.x1, box.y1, box.x2, box.y2,
+ bx1, bx2, bw, bstride));
+
+ src_stride = bstride*bh;
+ if (src_stride <= 128) {
+ src_stride = ALIGN(src_stride, 8) / 4;
+ if (!kgem_check_batch(&sna->kgem, 7+src_stride) ||
+ !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_reloc(&sna->kgem, 1)) {
+ _kgem_submit(&sna->kgem);
+ _kgem_set_mode(&sna->kgem, KGEM_BLT);
+ }
+
+ b = sna->kgem.batch + sna->kgem.nbatch;
+ b[0] = XY_MONO_SRC_COPY_IMM | (5 + src_stride);
+ if (drawable->bitsPerPixel == 32)
+ b[0] |= 3 << 20;
+ b[0] |= ((box.x1 - pat.x) & 7) << 17;
+ b[1] = priv->gpu_bo->pitch;
+ if (sna->kgem.gen >= 40) {
+ if (priv->gpu_bo->tiling)
+ b[0] |= BLT_DST_TILED;
+ b[1] >>= 2;
+ }
+ b[1] |= blt_depth(drawable->depth) << 24;
+ b[1] |= rop << 16;
+ b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
+ b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+ priv->gpu_bo,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ I915_GEM_DOMAIN_RENDER |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[5] = gc->bgPixel;
+ b[6] = gc->fgPixel;
+
+ sna->kgem.nbatch += 7 + src_stride;
+
+ dst = (uint8_t *)&b[7];
+ bstride -= bw;
+
+ src_stride = stipple->devKind;
+ src = (uint8_t*)stipple->devPrivate.ptr;
+ src += (box.y1 - pat.y) * src_stride + bx1/8;
+ src_stride -= bw;
+ do {
+ int i = bw;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ dst += bstride;
+ src += src_stride;
+ } while (--bh);
+ } else {
+ if (!kgem_check_batch(&sna->kgem, 8) ||
+ !kgem_check_bo_fenced(&sna->kgem, priv->gpu_bo, NULL) ||
+ !kgem_check_reloc(&sna->kgem, 2)) {
+ _kgem_submit(&sna->kgem);
+ _kgem_set_mode(&sna->kgem, KGEM_BLT);
+ }
+
+ upload = kgem_create_buffer(&sna->kgem,
+ bstride*bh,
+ KGEM_BUFFER_WRITE,
+ &ptr);
+ if (!upload)
+ break;
+
+ dst = ptr;
+ bstride -= bw;
+
+ src_stride = stipple->devKind;
+ src = (uint8_t*)stipple->devPrivate.ptr;
+ src += (box.y1 - pat.y) * src_stride + bx1/8;
+ src_stride -= bw;
+ do {
+ int i = bw;
+ do {
+ *dst++ = byte_reverse(*src++);
+ } while (--i);
+ dst += bstride;
+ src += src_stride;
+ } while (--bh);
+
+ b = sna->kgem.batch + sna->kgem.nbatch;
+ b[0] = XY_MONO_SRC_COPY;
+ if (drawable->bitsPerPixel == 32)
+ b[0] |= 3 << 20;
+ b[0] |= ((box.x1 - pat.x) & 7) << 17;
+ b[1] = priv->gpu_bo->pitch;
+ if (sna->kgem.gen >= 40) {
+ if (priv->gpu_bo->tiling)
+ b[0] |= BLT_DST_TILED;
+ b[1] >>= 2;
+ }
+ b[1] |= (gc->fillStyle == FillStippled) << 29;
+ b[1] |= blt_depth(drawable->depth) << 24;
+ b[1] |= rop << 16;
+ b[2] = (box.y1 + dy) << 16 | (box.x1 + dx);
+ b[3] = (box.y2 + dy) << 16 | (box.x2 + dx);
+ b[4] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 4,
+ priv->gpu_bo,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ I915_GEM_DOMAIN_RENDER |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[5] = kgem_add_reloc(&sna->kgem, sna->kgem.nbatch + 5,
+ upload,
+ I915_GEM_DOMAIN_RENDER << 16 |
+ KGEM_RELOC_FENCED,
+ 0);
+ b[6] = gc->bgPixel;
+ b[7] = gc->fgPixel;
+
+ sna->kgem.nbatch += 8;
+ kgem_bo_destroy(&sna->kgem, upload);
+ }
} while (--n);
} else {
const BoxRec * const clip_start = RegionBoxptr(&clip);
commit 73b2ef5a7de8d733fa1821e5010480ede305e25e
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Wed Nov 2 22:02:51 2011 +0000
sna: gc->pCompositeClip always exists after validate
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 9b9c7cc..f88222f 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -111,7 +111,7 @@ static inline void region_set(RegionRec *r, const BoxRec *b)
static inline void region_maybe_clip(RegionRec *r, RegionRec *clip)
{
- if (clip && clip->data)
+ if (clip->data)
RegionIntersect(r, r, clip);
}
@@ -1079,9 +1079,6 @@ static inline bool clip_box(BoxPtr box, GCPtr gc)
const BoxRec *clip;
bool clipped;
- if (!gc->pCompositeClip)
- return false;
-
clip = &gc->pCompositeClip->extents;
clipped = gc->pCompositeClip->data != NULL;
@@ -2202,8 +2199,7 @@ sna_copy_area(DrawablePtr src, DrawablePtr dst, GCPtr gc,
region.extents.x2 = region.extents.x1 + width;
region.extents.y2 = region.extents.y1 + height;
region.data = NULL;
- if (gc->pCompositeClip)
- RegionIntersect(&region, &region, gc->pCompositeClip);
+ RegionIntersect(&region, &region, gc->pCompositeClip);
if (!RegionNotEmpty(®ion))
return NULL;
More information about the xorg-commit
mailing list