xf86-video-intel: 4 commits - src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_blt.c src/sna/sna_damage.h src/sna/sna_io.c src/sna/sna_render.h
Chris Wilson
ickle at kemper.freedesktop.org
Sun Dec 25 03:54:38 PST 2011
src/sna/kgem.h | 16 +++++++++++++
src/sna/sna_accel.c | 61 +++++++++++++++++++++++++++++++++++----------------
src/sna/sna_blt.c | 16 -------------
src/sna/sna_damage.h | 7 +++--
src/sna/sna_io.c | 16 ++-----------
src/sna/sna_render.h | 27 +++++++++++++++++++++-
6 files changed, 93 insertions(+), 50 deletions(-)
New commits:
commit f7593a995a829978ce81397dde5c9ea6f9bb7681
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Dec 25 09:40:46 2011 +0000
sna: Move the is-mappable check into the callers of region_inplace()
As they slightly differ in their requirements.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 3e69be5..7f2ebaf 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -361,6 +361,21 @@ static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
return bo->rq;
}
+static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
+{
+ DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n",
+ __FUNCTION__, bo->handle,
+ bo->domain, bo->presumed_offset, bo->size));
+
+ if (kgem_bo_is_busy(bo))
+ return true;
+
+ if (bo->presumed_offset == 0)
+ return !list_is_empty(&kgem->requests);
+
+ return !kgem_bo_is_mappable(kgem, bo);
+}
+
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
if (bo == NULL)
@@ -370,6 +385,7 @@ static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
bo = bo->proxy;
return bo->dirty;
}
+
static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
if (bo->proxy)
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 8a5bf7d..449bc72 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -303,7 +303,7 @@ static inline uint32_t default_tiling(PixmapPtr pixmap)
pixmap->drawable.height) ? I915_TILING_Y : sna->default_tiling;
}
-static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap)
+constant static uint32_t sna_pixmap_choose_tiling(PixmapPtr pixmap)
{
struct sna *sna = to_sna_from_pixmap(pixmap);
uint32_t tiling = default_tiling(pixmap);
@@ -814,15 +814,30 @@ static inline bool region_inplace(struct sna *sna,
if (priv->mapped)
return true;
- if (priv->gpu_bo && !kgem_bo_is_mappable(&sna->kgem, priv->gpu_bo))
- return false;
-
return ((region->extents.x2 - region->extents.x1) *
(region->extents.y2 - region->extents.y1) *
pixmap->drawable.bitsPerPixel >> 12)
>= sna->kgem.half_cpu_cache_pages;
}
+static bool
+sna_pixmap_create_mappable_gpu(PixmapPtr pixmap)
+{
+ struct sna *sna = to_sna_from_pixmap(pixmap);
+ struct sna_pixmap *priv = sna_pixmap(pixmap);
+
+ assert(priv->gpu_bo == NULL);
+ priv->gpu_bo =
+ kgem_create_2d(&sna->kgem,
+ pixmap->drawable.width,
+ pixmap->drawable.height,
+ pixmap->drawable.bitsPerPixel,
+ sna_pixmap_choose_tiling(pixmap),
+ CREATE_GTT_MAP | CREATE_INACTIVE);
+
+ return priv->gpu_bo != NULL;
+}
+
bool
sna_drawable_move_region_to_cpu(DrawablePtr drawable,
RegionPtr region,
@@ -866,14 +881,20 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
priv->gpu_bo->exec == NULL)
kgem_retire(&sna->kgem);
- if (!sync_will_stall(priv->gpu_bo)) {
+ if (!kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)) {
pixmap->devPrivate.ptr =
kgem_bo_map(&sna->kgem, priv->gpu_bo,
PROT_WRITE);
priv->mapped = 1;
sna_damage_subtract(&priv->cpu_damage, region);
- sna_damage_add(&priv->gpu_damage, region);
+ if (priv->cpu_damage == NULL)
+ sna_damage_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height);
+ else
+ sna_damage_add(&priv->gpu_damage,
+ region);
priv->gpu = true;
return true;
@@ -894,19 +915,23 @@ sna_drawable_move_region_to_cpu(DrawablePtr drawable,
if (priv->gpu_bo == NULL &&
sna_pixmap_choose_tiling(pixmap) != I915_TILING_NONE &&
- region_inplace(sna, pixmap, region, priv)) {
- sna_damage_subtract(&priv->cpu_damage, region);
- if (sna_pixmap_move_to_gpu(pixmap, MOVE_WRITE)) {
- pixmap->devPrivate.ptr =
- kgem_bo_map(&sna->kgem, priv->gpu_bo,
- PROT_WRITE);
- priv->mapped = 1;
+ region_inplace(sna, pixmap, region, priv) &&
+ sna_pixmap_create_mappable_gpu(pixmap)) {
+ pixmap->devPrivate.ptr =
+ kgem_bo_map(&sna->kgem, priv->gpu_bo,
+ PROT_WRITE);
+ priv->mapped = 1;
+ sna_damage_subtract(&priv->cpu_damage, region);
+ if (priv->cpu_damage == NULL)
+ sna_damage_all(&priv->gpu_damage,
+ pixmap->drawable.width,
+ pixmap->drawable.height);
+ else
sna_damage_add(&priv->gpu_damage, region);
- priv->gpu = true;
- return true;
- }
+ priv->gpu = true;
+ return true;
}
}
@@ -1752,7 +1777,7 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
if ((priv->flush ||
(priv->gpu_bo &&
region_inplace(sna, pixmap, region, priv) &&
- !kgem_bo_is_busy(priv->gpu_bo))) &&
+ !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))) &&
sna_put_image_upload_blt(drawable, gc, region,
x, y, w, h, bits, stride)) {
if (region_subsumes_drawable(region, &pixmap->drawable)) {
diff --git a/src/sna/sna_io.c b/src/sna/sna_io.c
index 15fe42c..489c1cc 100644
--- a/src/sna/sna_io.c
+++ b/src/sna/sna_io.c
@@ -82,17 +82,6 @@ static void read_boxes_inplace(struct kgem *kgem,
} while (--n);
}
-static bool map_will_stall(struct kgem *kgem, struct kgem_bo *bo)
-{
- if (kgem_bo_is_busy(bo))
- return true;
-
- if (!kgem_bo_is_mappable(kgem, bo))
- return true;
-
- return false;
-}
-
void sna_read_boxes(struct sna *sna,
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy,
PixmapPtr dst, int16_t dst_dx, int16_t dst_dy,
@@ -119,7 +108,8 @@ void sna_read_boxes(struct sna *sna,
return;
}
- if (src_bo->tiling != I915_TILING_X && !map_will_stall(kgem, src_bo)){
+ if (src_bo->tiling != I915_TILING_X &&
+ !kgem_bo_map_will_stall(kgem, src_bo)) {
read_boxes_inplace(kgem,
src_bo, src_dx, src_dy,
dst, dst_dx, dst_dy,
@@ -314,7 +304,7 @@ void sna_write_boxes(struct sna *sna,
DBG(("%s x %d\n", __FUNCTION__, nbox));
if (DEBUG_NO_IO || kgem->wedged || dst_bo->tiling == I915_TILING_Y ||
- !map_will_stall(kgem, dst_bo)) {
+ !kgem_bo_map_will_stall(kgem, dst_bo)) {
write_boxes_inplace(kgem,
src, stride, bpp, src_dx, src_dy,
dst_bo, dst_dx, dst_dy,
commit aff32e3e08fce3c7b8dab3a25c96d69c409471fc
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Dec 25 00:36:33 2011 +0000
sna: Tweak damage not to reduce if it will not affect the outcome of reducing to all
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_damage.h b/src/sna/sna_damage.h
index d74d5de..67c44c3 100644
--- a/src/sna/sna_damage.h
+++ b/src/sna/sna_damage.h
@@ -147,14 +147,15 @@ static inline void sna_damage_reduce_all(struct sna_damage **damage,
if (*damage == NULL)
return;
- if ((*damage)->dirty && (*damage = _sna_damage_reduce(*damage)) == NULL)
- return;
-
if ((*damage)->mode == DAMAGE_ADD &&
(*damage)->extents.x1 <= 0 &&
(*damage)->extents.y1 <= 0 &&
(*damage)->extents.x2 >= width &&
(*damage)->extents.y2 >= height) {
+ if ((*damage)->dirty &&
+ (*damage = _sna_damage_reduce(*damage)) == NULL)
+ return;
+
if ((*damage)->region.data == NULL)
*damage = _sna_damage_all(*damage, width, height);
}
commit be21a09fef0a3499153f2c123b65f1d5c872d437
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Dec 25 00:28:55 2011 +0000
sna: Only call move-to-gpu on scanout flush if we have cpu damage to move
Reduce the number of redundant calls.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 284c489..8a5bf7d 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -9110,7 +9110,7 @@ static bool sna_accel_flush(struct sna *sna)
if (nothing_to_do && !sna->kgem.busy)
_sna_accel_disarm_timer(sna, FLUSH_TIMER);
- else
+ if (priv->cpu_damage)
sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ);
sna->kgem.busy = !nothing_to_do;
kgem_bo_flush(&sna->kgem, priv->gpu_bo);
commit 7b69dc88094ee2bb017364cd4356f3ae69cbc1f1
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Dec 25 00:21:21 2011 +0000
sna: Inline the common portion of sna_get_pixel_from_rgba()
The function overhead completely dominates for the common case.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_blt.c b/src/sna/sna_blt.c
index 053b357..220b900 100644
--- a/src/sna/sna_blt.c
+++ b/src/sna/sna_blt.c
@@ -455,7 +455,7 @@ get_rgba_from_pixel(uint32_t pixel,
}
Bool
-sna_get_pixel_from_rgba(uint32_t * pixel,
+_sna_get_pixel_from_rgba(uint32_t * pixel,
uint16_t red,
uint16_t green,
uint16_t blue,
@@ -465,20 +465,6 @@ sna_get_pixel_from_rgba(uint32_t * pixel,
int rbits, bbits, gbits, abits;
int rshift, bshift, gshift, ashift;
- switch (format) {
- case PICT_x8r8g8b8:
- alpha = 0xffff;
- case PICT_a8r8g8b8:
- *pixel = ((alpha >> 8 << 24) |
- (red >> 8 << 16) |
- (green & 0xff00) |
- (blue >> 8));
- return TRUE;
- case PICT_a8:
- *pixel = alpha >> 8;
- return TRUE;
- }
-
rbits = PICT_FORMAT_R(format);
gbits = PICT_FORMAT_G(format);
bbits = PICT_FORMAT_B(format);
diff --git a/src/sna/sna_render.h b/src/sna/sna_render.h
index bebbed2..d5c7b2e 100644
--- a/src/sna/sna_render.h
+++ b/src/sna/sna_render.h
@@ -527,13 +527,38 @@ Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu,
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy,
const BoxRec *box, int nbox);
-Bool sna_get_pixel_from_rgba(uint32_t *pixel,
+Bool _sna_get_pixel_from_rgba(uint32_t *pixel,
uint16_t red,
uint16_t green,
uint16_t blue,
uint16_t alpha,
uint32_t format);
+static inline Bool
+sna_get_pixel_from_rgba(uint32_t * pixel,
+ uint16_t red,
+ uint16_t green,
+ uint16_t blue,
+ uint16_t alpha,
+ uint32_t format)
+{
+ switch (format) {
+ case PICT_x8r8g8b8:
+ alpha = 0xffff;
+ case PICT_a8r8g8b8:
+ *pixel = ((alpha >> 8 << 24) |
+ (red >> 8 << 16) |
+ (green & 0xff00) |
+ (blue >> 8));
+ return TRUE;
+ case PICT_a8:
+ *pixel = alpha >> 8;
+ return TRUE;
+ }
+
+ return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format);
+}
+
int
sna_render_pixmap_bo(struct sna *sna,
struct sna_composite_channel *channel,
More information about the xorg-commit
mailing list