xf86-video-intel: 7 commits - src/sna/blt.c src/sna/kgem.c src/sna/sna_accel.c src/sna/sna_damage.c
Chris Wilson
ickle at kemper.freedesktop.org
Sun Jan 1 13:16:46 PST 2012
src/sna/blt.c | 4 -
src/sna/kgem.c | 23 +++++++---
src/sna/sna_accel.c | 11 ----
src/sna/sna_damage.c | 113 ++++++++++++++++++++++++++++++++++-----------------
4 files changed, 97 insertions(+), 54 deletions(-)
New commits:
commit 777fcc9b11e8cbfe942aa6bf898749f552acb3cf
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 16:03:42 2012 +0000
sna: Prefer not to force the creation of gpu bo for pixmap uploads
As this causes a significant regression when benchmarking firefox on SNB
with firefox-planet-gnome if we already have CPU buffers.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index d66a67a..0b9565c 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -1820,7 +1820,8 @@ sna_put_zpixmap_blt(DrawablePtr drawable, GCPtr gc, RegionPtr region,
*/
if ((priv->flush ||
(region_inplace(sna, pixmap, region, priv) &&
- (priv->gpu_bo == NULL || !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo)))) &&
+ ((priv->gpu_bo == NULL && priv->cpu_bo == NULL) ||
+ (priv->gpu_bo != NULL && !kgem_bo_map_will_stall(&sna->kgem, priv->gpu_bo))))) &&
sna_put_image_upload_blt(drawable, gc, region,
x, y, w, h, bits, stride)) {
if (region_subsumes_drawable(region, &pixmap->drawable)) {
commit 1a6c16a24cb74f82df9757c034c50f2cf141523f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 14:32:48 2012 +0000
sna: Keep a freelist of buffers
As reallocation of bo is the most frequent cause of malloc/free.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 969036e..7424a8a 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -108,6 +108,7 @@ struct kgem_partial_bo {
uint32_t write : 1;
};
+static struct kgem_bo *__kgem_freed_bo;
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec;
static void kgem_sna_reset(struct kgem *kgem)
@@ -381,9 +382,14 @@ static struct kgem_bo *__kgem_bo_alloc(int handle, int size)
{
struct kgem_bo *bo;
- bo = malloc(sizeof(*bo));
- if (bo == NULL)
- return NULL;
+ if (__kgem_freed_bo) {
+ bo = __kgem_freed_bo;
+ __kgem_freed_bo = *(struct kgem_bo **)bo;
+ } else {
+ bo = malloc(sizeof(*bo));
+ if (bo == NULL)
+ return NULL;
+ }
return __kgem_bo_init(bo, handle, size);
}
@@ -747,7 +753,12 @@ static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo)
_list_del(&bo->list);
_list_del(&bo->request);
gem_close(kgem->fd, bo->handle);
- free(bo);
+
+ if (!bo->io) {
+ *(struct kgem_bo **)bo = __kgem_freed_bo;
+ __kgem_freed_bo = bo;
+ } else
+ free(bo);
}
static bool is_mmaped_buffer(struct kgem_partial_bo *bo)
commit f008a2a90c632d843f07846426f6d14af728c13b
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 10:24:49 2012 +0000
sna: Tune blt routine
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/blt.c b/src/sna/blt.c
index 1b56cc7..7a77fa4 100644
--- a/src/sna/blt.c
+++ b/src/sna/blt.c
@@ -60,8 +60,8 @@ memcpy_blt(const void *src, void *dst, int bpp,
byte_width = width * bpp;
if (byte_width == src_stride && byte_width == dst_stride) {
- memcpy(dst_bytes, src_bytes, byte_width * height);
- return;
+ byte_width *= height;
+ height = 1;
}
switch (byte_width) {
commit b57c7248acc2ad6a51b7eac74449d155defc998f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 14:10:20 2012 +0000
sna: Delete some redundant code
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 94b051e..d66a67a 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -721,8 +721,6 @@ skip_inplace_map:
sna_pixmap_free_cpu(sna, priv);
}
}
-
- sna_damage_destroy(&priv->gpu_damage);
}
if (priv->mapped) {
@@ -734,11 +732,6 @@ skip_inplace_map:
!sna_pixmap_alloc_cpu(sna, pixmap, priv, priv->gpu_damage != NULL))
return false;
- if (priv->gpu_bo == NULL) {
- DBG(("%s: no GPU bo\n", __FUNCTION__));
- goto done;
- }
-
if (priv->gpu_damage) {
BoxPtr box;
int n;
@@ -769,7 +762,6 @@ skip_inplace_map:
priv->gpu_damage = NULL;
}
-done:
if (priv->cpu_bo) {
DBG(("%s: syncing CPU bo\n", __FUNCTION__));
kgem_bo_sync__cpu(&sna->kgem, priv->cpu_bo);
commit 83d16455b70e2f0348e51f95128d4fcae3736e7f
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 13:58:44 2012 +0000
sna: Eliminate one union pass when reducing damage-add
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index d720c5c..44677ed 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -196,7 +196,7 @@ static bool _sna_damage_create_boxes(struct sna_damage *damage,
box = list_entry(damage->embedded_box.list.prev,
struct sna_damage_box,
list);
- n = 2*box->size;
+ n = 4*box->size;
if (n < count)
n = ALIGN(count, 64);
@@ -388,8 +388,8 @@ static void free_list(struct list *head)
static void __sna_damage_reduce(struct sna_damage *damage)
{
int n, nboxes;
- BoxPtr boxes;
- pixman_region16_t tmp, *region = &damage->region;
+ BoxPtr boxes, free_boxes = NULL;
+ pixman_region16_t *region = &damage->region;
struct sna_damage_box *iter;
assert(damage->mode != DAMAGE_ALL);
@@ -398,38 +398,72 @@ static void __sna_damage_reduce(struct sna_damage *damage)
DBG((" reduce: before region.n=%d\n", REGION_NUM_RECTS(region)));
nboxes = damage->embedded_box.size;
- boxes = damage->embedded_box.box;
list_for_each_entry(iter, &damage->embedded_box.list, list)
nboxes += iter->size;
DBG((" nboxes=%d, residual=%d\n", nboxes, damage->remain));
nboxes -= damage->remain;
if (nboxes == 0)
goto done;
- if (nboxes > damage->embedded_box.size) {
+ if (damage->mode == DAMAGE_ADD)
+ nboxes += REGION_NUM_RECTS(region);
+
+ iter = list_entry(damage->embedded_box.list.prev,
+ struct sna_damage_box,
+ list);
+ n = iter->size - damage->remain;
+ boxes = (BoxRec *)(iter+1);
+ if (nboxes > iter->size) {
boxes = malloc(sizeof(BoxRec)*nboxes);
if (boxes == NULL)
goto done;
- memcpy(boxes, damage->embedded_box.box, sizeof(damage->embedded_box.box));
- n = damage->embedded_box.size;
- list_for_each_entry(iter, &damage->embedded_box.list, list) {
- int len = iter->size;
- if (n + len > nboxes)
- len = nboxes - n;
- DBG((" copy %d/%d boxes from %d\n", len, iter->size, n));
- memcpy(boxes + n, iter+1, len * sizeof(BoxRec));
- n += len;
+ free_boxes = boxes;
+ }
+
+ if (boxes != damage->embedded_box.box) {
+ if (list_is_empty(&damage->embedded_box.list)) {
+ memcpy(boxes,
+ damage->embedded_box.box,
+ n*sizeof(BoxRec));
+ } else {
+ if (damage->mode == DAMAGE_ADD)
+ nboxes -= REGION_NUM_RECTS(region);
+
+ memcpy(boxes,
+ damage->embedded_box.box,
+ sizeof(damage->embedded_box.box));
+ n = damage->embedded_box.size;
+
+ list_for_each_entry(iter, &damage->embedded_box.list, list) {
+ int len = iter->size;
+ if (n + len > nboxes)
+ len = nboxes - n;
+ DBG((" copy %d/%d boxes from %d\n", len, iter->size, n));
+ memcpy(boxes + n, iter+1, len * sizeof(BoxRec));
+ n += len;
+ }
+
+ if (damage->mode == DAMAGE_ADD)
+ nboxes += REGION_NUM_RECTS(region);
}
- assert(n == nboxes);
}
- pixman_region_init_rects(&tmp, boxes, nboxes);
- if (damage->mode == DAMAGE_ADD)
- pixman_region_union(region, region, &tmp);
- else
+ if (damage->mode == DAMAGE_ADD) {
+ memcpy(boxes + n,
+ REGION_RECTS(region),
+ REGION_NUM_RECTS(region)*sizeof(BoxRec));
+ assert(n + REGION_NUM_RECTS(region) == nboxes);
+ pixman_region_fini(region);
+ pixman_region_init_rects(region, boxes, nboxes);
+ } else {
+ pixman_region16_t tmp;
+
+ pixman_region_init_rects(&tmp, boxes, nboxes);
pixman_region_subtract(region, region, &tmp);
- pixman_region_fini(&tmp);
- if (boxes != damage->embedded_box.box)
+ pixman_region_fini(&tmp);
+ }
+
+ if (free_boxes)
free(boxes);
damage->extents = region->extents;
@@ -969,6 +1003,15 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
return NULL;
}
+ if (damage->mode == DAMAGE_ALL) {
+ pixman_region_subtract(&damage->region,
+ &damage->region,
+ region);
+ damage->extents = damage->region.extents;
+ damage->mode = DAMAGE_ADD;
+ return damage;
+ }
+
if (damage->mode != DAMAGE_SUBTRACT) {
if (damage->dirty)
__sna_damage_reduce(damage);
@@ -989,7 +1032,6 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
&damage->region,
region);
damage->extents = damage->region.extents;
- damage->mode = DAMAGE_ADD; /* reduce from ALL */
return damage;
}
commit 2f5b769e7144c7b85010429e6d8aebf9c9fc1628
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Jan 1 10:00:37 2012 +0000
sna: Move some checks against programming errors to asserts
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_damage.c b/src/sna/sna_damage.c
index 4a2dd80..d720c5c 100644
--- a/src/sna/sna_damage.c
+++ b/src/sna/sna_damage.c
@@ -445,8 +445,7 @@ done:
inline static struct sna_damage *__sna_damage_add(struct sna_damage *damage,
RegionPtr region)
{
- if (!RegionNotEmpty(region))
- return damage;
+ assert(RegionNotEmpty(region));
if (!damage) {
damage = _sna_damage_create();
@@ -907,8 +906,13 @@ struct sna_damage *_sna_damage_all(struct sna_damage *damage,
struct sna_damage *_sna_damage_is_all(struct sna_damage *damage,
int width, int height)
{
- if (damage->dirty)
+ if (damage->dirty) {
__sna_damage_reduce(damage);
+ if (!RegionNotEmpty(&damage->region)) {
+ __sna_damage_destroy(damage);
+ return NULL;
+ }
+ }
if (damage->region.data)
return damage;
@@ -952,17 +956,13 @@ static struct sna_damage *__sna_damage_subtract(struct sna_damage *damage,
if (damage == NULL)
return NULL;
- if (!RegionNotEmpty(&damage->region)) {
- __sna_damage_destroy(damage);
- return NULL;
- }
-
- if (!RegionNotEmpty(region))
- return damage;
+ assert(RegionNotEmpty(region));
if (!sna_damage_maybe_contains_box(damage, &region->extents))
return damage;
+ assert(RegionNotEmpty(&damage->region));
+
if (region_is_singular(region) &&
box_contains(&region->extents, &damage->extents)) {
__sna_damage_destroy(damage);
@@ -1033,14 +1033,11 @@ inline static struct sna_damage *__sna_damage_subtract_box(struct sna_damage *da
if (damage == NULL)
return NULL;
- if (!RegionNotEmpty(&damage->region)) {
- __sna_damage_destroy(damage);
- return NULL;
- }
-
if (!sna_damage_maybe_contains_box(damage, box))
return damage;
+ assert(RegionNotEmpty(&damage->region));
+
if (box_contains(box, &damage->extents)) {
__sna_damage_destroy(damage);
return NULL;
commit 24bf59c43fbedd9d85c913ac3c7f0e752c232eb4
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sat Dec 31 18:57:57 2011 +0000
Revert "sna: Increase the minimum alignment constraint for g33"
This reverts commit 2934e778f01cdf1307732b248b11a31c0e79e866. The actual
cause of the bug I was seeing on my PNV box turned out to be
a1f585a3d0a, so time to reinvestigate the alignment issues.
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 89b464e..969036e 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -559,7 +559,7 @@ static uint32_t kgem_untiled_pitch(struct kgem *kgem,
/* XXX workaround an issue on gen3 where we appear to fail to
* disable dual-stream mode */
return ALIGN(width * bpp,
- scanout || kgem->gen < 40 ? 8*64 : 8*4) >> 3;
+ scanout || (kgem->gen >= 30 && kgem->gen < 33) ? 8*64 : 8*4) >> 3;
}
static uint32_t kgem_surface_size(struct kgem *kgem,
@@ -585,7 +585,7 @@ static uint32_t kgem_surface_size(struct kgem *kgem,
} else switch (tiling) {
default:
case I915_TILING_NONE:
- tile_width = scanout || kgem->gen < 40 ? 64 : 4;
+ tile_width = scanout || kgem->gen < 33 ? 64 : 4;
tile_height = 2;
break;
case I915_TILING_X:
More information about the xorg-commit
mailing list