xf86-video-intel: 4 commits - src/sna/gen2_render.c src/sna/gen3_render.c src/sna/kgem.c src/sna/kgem.h src/sna/sna_accel.c src/sna/sna_render_inline.h
Chris Wilson
ickle at kemper.freedesktop.org
Sun Dec 18 16:41:04 PST 2011
src/sna/gen2_render.c | 17 +++++++++++++----
src/sna/gen3_render.c | 17 +++++++++++++----
src/sna/kgem.c | 6 ++++++
src/sna/kgem.h | 9 +++++++--
src/sna/sna_accel.c | 7 ++++++-
src/sna/sna_render_inline.h | 2 +-
6 files changed, 46 insertions(+), 12 deletions(-)
New commits:
commit 1fa5721f064a8d1f34e4032b52f24597f4015313
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Dec 19 00:37:03 2011 +0000
sna: Reset the GTT mapping flag when freeing the shadow pointers
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index 69bbc54..e00f620 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -242,6 +242,7 @@ static void sna_pixmap_free_cpu(struct sna *sna, struct sna_pixmap *priv)
priv->pixmap->devPrivate.ptr = priv->ptr = NULL;
list_del(&priv->list);
+ priv->mapped = 0;
}
static Bool sna_destroy_private(PixmapPtr pixmap, struct sna_pixmap *priv)
commit 7326d3098662688b8040c6e1261064caed1f5d06
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Dec 19 00:35:14 2011 +0000
sna: Restore CPU shadow after a GTT mapping
When mixing operations and switching between GTT and CPU mappings we
need to restore the original CPU shadow rather than accidentally
overwrite it.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/sna_accel.c b/src/sna/sna_accel.c
index c44dcc1..69bbc54 100644
--- a/src/sna/sna_accel.c
+++ b/src/sna/sna_accel.c
@@ -190,7 +190,10 @@ sna_pixmap_alloc_cpu(struct sna *sna,
struct sna_pixmap *priv,
bool from_gpu)
{
- assert(priv->ptr == NULL);
+ /* Restore after a GTT mapping? */
+ if (priv->ptr)
+ goto done;
+
assert(pixmap->devKind);
if (!DEBUG_NO_LLC && sna->kgem.gen >= 60) {
@@ -219,6 +222,7 @@ sna_pixmap_alloc_cpu(struct sna *sna,
priv->ptr = malloc(pixmap->devKind * pixmap->drawable.height);
assert(priv->ptr);
+done:
pixmap->devPrivate.ptr = priv->ptr;
return priv->ptr != NULL;
}
commit ae32aaf4b20dafef138dc9c28dbddbfe49f24b83
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Mon Dec 19 00:34:12 2011 +0000
sna/gen[23]: We need to check the batch before doing an inline flush
A missing check before emitting a dword into the batch opened up the
possibility of overflowing the batch and corrupting our state.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/gen2_render.c b/src/sna/gen2_render.c
index c8d093b..373866b 100644
--- a/src/sna/gen2_render.c
+++ b/src/sna/gen2_render.c
@@ -499,14 +499,22 @@ static void gen2_emit_invariant(struct sna *sna)
sna->render_state.gen2.need_invariant = FALSE;
}
+static bool
+gen2_check_batch(struct sna *sna)
+{
+ return (kgem_check_batch(&sna->kgem, 30+40) &&
+ kgem_check_reloc(&sna->kgem, 3) &&
+ kgem_check_exec(&sna->kgem, 3));
+}
+
static void
gen2_get_batch(struct sna *sna)
{
kgem_set_mode(&sna->kgem, KGEM_RENDER);
- if (!kgem_check_batch(&sna->kgem, 28+40)) {
+ if (!kgem_check_batch(&sna->kgem, 30+40)) {
DBG(("%s: flushing batch: size %d > %d\n",
- __FUNCTION__, 28+40,
+ __FUNCTION__, 30+40,
sna->kgem.surface-sna->kgem.nbatch));
kgem_submit(&sna->kgem);
}
@@ -1537,12 +1545,13 @@ gen2_render_composite(struct sna *sna,
if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
kgem_emit_flush(&sna->kgem);
- } else {
+ } else if (gen2_check_batch(sna)) {
BATCH(_3DSTATE_MODES_5_CMD |
PIPELINE_FLUSH_RENDER_CACHE |
PIPELINE_FLUSH_TEXTURE_CACHE);
kgem_clear_dirty(&sna->kgem);
- }
+ } else
+ kgem_submit(&sna->kgem);
}
gen2_emit_composite_state(sna, tmp);
diff --git a/src/sna/gen3_render.c b/src/sna/gen3_render.c
index 96c0d9b..fe3a359 100644
--- a/src/sna/gen3_render.c
+++ b/src/sna/gen3_render.c
@@ -1198,11 +1198,19 @@ static void gen3_emit_invariant(struct sna *sna)
sna->render_state.gen3.need_invariant = FALSE;
}
+#define MAX_OBJECTS 3 /* worst case: dst + src + mask */
+
+static bool
+gen3_check_batch(struct sna *sna)
+{
+ return (kgem_check_batch(&sna->kgem, 200) &&
+ kgem_check_reloc(&sna->kgem, MAX_OBJECTS) &&
+ kgem_check_exec(&sna->kgem, MAX_OBJECTS));
+}
+
static void
gen3_get_batch(struct sna *sna)
{
-#define MAX_OBJECTS 3 /* worst case: dst + src + mask */
-
kgem_set_mode(&sna->kgem, KGEM_RENDER);
if (!kgem_check_batch(&sna->kgem, 200)) {
@@ -2619,12 +2627,13 @@ gen3_render_composite(struct sna *sna,
if (kgem_bo_is_dirty(tmp->src.bo) || kgem_bo_is_dirty(tmp->mask.bo)) {
if (tmp->src.bo == tmp->dst.bo || tmp->mask.bo == tmp->dst.bo) {
kgem_emit_flush(&sna->kgem);
- } else {
+ } else if (gen3_check_batch(sna)) {
OUT_BATCH(_3DSTATE_MODES_5_CMD |
PIPELINE_FLUSH_RENDER_CACHE |
PIPELINE_FLUSH_TEXTURE_CACHE);
kgem_clear_dirty(&sna->kgem);
- }
+ } else
+ kgem_submit(&sna->kgem);
}
gen3_emit_composite_state(sna, tmp);
diff --git a/src/sna/kgem.h b/src/sna/kgem.h
index 4e30d58..efc4e80 100644
--- a/src/sna/kgem.h
+++ b/src/sna/kgem.h
@@ -277,9 +277,14 @@ static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}
-static inline bool kgem_check_reloc(struct kgem *kgem, int num_reloc)
+static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
- return likely(kgem->nreloc + num_reloc <= KGEM_RELOC_SIZE(kgem));
+ return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
+}
+
+static inline bool kgem_check_exec(struct kgem *kgem, int n)
+{
+ return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}
static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
diff --git a/src/sna/sna_render_inline.h b/src/sna/sna_render_inline.h
index daa8f6f..758833e 100644
--- a/src/sna/sna_render_inline.h
+++ b/src/sna/sna_render_inline.h
@@ -52,7 +52,7 @@ static inline int batch_space(struct sna *sna)
static inline void batch_emit(struct sna *sna, uint32_t dword)
{
- assert(sna->kgem.nbatch < sna->kgem.surface);
+ assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface);
sna->kgem.batch[sna->kgem.nbatch++] = dword;
}
commit e32ad646762ccc7f22f938454e222d43abfb38ed
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Sun Dec 18 23:42:07 2011 +0000
sna: Continue searching the linear lists for CPU mappings
Prefer to reuse an available CPU mapping, which is considered precious
and reaped if we keep too many unused entries available.
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index c0034d5..41aded6 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1483,6 +1483,12 @@ search_linear_cache(struct kgem *kgem, unsigned int size, unsigned flags)
first = bo;
continue;
}
+ } else {
+ if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) {
+ if (first == NULL)
+ first = bo;
+ continue;
+ }
}
if (I915_TILING_NONE != bo->tiling) {
More information about the xorg-commit
mailing list