xf86-video-intel: 3 commits - src/sna/kgem.c
Chris Wilson
ickle at kemper.freedesktop.org
Fri Aug 7 04:04:01 PDT 2015
src/sna/kgem.c | 35 +++++++++++++++++++++--------------
1 file changed, 21 insertions(+), 14 deletions(-)
New commits:
commit 672436efd5b69fb9007cb80804a351b1e1572b60
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Fri Aug 7 12:03:34 2015 +0100
sna: Treat being wedged as unlikely during rendering with HW
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6873a18..bc393ff 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1518,7 +1518,7 @@ static bool kgem_init_pinned_batches(struct kgem *kgem)
int ret = 0;
int n, i;
- if (kgem->wedged)
+ if (unlikely(kgem->wedged))
return true;
for (n = 0; n < ARRAY_SIZE(count); n++) {
@@ -4127,7 +4127,7 @@ void _kgem_submit(struct kgem *kgem)
kgem_commit(kgem);
}
- if (kgem->wedged)
+ if (unlikely(kgem->wedged))
kgem_cleanup(kgem);
kgem_reset(kgem);
@@ -4137,7 +4137,7 @@ void _kgem_submit(struct kgem *kgem)
void kgem_throttle(struct kgem *kgem)
{
- if (kgem->wedged)
+ if (unlikely(kgem->wedged))
return;
if (__kgem_throttle(kgem, true)) {
@@ -4274,7 +4274,7 @@ bool kgem_expire_cache(struct kgem *kgem)
#endif
kgem_retire(kgem);
- if (kgem->wedged)
+ if (unlikely(kgem->wedged))
kgem_cleanup(kgem);
kgem->expire(kgem);
commit ccc553ff034534233f08ce306d4c4911059337c6
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Fri Aug 7 12:01:38 2015 +0100
sna: Stop allocating requests once wedged
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 6ebca96..6873a18 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1005,13 +1005,17 @@ static struct kgem_request *__kgem_request_alloc(struct kgem *kgem)
{
struct kgem_request *rq;
- rq = __kgem_freed_request;
- if (rq) {
- __kgem_freed_request = *(struct kgem_request **)rq;
+ if (unlikely(kgem->wedged)) {
+ rq = &kgem->static_request;
} else {
- rq = malloc(sizeof(*rq));
- if (rq == NULL)
- rq = &kgem->static_request;
+ rq = __kgem_freed_request;
+ if (rq) {
+ __kgem_freed_request = *(struct kgem_request **)rq;
+ } else {
+ rq = malloc(sizeof(*rq));
+ if (rq == NULL)
+ rq = &kgem->static_request;
+ }
}
list_init(&rq->buffers);
commit b0f125495caced05548442bc2fe64a4b1b46339c
Author: Chris Wilson <chris at chris-wilson.co.uk>
Date: Fri Aug 7 11:57:27 2015 +0100
sna: Tweak DBG traces for cache cleanup
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
diff --git a/src/sna/kgem.c b/src/sna/kgem.c
index 014a31b..6ebca96 100644
--- a/src/sna/kgem.c
+++ b/src/sna/kgem.c
@@ -1704,6 +1704,7 @@ static int kgem_bo_wait(struct kgem *kgem, struct kgem_bo *bo)
struct drm_i915_gem_wait wait;
int ret;
+ DBG(("%s: waiting for handle=%d\n", __FUNCTION__, bo->handle));
if (bo->rq == NULL)
return 0;
@@ -1756,14 +1757,12 @@ restart:
if (kgem->batch_bo)
kgem->batch = kgem_bo_map__cpu(kgem, kgem->batch_bo);
if (kgem->batch == NULL) {
- DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
- __FUNCTION__,
- sizeof(uint32_t)*kgem->batch_size));
if (kgem->batch_bo) {
kgem_bo_destroy(kgem, kgem->batch_bo);
kgem->batch_bo = NULL;
}
+ assert(kgem->ring < ARRAY_SIZE(kgem->requests));
if (!list_is_empty(&kgem->requests[kgem->ring])) {
struct kgem_request *rq;
@@ -1773,6 +1772,8 @@ restart:
goto restart;
}
+ DBG(("%s: unable to map batch bo, mallocing(size=%d)\n",
+ __FUNCTION__, sizeof(uint32_t)*kgem->batch_size));
if (posix_memalign((void **)&kgem->batch, PAGE_SIZE,
ALIGN(sizeof(uint32_t) * kgem->batch_size, PAGE_SIZE))) {
ERR(("%s: batch allocation failed, disabling acceleration\n", __FUNCTION__));
@@ -2666,7 +2667,6 @@ static void kgem_bo_move_to_scanout(struct kgem *kgem, struct kgem_bo *bo)
list_move(&bo->list, &kgem->scanout);
kgem->need_expire = true;
-
}
static void kgem_bo_move_to_snoop(struct kgem *kgem, struct kgem_bo *bo)
@@ -3101,6 +3101,7 @@ static bool kgem_retire__requests_ring(struct kgem *kgem, int ring)
{
bool retired = false;
+ assert(ring < ARRAY_SIZE(kgem->requests));
while (!list_is_empty(&kgem->requests[ring])) {
struct kgem_request *rq;
@@ -3980,6 +3981,7 @@ void _kgem_submit(struct kgem *kgem)
assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc));
assert(kgem->nexec < ARRAY_SIZE(kgem->exec));
assert(kgem->nfence <= kgem->fence_max);
+ assert(kgem->ring < ARRAY_SIZE(kgem->requests));
kgem_finish_buffers(kgem);
@@ -6001,6 +6003,7 @@ inline static bool nearly_idle(struct kgem *kgem)
{
int ring = kgem->ring == KGEM_BLT;
+ assert(ring < ARRAY_SIZE(kgem->requests));
if (list_is_singular(&kgem->requests[ring]))
return true;
More information about the xorg-commit mailing list