[PATCH 2/4] drm/etnaviv: move dependency handling to scheduler

Lucas Stach <l.stach@pengutronix.de>
Wed Dec 6 17:10:50 UTC 2017


Move the fence dependency handling to the scheduler, where it belongs.
Jobs with unsignaled dependencies now simply wait in the scheduler queue
without holding any locks.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 drivers/gpu/drm/etnaviv/etnaviv_gem.h        |  3 ++
 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 37 +++++++++++----------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.c        | 48 ----------------------------
 drivers/gpu/drm/etnaviv/etnaviv_gpu.h        |  3 --
 drivers/gpu/drm/etnaviv/etnaviv_sched.c      | 45 ++++++++++++++++++++++++++
 5 files changed, 69 insertions(+), 67 deletions(-)
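
Note for reviewers less familiar with the GPU scheduler side of this: the
->dependency() callback (etnaviv_sched_dependency() below) is expected to
hand the scheduler one unsignaled fence per invocation and NULL once the
job has no blockers left; the scheduler keeps the job queued until that
point. Below is a rough, stand-alone userspace model of that contract,
not kernel code - all toy_* names are made up purely for illustration:

/* Toy model of the scheduler dependency contract.  All types and
 * function names are invented for illustration; they are not the
 * real drm_sched APIs.  Build with: cc -o dep-model dep-model.c
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_fence {
	bool signaled;
};

struct toy_job {
	struct toy_fence *deps[4];	/* collected at submit time */
	size_t nr_deps;
};

/*
 * Modeled after the ->dependency() callback: return the first dependency
 * that is still unsignaled, forgetting the ones that have already
 * signaled.  NULL means the job has no blockers left.
 */
static struct toy_fence *job_next_dependency(struct toy_job *job)
{
	for (size_t i = 0; i < job->nr_deps; i++) {
		struct toy_fence *fence = job->deps[i];

		if (!fence)
			continue;

		job->deps[i] = NULL;
		if (!fence->signaled)
			return fence;	/* scheduler waits on this one */
	}

	return NULL;			/* ready to run */
}

/* Scheduler side: keep asking for dependencies until none remain. */
static void scheduler_try_run(struct toy_job *job)
{
	struct toy_fence *blocker;

	while ((blocker = job_next_dependency(job))) {
		printf("waiting for fence %p\n", (void *)blocker);
		blocker->signaled = true;	/* stand-in for a fence callback */
	}

	printf("all dependencies signaled, running job\n");
}

int main(void)
{
	struct toy_fence a = { .signaled = true };
	struct toy_fence b = { .signaled = false };
	struct toy_job job = { .deps = { &a, &b }, .nr_deps = 2 };

	scheduler_try_run(&job);
	return 0;
}

The real callback additionally drops its reference on every fence it
consumes (dma_fence_put()) and frees the shared-fence array once it has
been drained.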

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index ae352f2a77f9..93e696fcc14f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -94,6 +94,9 @@ struct etnaviv_gem_submit_bo {
 	u32 flags;
 	struct etnaviv_gem_object *obj;
 	struct etnaviv_vram_mapping *mapping;
+	struct dma_fence *excl;
+	unsigned int nr_shared;
+	struct dma_fence **shared;
 };
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 0bc89e4daade..ae0bf3e94580 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -170,29 +170,34 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
 	return ret;
 }
 
-static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+static int submit_fence_sync(struct etnaviv_gem_submit *submit)
 {
-	unsigned int context = submit->gpu->fence_context;
 	int i, ret = 0;
 
 	for (i = 0; i < submit->nr_bos; i++) {
-		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+		struct reservation_object *robj = bo->obj->resv;
 		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
-		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
-		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
-						 explicit);
-		if (ret)
-			break;
-	}
+		if (!write) {
+			ret = reservation_object_reserve_shared(robj);
+			if (ret)
+				return ret;
+		}
+
+		if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
+			continue;
+
+		if (write) {
+			ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+								&bo->nr_shared,
+								&bo->shared);
+			if (ret)
+				return ret;
+		} else {
+			bo->excl = reservation_object_get_excl_rcu(robj);
+		}
 
-	if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
-		/*
-		 * Wait if the fence is from a foreign context, or if the fence
-		 * array contains any fence from a foreign context.
-		 */
-		if (!dma_fence_match_context(submit->in_fence, context))
-			ret = dma_fence_wait(submit->in_fence, true);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index bfa54abbbdd1..d5d5cfab8477 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1078,54 +1078,6 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 	return &f->base;
 }
 
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive, bool explicit)
-{
-	struct reservation_object *robj = etnaviv_obj->resv;
-	struct reservation_object_list *fobj;
-	struct dma_fence *fence;
-	int i, ret;
-
-	if (!exclusive) {
-		ret = reservation_object_reserve_shared(robj);
-		if (ret)
-			return ret;
-	}
-
-	if (explicit)
-		return 0;
-
-	/*
-	 * If we have any shared fences, then the exclusive fence
-	 * should be ignored as it will already have been signalled.
-	 */
-	fobj = reservation_object_get_list(robj);
-	if (!fobj || fobj->shared_count == 0) {
-		/* Wait on any existing exclusive fence which isn't our own */
-		fence = reservation_object_get_excl(robj);
-		if (fence && fence->context != context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	if (!exclusive || !fobj)
-		return 0;
-
-	for (i = 0; i < fobj->shared_count; i++) {
-		fence = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(robj));
-		if (fence->context != context) {
-			ret = dma_fence_wait(fence, true);
-			if (ret)
-				return ret;
-		}
-	}
-
-	return 0;
-}
-
 /*
  * event management:
  */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 02f7ffa34f3b..f5c6dbe026d6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -188,9 +188,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 #endif
 
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive, bool implicit);
-
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
 	u32 fence, struct timespec *timeout);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 143c3eca80b0..bb68c57e714f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -35,6 +35,51 @@ struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
 struct dma_fence *etnaviv_sched_dependency(struct drm_sched_job *sched_job,
 					   struct drm_sched_entity *entity)
 {
+	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+	struct dma_fence *fence;
+	int i;
+
+	if (unlikely(submit->in_fence)) {
+		fence = submit->in_fence;
+
+		if (!dma_fence_is_signaled(fence))
+			return fence;
+
+		dma_fence_put(fence);
+		submit->in_fence = NULL;
+	}
+
+	for (i = 0; i < submit->nr_bos; i++) {
+		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+		int j;
+
+		if (bo->excl) {
+			fence = bo->excl;
+			bo->excl = NULL;
+
+			if (!dma_fence_is_signaled(fence))
+				return fence;
+
+			dma_fence_put(fence);
+		}
+
+		for (j = 0; j < bo->nr_shared; j++) {
+			if (!bo->shared[j])
+				continue;
+
+			fence = bo->shared[j];
+			bo->shared[j] = NULL;
+
+			if (!dma_fence_is_signaled(fence))
+				return fence;
+
+			dma_fence_put(fence);
+		}
+		kfree(bo->shared);
+		bo->nr_shared = 0;
+		bo->shared = NULL;
+	}
+
 	return NULL;
 }
 
-- 
2.11.0


