[PATCH 02/30] drm/i915: Rename struct active_node to i915_active_node

Chris Wilson <chris@chris-wilson.co.uk>
Thu Jul 9 02:55:58 UTC 2020


In preparation for the next patch, give struct active_node a fully
qualified name, i915_active_node, so that it carries the same i915_
prefix as the other types declared in i915_active_types.h.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_active.c           | 62 ++++++++++----------
 drivers/gpu/drm/i915/i915_active_types.h     |  4 +-
 drivers/gpu/drm/i915/selftests/i915_active.c |  6 +-
 3 files changed, 36 insertions(+), 36 deletions(-)
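
As an aside, a minimal sketch of what the qualified name buys us
(illustration only, not part of the patch; "struct foo" is a
hypothetical consumer):

	/* hypothetical consumer header; assumes this rename is applied */
	struct i915_active_node;	/* opaque forward declaration */

	struct foo {
		/* no struct definition needed for a pointer member */
		struct i915_active_node *cache;
	};

Note also that KMEM_CACHE() derives the slab name from the struct name,
so the cache previously visible as "active_node" in /proc/slabinfo will
now show up as "i915_active_node".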

diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index d960d0be5bd2..f3ba03e74a41 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -27,17 +27,17 @@ static struct i915_global_active {
 	struct kmem_cache *slab_cache;
 } global;
 
-struct active_node {
+struct i915_active_node {
 	struct i915_active_fence base;
 	struct i915_active *ref;
 	struct rb_node node;
 	u64 timeline;
 };
 
-static inline struct active_node *
+static inline struct i915_active_node *
 node_from_active(struct i915_active_fence *active)
 {
-	return container_of(active, struct active_node, base);
+	return container_of(active, struct i915_active_node, base);
 }
 
 #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
@@ -47,29 +47,29 @@ static inline bool is_barrier(const struct i915_active_fence *active)
 	return IS_ERR(rcu_access_pointer(active->fence));
 }
 
-static inline struct llist_node *barrier_to_ll(struct active_node *node)
+static inline struct llist_node *barrier_to_ll(struct i915_active_node *node)
 {
 	GEM_BUG_ON(!is_barrier(&node->base));
 	return (struct llist_node *)&node->base.cb.node;
 }
 
 static inline struct intel_engine_cs *
-__barrier_to_engine(struct active_node *node)
+__barrier_to_engine(struct i915_active_node *node)
 {
 	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
 }
 
 static inline struct intel_engine_cs *
-barrier_to_engine(struct active_node *node)
+barrier_to_engine(struct i915_active_node *node)
 {
 	GEM_BUG_ON(!is_barrier(&node->base));
 	return __barrier_to_engine(node);
 }
 
-static inline struct active_node *barrier_from_ll(struct llist_node *x)
+static inline struct i915_active_node *barrier_from_ll(struct llist_node *x)
 {
 	return container_of((struct list_head *)x,
-			    struct active_node, base.cb.node);
+			    struct i915_active_node, base.cb.node);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
@@ -128,7 +128,7 @@ static inline void debug_active_assert(struct i915_active *ref) { }
 static void
 __active_retire(struct i915_active *ref)
 {
-	struct active_node *it, *n;
+	struct i915_active_node *it, *n;
 	struct rb_root root;
 	unsigned long flags;
 
@@ -206,7 +206,7 @@ static void
 node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
 	if (active_fence_cb(fence, cb))
-		active_retire(container_of(cb, struct active_node, base.cb)->ref);
+		active_retire(container_of(cb, struct i915_active_node, base.cb)->ref);
 }
 
 static void
@@ -219,7 +219,7 @@ excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
 static struct i915_active_fence *
 active_instance(struct i915_active *ref, struct intel_timeline *tl)
 {
-	struct active_node *node, *prealloc;
+	struct i915_active_node *node, *prealloc;
 	struct rb_node **p, *parent;
 	u64 idx = tl->fence_context;
 
@@ -247,7 +247,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl)
 	while (*p) {
 		parent = *p;
 
-		node = rb_entry(parent, struct active_node, node);
+		node = rb_entry(parent, struct i915_active_node, node);
 		if (node->timeline == idx) {
 			kmem_cache_free(global.slab_cache, prealloc);
 			goto out;
@@ -306,7 +306,7 @@ void __i915_active_init(struct i915_active *ref,
 }
 
 static bool ____active_del_barrier(struct i915_active *ref,
-				   struct active_node *node,
+				   struct i915_active_node *node,
 				   struct intel_engine_cs *engine)
 
 {
@@ -322,7 +322,7 @@ static bool ____active_del_barrier(struct i915_active *ref,
 	 * which case either we or they will be upset :)
 	 *
 	 * A second __active_del_barrier() will report failure to claim
-	 * the active_node and the caller will just shrug and know not to
+	 * the i915_active_node and the caller will just shrug and know not to
 	 * claim ownership of its node.
 	 *
 	 * A concurrent i915_request_add_active_barriers() will miss adding
@@ -348,7 +348,7 @@ static bool ____active_del_barrier(struct i915_active *ref,
 }
 
 static bool
-__active_del_barrier(struct i915_active *ref, struct active_node *node)
+__active_del_barrier(struct i915_active *ref, struct i915_active_node *node)
 {
 	return ____active_del_barrier(ref, node, barrier_to_engine(node));
 }
@@ -464,7 +464,7 @@ static void enable_signaling(struct i915_active_fence *active)
 	dma_fence_put(fence);
 }
 
-static int flush_barrier(struct active_node *it)
+static int flush_barrier(struct i915_active_node *it)
 {
 	struct intel_engine_cs *engine;
 
@@ -481,7 +481,7 @@ static int flush_barrier(struct active_node *it)
 
 static int flush_lazy_signals(struct i915_active *ref)
 {
-	struct active_node *it, *n;
+	struct i915_active_node *it, *n;
 	int err = 0;
 
 	enable_signaling(&ref->excl);
@@ -602,7 +602,7 @@ static int await_active(struct i915_active *ref,
 	}
 
 	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
-		struct active_node *it, *n;
+		struct i915_active_node *it, *n;
 
 		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 			err = __await_active(&it->base, fn, arg);
@@ -662,12 +662,12 @@ void i915_active_fini(struct i915_active *ref)
 }
 #endif
 
-static inline bool is_idle_barrier(struct active_node *node, u64 idx)
+static inline bool is_idle_barrier(struct i915_active_node *node, u64 idx)
 {
 	return node->timeline == idx && !i915_active_fence_isset(&node->base);
 }
 
-static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
+static struct i915_active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 {
 	struct rb_node *prev, *p;
 
@@ -692,8 +692,8 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	prev = NULL;
 	p = ref->tree.rb_node;
 	while (p) {
-		struct active_node *node =
-			rb_entry(p, struct active_node, node);
+		struct i915_active_node *node =
+			rb_entry(p, struct i915_active_node, node);
 
 		if (is_idle_barrier(node, idx))
 			goto match;
@@ -712,8 +712,8 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 	 * the first pending barrier.
 	 */
 	for (p = prev; p; p = rb_next(p)) {
-		struct active_node *node =
-			rb_entry(p, struct active_node, node);
+		struct i915_active_node *node =
+			rb_entry(p, struct i915_active_node, node);
 		struct intel_engine_cs *engine;
 
 		if (node->timeline > idx)
@@ -749,7 +749,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 		ref->cache = NULL;
 	spin_unlock_irq(&ref->tree_lock);
 
-	return rb_entry(p, struct active_node, node);
+	return rb_entry(p, struct i915_active_node, node);
 }
 
 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
@@ -776,7 +776,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 	for_each_engine_masked(engine, gt, mask, tmp) {
 		u64 idx = engine->kernel_context->timeline->fence_context;
 		struct llist_node *prev = first;
-		struct active_node *node;
+		struct i915_active_node *node;
 
 		node = reuse_idle_barrier(ref, idx);
 		if (!node) {
@@ -823,7 +823,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 
 unwind:
 	while (first) {
-		struct active_node *node = barrier_from_ll(first);
+		struct i915_active_node *node = barrier_from_ll(first);
 
 		first = first->next;
 
@@ -849,7 +849,7 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 	 * request that will eventually release them.
 	 */
 	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
-		struct active_node *node = barrier_from_ll(pos);
+		struct i915_active_node *node = barrier_from_ll(pos);
 		struct intel_engine_cs *engine = barrier_to_engine(node);
 		struct rb_node **p, *parent;
 
@@ -858,11 +858,11 @@ void i915_active_acquire_barrier(struct i915_active *ref)
 		parent = NULL;
 		p = &ref->tree.rb_node;
 		while (*p) {
-			struct active_node *it;
+			struct i915_active_node *it;
 
 			parent = *p;
 
-			it = rb_entry(parent, struct active_node, node);
+			it = rb_entry(parent, struct i915_active_node, node);
 			if (it->timeline < node->timeline)
 				p = &parent->rb_right;
 			else
@@ -1067,7 +1067,7 @@ static struct i915_global_active global = { {
 
 int __init i915_global_active_init(void)
 {
-	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+	global.slab_cache = KMEM_CACHE(i915_active_node, SLAB_HWCACHE_ALIGN);
 	if (!global.slab_cache)
 		return -ENOMEM;
 
diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h
index 6360c3e4b765..bbb22213eeba 100644
--- a/drivers/gpu/drm/i915/i915_active_types.h
+++ b/drivers/gpu/drm/i915/i915_active_types.h
@@ -22,7 +22,7 @@ struct i915_active_fence {
 	struct dma_fence_cb cb;
 };
 
-struct active_node;
+struct i915_active_node;
 
 #define I915_ACTIVE_MAY_SLEEP BIT(0)
 
@@ -34,7 +34,7 @@ struct i915_active {
 	struct mutex mutex;
 
 	spinlock_t tree_lock;
-	struct active_node *cache;
+	struct i915_active_node *cache;
 	struct rb_root tree;
 
 	/* Preallocated "exclusive" node */
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 4002c984c2e0..3ce048052375 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -260,7 +260,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
 	return i915_subtests(tests, i915);
 }
 
-static struct intel_engine_cs *node_to_barrier(struct active_node *it)
+static struct intel_engine_cs *node_to_barrier(struct i915_active_node *it)
 {
 	struct intel_engine_cs *engine;
 
@@ -283,7 +283,7 @@ void i915_active_print(struct i915_active *ref, struct drm_printer *m)
 		   yesno(!llist_empty(&ref->preallocated_barriers)));
 
 	if (i915_active_acquire_if_busy(ref)) {
-		struct active_node *it, *n;
+		struct i915_active_node *it, *n;
 
 		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 			struct intel_engine_cs *engine;
@@ -331,7 +331,7 @@ static void active_flush(struct i915_active *ref,
 void i915_active_unlock_wait(struct i915_active *ref)
 {
 	if (i915_active_acquire_if_busy(ref)) {
-		struct active_node *it, *n;
+		struct i915_active_node *it, *n;
 
 		/* Wait for all active callbacks */
 		rcu_read_lock();
-- 
2.20.1


