[PATCH 03/33] drm: Add drm_mm_for_each_node_safe()

Chris Wilson chris at chris-wilson.co.uk
Mon Dec 12 11:09:37 UTC 2016


A complement to drm_mm_for_each_node(): wrap list_for_each_entry_safe()
so that the list of nodes can be walked even while the current node is
being removed. While at it, pull the open-coded
&mm->head_node.node_list accesses into a __drm_mm_nodes() helper.
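
For example, a teardown path can then release every node without
corrupting the walk. A minimal sketch (illustrative only; the kfree()
assumes the drm_mm_node is itself the allocated object):

	struct drm_mm_node *node, *next;

	drm_mm_for_each_node_safe(node, next, &mm) {
		/* The iterator has already cached the next entry, so
		 * unhooking and freeing the current node does not
		 * invalidate the walk.
		 */
		drm_mm_remove_node(node);
		kfree(node);
	}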

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_mm.c |  9 ++++-----
 include/drm/drm_mm.h     | 19 ++++++++++++++++---
 2 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ca1e344f318d..6e0735539545 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -138,7 +138,7 @@ static void show_leaks(struct drm_mm *mm)
 	if (!buf)
 		return;
 
-	list_for_each_entry(node, &mm->head_node.node_list, node_list) {
+	list_for_each_entry(node, __drm_mm_nodes(mm), node_list) {
 		struct stack_trace trace = {
 			.entries = entries,
 			.max_entries = STACKDEPTH
@@ -320,8 +320,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
 		if (hole->start < end)
 			return -ENOSPC;
 	} else {
-		hole = list_entry(&mm->head_node.node_list,
-				  typeof(*hole), node_list);
+		hole = list_entry(__drm_mm_nodes(mm), typeof(*hole), node_list);
 	}
 
 	hole = list_last_entry(&hole->node_list, typeof(*hole), node_list);
@@ -884,7 +883,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
  */
 bool drm_mm_clean(struct drm_mm * mm)
 {
-	struct list_head *head = &mm->head_node.node_list;
+	struct list_head *head = __drm_mm_nodes(mm);
 
 	return (head->next->next == head);
 }
@@ -930,7 +929,7 @@ EXPORT_SYMBOL(drm_mm_init);
  */
 void drm_mm_takedown(struct drm_mm *mm)
 {
-	if (WARN(!list_empty(&mm->head_node.node_list),
+	if (WARN(!list_empty(__drm_mm_nodes(mm)),
 		 "Memory manager not clean during takedown.\n"))
 		show_leaks(mm);
 
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 0b8371795aeb..8faa28ad97b3 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -179,6 +179,8 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 	return __drm_mm_hole_node_end(hole_node);
 }
 
+#define __drm_mm_nodes(mm) (&(mm)->head_node.node_list)
+
 /**
  * drm_mm_for_each_node - iterator to walk over all allocated nodes
  * @entry: drm_mm_node structure to assign to in each iteration step
@@ -187,9 +189,20 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
  * This iterator walks over all nodes in the range allocator. It is implemented
  * with list_for_each, so not save against removal of elements.
  */
-#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
-						&(mm)->head_node.node_list, \
-						node_list)
+#define drm_mm_for_each_node(entry, mm) \
+	list_for_each_entry(entry, __drm_mm_nodes(mm), node_list)
+
+/**
+ * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes
+ * @entry: drm_mm_node structure to assign to in each iteration step
+ * @next: drm_mm_node structure to store the next step
+ * @mm: drm_mm allocator to walk
+ *
+ * This iterator walks over all nodes in the range allocator. It is implemented
+ * with list_for_each_safe, so safe against removal of elements.
+ */
+#define drm_mm_for_each_node_safe(entry, next, mm) \
+	list_for_each_entry_safe(entry, next, __drm_mm_nodes(mm), node_list)
 
 #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
 	for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
-- 
2.11.0


