[PATCH 4/4] drm/mm: remove unused API
Daniel Vetter
daniel at ffwll.ch
Thu Jul 25 07:27:51 PDT 2013
On Thu, Jul 25, 2013 at 03:56:02PM +0200, David Herrmann wrote:
> We used to pre-allocate drm_mm nodes and save them in a linked list for
> later use so we always have spare ones in atomic contexts. However, this
> is really racy if multiple threads are in an atomic context at the same
> time and we don't have enough spare nodes. Moreover, all remaining users
> run in user-context and just lock drm_mm with a spinlock. So we can easily
> preallocate the node, take the spinlock and insert the node.
>
> This may have worked well with BKL in place, however, with today's
> infrastructure it really doesn't make any sense. Besides, most users can
> easily embed drm_mm_node into their objects so no allocation is needed at
> all.
>
> Thus, remove the old pre-alloc API and all the helpers that it provides.
> Drivers have already been converted, and the old API should not be used
> for new code anymore.
>
> Signed-off-by: David Herrmann <dh.herrmann at gmail.com>
Acked-by: Daniel Vetter <daniel.vetter at ffwll.ch>
> ---
> drivers/gpu/drm/drm_mm.c | 160 ++++++-----------------------------------------
> include/drm/drm_mm.h | 95 ----------------------------
> 2 files changed, 20 insertions(+), 235 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index 5aad736..0946251 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -49,58 +49,18 @@
>
> #define MM_UNUSED_TARGET 4
>
> -static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
> -{
> - struct drm_mm_node *child;
> -
> - if (atomic)
> - child = kzalloc(sizeof(*child), GFP_ATOMIC);
> - else
> - child = kzalloc(sizeof(*child), GFP_KERNEL);
> -
> - if (unlikely(child == NULL)) {
> - spin_lock(&mm->unused_lock);
> - if (list_empty(&mm->unused_nodes))
> - child = NULL;
> - else {
> - child =
> - list_entry(mm->unused_nodes.next,
> - struct drm_mm_node, node_list);
> - list_del(&child->node_list);
> - --mm->num_unused;
> - }
> - spin_unlock(&mm->unused_lock);
> - }
> - return child;
> -}
> -
> -/* drm_mm_pre_get() - pre allocate drm_mm_node structure
> - * drm_mm: memory manager struct we are pre-allocating for
> - *
> - * Returns 0 on success or -ENOMEM if allocation fails.
> - */
> -int drm_mm_pre_get(struct drm_mm *mm)
> -{
> - struct drm_mm_node *node;
> -
> - spin_lock(&mm->unused_lock);
> - while (mm->num_unused < MM_UNUSED_TARGET) {
> - spin_unlock(&mm->unused_lock);
> - node = kzalloc(sizeof(*node), GFP_KERNEL);
> - spin_lock(&mm->unused_lock);
> -
> - if (unlikely(node == NULL)) {
> - int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
> - spin_unlock(&mm->unused_lock);
> - return ret;
> - }
> - ++mm->num_unused;
> - list_add_tail(&node->node_list, &mm->unused_nodes);
> - }
> - spin_unlock(&mm->unused_lock);
> - return 0;
> -}
> -EXPORT_SYMBOL(drm_mm_pre_get);
> +static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
> + unsigned long size,
> + unsigned alignment,
> + unsigned long color,
> + bool best_match);
> +static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> + unsigned long size,
> + unsigned alignment,
> + unsigned long color,
> + unsigned long start,
> + unsigned long end,
> + bool best_match);
>
> static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> struct drm_mm_node *node,
> @@ -187,24 +147,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
> }
> EXPORT_SYMBOL(drm_mm_reserve_node);
>
> -struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - int atomic)
> -{
> - struct drm_mm_node *node;
> -
> - node = drm_mm_kmalloc(hole_node->mm, atomic);
> - if (unlikely(node == NULL))
> - return NULL;
> -
> - drm_mm_insert_helper(hole_node, node, size, alignment, color);
> -
> - return node;
> -}
> -EXPORT_SYMBOL(drm_mm_get_block_generic);
> -
> /**
> * Search for free space and insert a preallocated memory node. Returns
> * -ENOSPC if no suitable free area is available. The preallocated memory node
> @@ -278,27 +220,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> }
> }
>
> -struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - unsigned long start,
> - unsigned long end,
> - int atomic)
> -{
> - struct drm_mm_node *node;
> -
> - node = drm_mm_kmalloc(hole_node->mm, atomic);
> - if (unlikely(node == NULL))
> - return NULL;
> -
> - drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
> - start, end);
> -
> - return node;
> -}
> -EXPORT_SYMBOL(drm_mm_get_block_range_generic);
> -
> /**
> * Search for free space and insert a preallocated memory node. Returns
> * -ENOSPC if no suitable free area is available. This is for range
> @@ -357,28 +278,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
> }
> EXPORT_SYMBOL(drm_mm_remove_node);
>
> -/*
> - * Remove a memory node from the allocator and free the allocated struct
> - * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
> - * drm_mm_get_block functions.
> - */
> -void drm_mm_put_block(struct drm_mm_node *node)
> -{
> -
> - struct drm_mm *mm = node->mm;
> -
> - drm_mm_remove_node(node);
> -
> - spin_lock(&mm->unused_lock);
> - if (mm->num_unused < MM_UNUSED_TARGET) {
> - list_add(&node->node_list, &mm->unused_nodes);
> - ++mm->num_unused;
> - } else
> - kfree(node);
> - spin_unlock(&mm->unused_lock);
> -}
> -EXPORT_SYMBOL(drm_mm_put_block);
> -
> static int check_free_hole(unsigned long start, unsigned long end,
> unsigned long size, unsigned alignment)
> {
> @@ -394,11 +293,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
> return end >= start + size;
> }
>
> -struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - bool best_match)
> +static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
> + unsigned long size,
> + unsigned alignment,
> + unsigned long color,
> + bool best_match)
> {
> struct drm_mm_node *entry;
> struct drm_mm_node *best;
> @@ -432,9 +331,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
>
> return best;
> }
> -EXPORT_SYMBOL(drm_mm_search_free_generic);
>
> -struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> +static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
> unsigned long size,
> unsigned alignment,
> unsigned long color,
> @@ -479,7 +377,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
>
> return best;
> }
> -EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
>
> /**
> * Moves an allocation. To be used with embedded struct drm_mm_node.
> @@ -652,10 +549,7 @@ EXPORT_SYMBOL(drm_mm_clean);
> void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
> {
> INIT_LIST_HEAD(&mm->hole_stack);
> - INIT_LIST_HEAD(&mm->unused_nodes);
> - mm->num_unused = 0;
> mm->scanned_blocks = 0;
> - spin_lock_init(&mm->unused_lock);
>
> /* Clever trick to avoid a special case in the free hole tracking. */
> INIT_LIST_HEAD(&mm->head_node.node_list);
> @@ -675,22 +569,8 @@ EXPORT_SYMBOL(drm_mm_init);
>
> void drm_mm_takedown(struct drm_mm * mm)
> {
> - struct drm_mm_node *entry, *next;
> -
> - if (WARN(!list_empty(&mm->head_node.node_list),
> - "Memory manager not clean. Delaying takedown\n")) {
> - return;
> - }
> -
> - spin_lock(&mm->unused_lock);
> - list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
> - list_del(&entry->node_list);
> - kfree(entry);
> - --mm->num_unused;
> - }
> - spin_unlock(&mm->unused_lock);
> -
> - BUG_ON(mm->num_unused != 0);
> + WARN(!list_empty(&mm->head_node.node_list),
> + "Memory manager not clean during takedown.\n");
> }
> EXPORT_SYMBOL(drm_mm_takedown);
>
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index a30c9aa..4890c51 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -62,9 +62,6 @@ struct drm_mm {
> /* head_node.node_list is the list of all memory nodes, ordered
> * according to the (increasing) start address of the memory node. */
> struct drm_mm_node head_node;
> - struct list_head unused_nodes;
> - int num_unused;
> - spinlock_t unused_lock;
> unsigned int scan_check_range : 1;
> unsigned scan_alignment;
> unsigned long scan_color;
> @@ -115,13 +112,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
> &(mm)->head_node.node_list, \
> node_list)
> -#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
> - for (entry = (mm)->prev_scanned_node, \
> - next = entry ? list_entry(entry->node_list.next, \
> - struct drm_mm_node, node_list) : NULL; \
> - entry != NULL; entry = next, \
> - next = entry ? list_entry(entry->node_list.next, \
> - struct drm_mm_node, node_list) : NULL) \
>
> /* Note that we need to unroll list_for_each_entry in order to inline
> * setting hole_start and hole_end on each iteration and keep the
> @@ -139,52 +129,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> * Basic range manager support (drm_mm.c)
> */
> extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
> -extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - int atomic);
> -extern struct drm_mm_node *drm_mm_get_block_range_generic(
> - struct drm_mm_node *node,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - unsigned long start,
> - unsigned long end,
> - int atomic);
> -
> -static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
> - unsigned long size,
> - unsigned alignment)
> -{
> - return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
> -}
> -static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
> - unsigned long size,
> - unsigned alignment)
> -{
> - return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
> -}
> -static inline struct drm_mm_node *drm_mm_get_block_range(
> - struct drm_mm_node *parent,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long start,
> - unsigned long end)
> -{
> - return drm_mm_get_block_range_generic(parent, size, alignment, 0,
> - start, end, 0);
> -}
> -static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
> - struct drm_mm_node *parent,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long start,
> - unsigned long end)
> -{
> - return drm_mm_get_block_range_generic(parent, size, alignment, 0,
> - start, end, 1);
> -}
>
> extern int drm_mm_insert_node_generic(struct drm_mm *mm,
> struct drm_mm_node *node,
> @@ -222,52 +166,13 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
> 0, start, end, best_match);
> }
>
> -extern void drm_mm_put_block(struct drm_mm_node *cur);
> extern void drm_mm_remove_node(struct drm_mm_node *node);
> extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
> -extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - bool best_match);
> -extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
> - const struct drm_mm *mm,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long color,
> - unsigned long start,
> - unsigned long end,
> - bool best_match);
> -static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
> - unsigned long size,
> - unsigned alignment,
> - bool best_match)
> -{
> - return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
> -}
> -static inline struct drm_mm_node *drm_mm_search_free_in_range(
> - const struct drm_mm *mm,
> - unsigned long size,
> - unsigned alignment,
> - unsigned long start,
> - unsigned long end,
> - bool best_match)
> -{
> - return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
> - start, end, best_match);
> -}
> -
> extern void drm_mm_init(struct drm_mm *mm,
> unsigned long start,
> unsigned long size);
> extern void drm_mm_takedown(struct drm_mm *mm);
> extern int drm_mm_clean(struct drm_mm *mm);
> -extern int drm_mm_pre_get(struct drm_mm *mm);
> -
> -static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
> -{
> - return block->mm;
> -}
>
> void drm_mm_init_scan(struct drm_mm *mm,
> unsigned long size,
> --
> 1.8.3.3
>
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch
More information about the dri-devel
mailing list