[PATCH 1/4] drm: Optionally create mm blocks from top-to-bottom
Daniel Vetter
daniel at ffwll.ch
Wed Apr 2 01:25:27 PDT 2014
On Mon, Mar 31, 2014 at 03:27:54PM +0300, Lauri Kasanen wrote:
> Clients like i915 need to segregate cache domains within the GTT, which
> can lead to small amounts of fragmentation. By allocating the uncached
> buffers from the bottom and the cacheable buffers from the top, we can
> reduce the amount of wasted space and also optimize allocation of the
> mappable portion of the GTT to only those buffers that require CPU
> access through the GTT.
>
> For other drivers, allocating small bos from one end and large ones
> from the other helps reduce fragmentation.
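[As an aside for readers following along: with the new flags a driver would pick the two ends roughly as below. This is only a sketch and not part of the patch; it assumes "mm" is an already-initialised struct drm_mm, the nodes are cleared to 0, and the names and sizes are made up for illustration.

	struct drm_mm_node mappable_node, bulk_node;
	int ret;

	/* CPU-mappable buffer: pack it at the bottom of the address space. */
	ret = drm_mm_insert_node_generic(&mm, &mappable_node, 4096, 0, 0,
					 DRM_MM_BOTTOMUP);
	if (ret)
		return ret;

	/* Large/cacheable buffer: allocate it from the top end instead. */
	ret = drm_mm_insert_node_generic(&mm, &bulk_node, 2 << 20, 0, 0,
					 DRM_MM_TOPDOWN);
	if (ret)
		return ret;
]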
>
> Original patch by Chris Wilson.
>
> v2 by Ben:
> Update callers in i915_gem_object_bind_to_gtt()
> Turn search flags and allocation flags into separate enums
> Make checkpatch happy where logical/easy
>
> v3 by Ben:
> Rebased on top of the many drm_mm changes since the original patches
> Remove ATOMIC from allocator flags (Chris)
> Reverse order of TOPDOWN and BOTTOMUP
>
> v4 by Lauri:
> Remove i915 parts, they don't apply
> Respin on top of drm-next
>
> Signed-off-by: Lauri Kasanen <cand at gmx.com>
Please also update the kerneldoc we now have in drm-next, thanks.
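Something along these lines for drm_mm_insert_node_generic would do (just a
sketch of the shape, please adjust the wording yourself, and
drm_mm_insert_node_in_range_generic needs the same treatment for its new
sflags/aflags parameters):

/**
 * drm_mm_insert_node_generic - search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @sflags: flags to fine-tune the allocation search
 * @aflags: flags to fine-tune the allocation behavior
 *
 * The preallocated node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */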
-Daniel
> ---
> drivers/gpu/drm/drm_mm.c | 56 +++++++++++++++++++++++++++++++++++-------------
> include/drm/drm_mm.h | 29 +++++++++++++++++++++----
> 2 files changed, 66 insertions(+), 19 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
> index a2d45b74..1728bcc 100644
> --- a/drivers/gpu/drm/drm_mm.c
> +++ b/drivers/gpu/drm/drm_mm.c
> @@ -102,7 +102,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
> static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> struct drm_mm_node *node,
> unsigned long size, unsigned alignment,
> - unsigned long color)
> + unsigned long color,
> + enum drm_mm_allocator_flags flags)
> {
> struct drm_mm *mm = hole_node->mm;
> unsigned long hole_start = drm_mm_hole_node_start(hole_node);
> @@ -115,12 +116,22 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
> if (mm->color_adjust)
> mm->color_adjust(hole_node, color, &adj_start, &adj_end);
>
> + if (flags & DRM_MM_CREATE_TOP)
> + adj_start = adj_end - size;
> +
> if (alignment) {
> unsigned tmp = adj_start % alignment;
> - if (tmp)
> - adj_start += alignment - tmp;
> + if (tmp) {
> + if (flags & DRM_MM_CREATE_TOP)
> + adj_start -= tmp;
> + else
> + adj_start += alignment - tmp;
> + }
> }
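[Side note, with made-up numbers, to spell out the top-down placement: for a
hole [0x1000, 0x8800), size 0x2000 and alignment 0x1000, CREATE_TOP first
gives adj_start = 0x8800 - 0x2000 = 0x6800; the alignment fixup then rounds
*down* (adj_start -= 0x800) to 0x6000, so the node ends up at
[0x6000, 0x8000) and the leftover 0x800 bytes stay at the very top of the
hole.]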
>
> + BUG_ON(adj_start < hole_start);
> + BUG_ON(adj_end > hole_end);
> +
> if (adj_start == hole_start) {
> hole_node->hole_follows = 0;
> list_del(&hole_node->hole_stack);
> @@ -215,16 +226,17 @@ EXPORT_SYMBOL(drm_mm_reserve_node);
> int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
> unsigned long size, unsigned alignment,
> unsigned long color,
> - enum drm_mm_search_flags flags)
> + enum drm_mm_search_flags sflags,
> + enum drm_mm_allocator_flags aflags)
> {
> struct drm_mm_node *hole_node;
>
> hole_node = drm_mm_search_free_generic(mm, size, alignment,
> - color, flags);
> + color, sflags);
> if (!hole_node)
> return -ENOSPC;
>
> - drm_mm_insert_helper(hole_node, node, size, alignment, color);
> + drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags);
> return 0;
> }
> EXPORT_SYMBOL(drm_mm_insert_node_generic);
> @@ -233,7 +245,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> struct drm_mm_node *node,
> unsigned long size, unsigned alignment,
> unsigned long color,
> - unsigned long start, unsigned long end)
> + unsigned long start, unsigned long end,
> + enum drm_mm_allocator_flags flags)
> {
> struct drm_mm *mm = hole_node->mm;
> unsigned long hole_start = drm_mm_hole_node_start(hole_node);
> @@ -248,13 +261,20 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> if (adj_end > end)
> adj_end = end;
>
> + if (flags & DRM_MM_CREATE_TOP)
> + adj_start = adj_end - size;
> +
> if (mm->color_adjust)
> mm->color_adjust(hole_node, color, &adj_start, &adj_end);
>
> if (alignment) {
> unsigned tmp = adj_start % alignment;
> - if (tmp)
> - adj_start += alignment - tmp;
> + if (tmp) {
> + if (flags & DRM_MM_CREATE_TOP)
> + adj_start -= tmp;
> + else
> + adj_start += alignment - tmp;
> + }
> }
>
> if (adj_start == hole_start) {
> @@ -271,6 +291,8 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> INIT_LIST_HEAD(&node->hole_stack);
> list_add(&node->node_list, &hole_node->node_list);
>
> + BUG_ON(node->start < start);
> + BUG_ON(node->start < adj_start);
> BUG_ON(node->start + node->size > adj_end);
> BUG_ON(node->start + node->size > end);
>
> @@ -298,21 +320,23 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
> * 0 on success, -ENOSPC if there's no suitable hole.
> */
> int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
> - unsigned long size, unsigned alignment, unsigned long color,
> + unsigned long size, unsigned alignment,
> + unsigned long color,
> unsigned long start, unsigned long end,
> - enum drm_mm_search_flags flags)
> + enum drm_mm_search_flags sflags,
> + enum drm_mm_allocator_flags aflags)
> {
> struct drm_mm_node *hole_node;
>
> hole_node = drm_mm_search_free_in_range_generic(mm,
> size, alignment, color,
> - start, end, flags);
> + start, end, sflags);
> if (!hole_node)
> return -ENOSPC;
>
> drm_mm_insert_helper_range(hole_node, node,
> size, alignment, color,
> - start, end);
> + start, end, aflags);
> return 0;
> }
> EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
> @@ -391,7 +415,8 @@ static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
> best = NULL;
> best_size = ~0UL;
>
> - drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
> + __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
> + flags & DRM_MM_SEARCH_BELOW) {
> if (mm->color_adjust) {
> mm->color_adjust(entry, color, &adj_start, &adj_end);
> if (adj_end <= adj_start)
> @@ -432,7 +457,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
> best = NULL;
> best_size = ~0UL;
>
> - drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
> + __drm_mm_for_each_hole(entry, mm, adj_start, adj_end,
> + flags & DRM_MM_SEARCH_BELOW) {
> if (adj_start < start)
> adj_start = start;
> if (adj_end > end)
> diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
> index 8b6981a..f2bc79b 100644
> --- a/include/drm/drm_mm.h
> +++ b/include/drm/drm_mm.h
> @@ -47,8 +47,17 @@
> enum drm_mm_search_flags {
> DRM_MM_SEARCH_DEFAULT = 0,
> DRM_MM_SEARCH_BEST = 1 << 0,
> + DRM_MM_SEARCH_BELOW = 1 << 1,
> };
>
> +enum drm_mm_allocator_flags {
> + DRM_MM_CREATE_DEFAULT = 0,
> + DRM_MM_CREATE_TOP = 1 << 0,
> +};
> +
> +#define DRM_MM_BOTTOMUP DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT
> +#define DRM_MM_TOPDOWN DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP
> +
> struct drm_mm_node {
> struct list_head node_list;
> struct list_head hole_stack;
> @@ -195,6 +204,14 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
> 1 : 0; \
> entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
>
> +#define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \
> + for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
> + &entry->hole_stack != &(mm)->hole_stack ? \
> + hole_start = drm_mm_hole_node_start(entry), \
> + hole_end = drm_mm_hole_node_end(entry), \
> + 1 : 0; \
> + entry = list_entry((backwards) ? entry->hole_stack.prev : entry->hole_stack.next, struct drm_mm_node, hole_stack))
> +
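[For readers not familiar with the hole_stack list: with "backwards" set this
is simply the reverse walk over the free holes, i.e. roughly equivalent to the
following open-coded loop (illustration only, not part of the patch):

	struct drm_mm_node *entry;

	/* Walk the free holes from the last one towards the first. */
	list_for_each_entry_reverse(entry, &mm->hole_stack, hole_stack) {
		unsigned long hole_start = drm_mm_hole_node_start(entry);
		unsigned long hole_end = drm_mm_hole_node_end(entry);
		/* ... candidate range is [hole_start, hole_end) ... */
	}
]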
> /*
> * Basic range manager support (drm_mm.c)
> */
> @@ -205,7 +222,8 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
> unsigned long size,
> unsigned alignment,
> unsigned long color,
> - enum drm_mm_search_flags flags);
> + enum drm_mm_search_flags sflags,
> + enum drm_mm_allocator_flags aflags);
> /**
> * drm_mm_insert_node - search for space and insert @node
> * @mm: drm_mm to allocate from
> @@ -228,7 +246,8 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,
> unsigned alignment,
> enum drm_mm_search_flags flags)
> {
> - return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
> + return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags,
> + DRM_MM_CREATE_DEFAULT);
> }
>
> int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
> @@ -238,7 +257,8 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
> unsigned long color,
> unsigned long start,
> unsigned long end,
> - enum drm_mm_search_flags flags);
> + enum drm_mm_search_flags sflags,
> + enum drm_mm_allocator_flags aflags);
> /**
> * drm_mm_insert_node_in_range - ranged search for space and insert @node
> * @mm: drm_mm to allocate from
> @@ -266,7 +286,8 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
> enum drm_mm_search_flags flags)
> {
> return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
> - 0, start, end, flags);
> + 0, start, end, flags,
> + DRM_MM_CREATE_DEFAULT);
> }
>
> void drm_mm_remove_node(struct drm_mm_node *node);
> --
> 1.8.3.1
>
> _______________________________________________
> dri-devel mailing list
> dri-devel at lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch