[RFC PATCH 6/7] drm/ttm: Introduce a huge page aligning TTM range manager.

Christian König christian.koenig at amd.com
Wed Nov 27 10:05:38 UTC 2019


I don't see the advantage over just increasing the alignment 
requirements on the driver side.

That would be a one-liner, if I'm not completely mistaken.
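
For what it's worth, a rough sketch of what that driver-side tweak could 
look like (the helper name and the "only BOs of at least PMD size" 
threshold are purely illustrative, not taken from the patch or from any 
existing driver):

static unsigned long driver_bo_page_alignment(unsigned long num_pages)
{
	/* Number of normal pages in one PMD-level huge page. */
	unsigned long huge_pages = HPAGE_PMD_SIZE >> PAGE_SHIFT;

	/* Only over-align objects large enough to hold a huge page. */
	return num_pages >= huge_pages ? huge_pages : 0;
}

The driver would then pass the returned value as the page_alignment 
argument to ttm_bo_init() when creating the buffer object.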

Regards,
Christian.

On 27.11.19 09:31, Thomas Hellström (VMware) wrote:
> From: Thomas Hellstrom <thellstrom at vmware.com>
>
> Using huge page-table entries requires that the start of a buffer object
> is huge page size aligned. So introduce a ttm_bo_man_get_node_huge()
> function that attempts to accomplish this for allocations that are larger
> than the huge page size, and provide a new range-manager instance that
> uses that function.
>
> Cc: Andrew Morton <akpm at linux-foundation.org>
> Cc: Michal Hocko <mhocko at suse.com>
> Cc: "Matthew Wilcox (Oracle)" <willy at infradead.org>
> Cc: "Kirill A. Shutemov" <kirill.shutemov at linux.intel.com>
> Cc: Ralph Campbell <rcampbell at nvidia.com>
> Cc: "Jérôme Glisse" <jglisse at redhat.com>
> Cc: "Christian König" <christian.koenig at amd.com>
> Signed-off-by: Thomas Hellstrom <thellstrom at vmware.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo_manager.c | 92 ++++++++++++++++++++++++++++
>   include/drm/ttm/ttm_bo_driver.h      |  1 +
>   2 files changed, 93 insertions(+)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> index 18d3debcc949..26aa1a2ae7f1 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
> @@ -89,6 +89,89 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
>   	return 0;
>   }
>   
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +static int ttm_bo_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
> +				 unsigned long align_pages,
> +				 const struct ttm_place *place,
> +				 struct ttm_mem_reg *mem,
> +				 unsigned long lpfn,
> +				 enum drm_mm_insert_mode mode)
> +{
> +	if (align_pages >= mem->page_alignment &&
> +	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
> +		return drm_mm_insert_node_in_range(mm, node,
> +						   mem->num_pages,
> +						   align_pages, 0,
> +						   place->fpfn, lpfn, mode);
> +	}
> +
> +	return -ENOSPC;
> +}
> +
> +static int ttm_bo_man_get_node_huge(struct ttm_mem_type_manager *man,
> +				    struct ttm_buffer_object *bo,
> +				    const struct ttm_place *place,
> +				    struct ttm_mem_reg *mem)
> +{
> +	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
> +	struct drm_mm *mm = &rman->mm;
> +	struct drm_mm_node *node;
> +	unsigned long align_pages;
> +	unsigned long lpfn;
> +	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
> +	int ret;
> +
> +	node = kzalloc(sizeof(*node), GFP_KERNEL);
> +	if (!node)
> +		return -ENOMEM;
> +
> +	lpfn = place->lpfn;
> +	if (!lpfn)
> +		lpfn = man->size;
> +
> +	mode = DRM_MM_INSERT_BEST;
> +	if (place->flags & TTM_PL_FLAG_TOPDOWN)
> +		mode = DRM_MM_INSERT_HIGH;
> +
> +	spin_lock(&rman->lock);
> +	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
> +		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
> +		if (mem->num_pages >= align_pages) {
> +			ret = ttm_bo_insert_aligned(mm, node, align_pages,
> +						    place, mem, lpfn, mode);
> +			if (!ret)
> +				goto found_unlock;
> +		}
> +	}
> +
> +	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
> +	if (mem->num_pages >= align_pages) {
> +		ret = ttm_bo_insert_aligned(mm, node, align_pages, place, mem,
> +					    lpfn, mode);
> +		if (!ret)
> +			goto found_unlock;
> +	}
> +
> +	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
> +					  mem->page_alignment, 0,
> +					  place->fpfn, lpfn, mode);
> +found_unlock:
> +	spin_unlock(&rman->lock);
> +
> +	if (unlikely(ret)) {
> +		kfree(node);
> +	} else {
> +		mem->mm_node = node;
> +		mem->start = node->start;
> +	}
> +
> +	return 0;
> +}
> +#else
> +#define ttm_bo_man_get_node_huge ttm_bo_man_get_node
> +#endif
> +
> +
>   static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
>   				struct ttm_mem_reg *mem)
>   {
> @@ -154,3 +237,12 @@ const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
>   	.debug = ttm_bo_man_debug
>   };
>   EXPORT_SYMBOL(ttm_bo_manager_func);
> +
> +const struct ttm_mem_type_manager_func ttm_bo_manager_huge_func = {
> +	.init = ttm_bo_man_init,
> +	.takedown = ttm_bo_man_takedown,
> +	.get_node = ttm_bo_man_get_node_huge,
> +	.put_node = ttm_bo_man_put_node,
> +	.debug = ttm_bo_man_debug
> +};
> +EXPORT_SYMBOL(ttm_bo_manager_huge_func);
> diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
> index cac7a8a0825a..868bd0d4be6a 100644
> --- a/include/drm/ttm/ttm_bo_driver.h
> +++ b/include/drm/ttm/ttm_bo_driver.h
> @@ -888,5 +888,6 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
>   pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
>   
>   extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
> +extern const struct ttm_mem_type_manager_func ttm_bo_manager_huge_func;
>   
>   #endif
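
For context, a rough sketch of how a driver would opt in to the new 
manager: in its ttm_bo_driver::init_mem_type() hook it would point a 
memory type at ttm_bo_manager_huge_func instead of ttm_bo_manager_func. 
The VRAM-only switch and the flag/caching choices below are illustrative 
assumptions, not part of the patch:

static int driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_VRAM:
		/* Use the huge page aligning range manager for VRAM. */
		man->func = &ttm_bo_manager_huge_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		/* Other placements (system, GTT, ...) omitted here. */
		return -EINVAL;
	}
	return 0;
}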


