[PATCH 1/5] drm/ttm: use an operation ctx for ttm_mem_global_alloc_page

Christian König ckoenig.leichtzumerken at gmail.com
Thu Dec 21 10:02:31 UTC 2017


Am 21.12.2017 um 10:42 schrieb Roger He:
> Forward the operation context to ttm_mem_global_alloc_page as well;
> the ultimate goal is to enable swapout for per vm BOs.

As Thomas also commented, it would be better to write "reserved BOs" in the
commit message.

The term "per vm BOs" is something completely amdgpu specific.

With that fixed, the patch is Reviewed-by: Christian König 
<christian.koenig at amd.com>.
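
For any other populate path that still calls ttm_mem_global_alloc_page()
directly, the caller-side change follows the same pattern the patch uses in
ttm_pool_populate() below: build a default context and hand it down. Roughly
(just a sketch; mem_glob and page stand in for the driver's own variables):

	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/* account the page against the global limit, using the context */
	ret = ttm_mem_global_alloc_page(mem_glob, page, PAGE_SIZE, &ctx);
	if (unlikely(ret != 0))
		return ret;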

Regards,
Christian.

>
> Per vm BOs refer to all BOs which share the same reservation object.
>
> Change-Id: I4104a12e09a374b6477a0dd5a8fce26dce27a746
> Signed-off-by: Roger He <Hongbo.He at amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_memory.c         | 10 +++-------
>   drivers/gpu/drm/ttm/ttm_page_alloc.c     |  6 +++++-
>   drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  8 ++++++--
>   include/drm/ttm/ttm_memory.h             |  3 ++-
>   4 files changed, 16 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
> index 525d3b6..102b326 100644
> --- a/drivers/gpu/drm/ttm/ttm_memory.c
> +++ b/drivers/gpu/drm/ttm/ttm_memory.c
> @@ -539,14 +539,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
>   EXPORT_SYMBOL(ttm_mem_global_alloc);
>   
>   int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
> -			      struct page *page, uint64_t size)
> +			      struct page *page, uint64_t size,
> +			      struct ttm_operation_ctx *ctx)
>   {
> -
>   	struct ttm_mem_zone *zone = NULL;
> -	struct ttm_operation_ctx ctx = {
> -		.interruptible = false,
> -		.no_wait_gpu = false
> -	};
>   
>   	/**
>   	 * Page allocations may be registed in a single zone
> @@ -560,7 +556,7 @@ int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
>   	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
>   		zone = glob->zone_kernel;
>   #endif
> -	return ttm_mem_global_alloc_zone(glob, zone, size, &ctx);
> +	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
>   }
>   
>   void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> index b5ba644..8f93ff3 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
> @@ -1061,6 +1061,10 @@ void ttm_page_alloc_fini(void)
>   int ttm_pool_populate(struct ttm_tt *ttm)
>   {
>   	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
> +	struct ttm_operation_ctx ctx = {
> +		.interruptible = false,
> +		.no_wait_gpu = false
> +	};
>   	unsigned i;
>   	int ret;
>   
> @@ -1076,7 +1080,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
>   
>   	for (i = 0; i < ttm->num_pages; ++i) {
>   		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> -						PAGE_SIZE);
> +						PAGE_SIZE, &ctx);
>   		if (unlikely(ret != 0)) {
>   			ttm_pool_unpopulate(ttm);
>   			return -ENOMEM;
> diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> index bda00b2..8aac86a 100644
> --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
> @@ -927,6 +927,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
>   {
>   	struct ttm_tt *ttm = &ttm_dma->ttm;
>   	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
> +	struct ttm_operation_ctx ctx = {
> +		.interruptible = false,
> +		.no_wait_gpu = false
> +	};
>   	unsigned long num_pages = ttm->num_pages;
>   	struct dma_pool *pool;
>   	enum pool_type type;
> @@ -962,7 +966,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
>   			break;
>   
>   		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> -						pool->size);
> +						pool->size, &ctx);
>   		if (unlikely(ret != 0)) {
>   			ttm_dma_unpopulate(ttm_dma, dev);
>   			return -ENOMEM;
> @@ -998,7 +1002,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
>   		}
>   
>   		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
> -						pool->size);
> +						pool->size, &ctx);
>   		if (unlikely(ret != 0)) {
>   			ttm_dma_unpopulate(ttm_dma, dev);
>   			return -ENOMEM;
> diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
> index 755c107..8936285 100644
> --- a/include/drm/ttm/ttm_memory.h
> +++ b/include/drm/ttm/ttm_memory.h
> @@ -84,7 +84,8 @@ extern int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
>   extern void ttm_mem_global_free(struct ttm_mem_global *glob,
>   				uint64_t amount);
>   extern int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
> -				     struct page *page, uint64_t size);
> +				     struct page *page, uint64_t size,
> +				     struct ttm_operation_ctx *ctx);
>   extern void ttm_mem_global_free_page(struct ttm_mem_global *glob,
>   				     struct page *page, uint64_t size);
>   extern size_t ttm_round_pot(size_t size);


