[Nouveau] [PATCH 4/5] drm/ttm: add ttm_sg_tt_init

Daniel Vetter daniel at ffwll.ch
Tue Mar 6 09:19:07 UTC 2018


On Tue, Feb 27, 2018 at 01:07:06PM +0100, Christian König wrote:
> Hi guys,
> 
> At least on amdgpu and radeon, the page array allocated by ttm_dma_tt_init is
> completely unused in the case of DMA-buf sharing, so I'm trying to get rid
> of it by allocating only the DMA address array.
> 
> Now the only other user of DMA-buf together with ttm_dma_tt_init is Nouveau,
> so my question is: are you guys using the page array anywhere in your kernel
> driver in the case of DMA-buf sharing?
> 
> If not, I could just make this the default behavior for all drivers and
> save quite a bit of memory for everybody.
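
(For scale: with 4 KiB pages and 64-bit pointers the unused page array costs
8 bytes per page, i.e. roughly 2 MiB of struct page pointers per GiB of
imported buffer, on top of the dma_addr_t array that is actually needed.)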

+1 on teaching ttm to no longer look at the struct page * entries in the
dma-buf sgt, but only at the DMA addresses.

If there's still some need for in-kernel CPU or userspace mmap access, then
imo ttm needs to be fixed to delegate all of that to the right dma-buf
interfaces. The ttm abstraction is already there, it's just not passed
through.
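
To make that concrete, here is a rough sketch (not from the patch; the helper
name is made up) of what "only the DMA addresses" looks like on the driver
side: walk the imported sg_table with the normal scatterlist iterators and
fill a dma_addr_t array, without ever dereferencing sg_page():

	#include <linux/scatterlist.h>

	/* Sketch only: fill a dma_addr_t array from an imported dma-buf
	 * sg_table without touching the struct page pointers at all.
	 */
	static void fill_dma_addrs_from_sgt(struct sg_table *sgt,
					    dma_addr_t *addrs,
					    unsigned long num_pages)
	{
		struct scatterlist *sg;
		unsigned long idx = 0;
		unsigned int i;

		for_each_sg(sgt->sgl, sg, sgt->nents, i) {
			dma_addr_t addr = sg_dma_address(sg);
			long len = sg_dma_len(sg);

			/* one array entry per PAGE_SIZE chunk of the segment */
			while (len > 0 && idx < num_pages) {
				addrs[idx++] = addr;
				addr += PAGE_SIZE;
				len -= PAGE_SIZE;
			}
		}
	}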

I don't pretend to know enough of the details to review this stuff :-)
-Daniel

> 
> Thanks,
> Christian.
> 
> On 27.02.2018 at 12:49, Christian König wrote:
> > This allows drivers to only allocate dma addresses, but not a page
> > array.
> > 
> > Signed-off-by: Christian König <christian.koenig at amd.com>
> > ---
> >   drivers/gpu/drm/ttm/ttm_tt.c | 54 ++++++++++++++++++++++++++++++++++++--------
> >   include/drm/ttm/ttm_tt.h     |  2 ++
> >   2 files changed, 47 insertions(+), 9 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
> > index 8e0b525cda00..971133106ec2 100644
> > --- a/drivers/gpu/drm/ttm/ttm_tt.c
> > +++ b/drivers/gpu/drm/ttm/ttm_tt.c
> > @@ -108,6 +108,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
> >   	return 0;
> >   }
> > +static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
> > +{
> > +	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
> > +					  sizeof(*ttm->dma_address),
> > +					  GFP_KERNEL | __GFP_ZERO);
> > +	if (!ttm->dma_address)
> > +		return -ENOMEM;
> > +	return 0;
> > +}
> > +
> >   #ifdef CONFIG_X86
> >   static inline int ttm_tt_set_page_caching(struct page *p,
> >   					  enum ttm_caching_state c_old,
> > @@ -227,8 +237,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
> >   	ttm->func->destroy(ttm);
> >   }
> > -int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> > -		unsigned long size, uint32_t page_flags)
> > +void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> > +			unsigned long size, uint32_t page_flags)
> >   {
> >   	ttm->bdev = bdev;
> >   	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
> > @@ -236,6 +246,12 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> >   	ttm->page_flags = page_flags;
> >   	ttm->state = tt_unpopulated;
> >   	ttm->swap_storage = NULL;
> > +}
> > +
> > +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> > +		unsigned long size, uint32_t page_flags)
> > +{
> > +	ttm_tt_init_fields(ttm, bdev, size, page_flags);
> >   	if (ttm_tt_alloc_page_directory(ttm)) {
> >   		ttm_tt_destroy(ttm);
> > @@ -258,12 +274,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> >   {
> >   	struct ttm_tt *ttm = &ttm_dma->ttm;
> > -	ttm->bdev = bdev;
> > -	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
> > -	ttm->caching_state = tt_cached;
> > -	ttm->page_flags = page_flags;
> > -	ttm->state = tt_unpopulated;
> > -	ttm->swap_storage = NULL;
> > +	ttm_tt_init_fields(ttm, bdev, size, page_flags);
> >   	INIT_LIST_HEAD(&ttm_dma->pages_list);
> >   	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
> > @@ -275,11 +286,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> >   }
> >   EXPORT_SYMBOL(ttm_dma_tt_init);
> > +int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> > +		   unsigned long size, uint32_t page_flags)
> > +{
> > +	struct ttm_tt *ttm = &ttm_dma->ttm;
> > +	int ret;
> > +
> > +	ttm_tt_init_fields(ttm, bdev, size, page_flags);
> > +
> > +	INIT_LIST_HEAD(&ttm_dma->pages_list);
> > +	if (page_flags & TTM_PAGE_FLAG_SG)
> > +		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
> > +	else
> > +		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
> > +	if (ret) {
> > +		ttm_tt_destroy(ttm);
> > +		pr_err("Failed allocating page table\n");
> > +		return -ENOMEM;
> > +	}
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL(ttm_sg_tt_init);
> > +
> >   void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
> >   {
> >   	struct ttm_tt *ttm = &ttm_dma->ttm;
> > -	kvfree(ttm->pages);
> > +	if (ttm->pages)
> > +		kvfree(ttm->pages);
> > +	else
> > +		kvfree(ttm_dma->dma_address);
> >   	ttm->pages = NULL;
> >   	ttm_dma->dma_address = NULL;
> >   }
> > diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
> > index 9c78556b488e..1cf316a4257c 100644
> > --- a/include/drm/ttm/ttm_tt.h
> > +++ b/include/drm/ttm/ttm_tt.h
> > @@ -163,6 +163,8 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
> >   		unsigned long size, uint32_t page_flags);
> >   int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> >   		    unsigned long size, uint32_t page_flags);
> > +int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
> > +		   unsigned long size, uint32_t page_flags);
> >   /**
> >    * ttm_tt_fini
> 
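
For reference, a driver-side usage sketch of the new helper (hypothetical
function and backend names, not part of the patch): when the backend knows
the BO is a dma-buf import it passes TTM_PAGE_FLAG_SG and only the
dma_address array gets allocated:

	#include <linux/slab.h>
	#include <drm/ttm/ttm_tt.h>

	/* Hypothetical driver code, only to show how ttm_sg_tt_init slots in. */
	static struct ttm_tt *my_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags)
	{
		struct ttm_dma_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;

		tt->ttm.func = &my_backend_func;	/* the driver's ttm_backend_func */

		/* With TTM_PAGE_FLAG_SG set this allocates only dma_address;
		 * with it cleared it behaves like ttm_dma_tt_init.
		 */
		if (ttm_sg_tt_init(tt, bdev, size, page_flags)) {
			kfree(tt);
			return NULL;
		}
		return &tt->ttm;
	}

ttm_dma_tt_fini then frees whichever array was actually allocated.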

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

