[Nouveau] [PATCH 4/5] drm/ttm: add ttm_sg_tt_init

Ben Skeggs bskeggs at redhat.com
Mon Mar 5 20:55:25 UTC 2018


On Mon, Mar 5, 2018 at 10:06 PM, Christian König
<christian.koenig at amd.com> wrote:
> Ping?
On a quick look, it appears both are used in some cases.  I believe
this had something to do with the addition of Tegra support, but I'll
need some time to look again into the details of why/how these things
are used.
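
For reference, the place where both arrays typically get filled on a
DMA-buf import is drm_prime_sg_to_page_addr_arrays().  Below is a
minimal sketch of such an import step (the mydrv_* names are
hypothetical and illustrative only, not Nouveau's actual code); whether
pages[] is ever read after this call is exactly what needs auditing:

    #include <linux/scatterlist.h>
    #include <drm/drm_prime.h>
    #include <drm/ttm/ttm_tt.h>

    /*
     * Illustrative populate step for a DMA-buf import: the imported
     * sg_table is expanded into both the page array and the DMA
     * address array.  If pages[] is never read afterwards, it is
     * pure overhead.
     */
    static int mydrv_populate_sg(struct ttm_dma_tt *dma_tt,
                                 struct sg_table *sg)
    {
            struct ttm_tt *ttm = &dma_tt->ttm;

            /* Fills ttm->pages[] and dma_tt->dma_address[] from sg. */
            return drm_prime_sg_to_page_addr_arrays(sg, ttm->pages,
                                                    dma_tt->dma_address,
                                                    ttm->num_pages);
    }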

Ben.

>
>
> Am 27.02.2018 um 13:07 schrieb Christian König:
>>
>> Hi guys,
>>
>> At least on amdgpu and radeon, the page array allocated by ttm_dma_tt_init
>> is completely unused in the case of DMA-buf sharing. So I'm trying to get
>> rid of it by allocating only the DMA address array.
>>
>> Now the only other user of DMA-buf together with ttm_dma_tt_init is
>> Nouveau. So my question is: are you guys using the page array anywhere in
>> your kernel driver in the DMA-buf sharing case?
>>
>> If not, I could just make this the default behavior for all drivers and
>> save quite a bit of memory for everybody.
>>
>> Thanks,
>> Christian.
>>
>> Am 27.02.2018 um 12:49 schrieb Christian König:
>>>
>>> This allows drivers to allocate only the DMA addresses, and not a page
>>> array.
>>>
>>> Signed-off-by: Christian König <christian.koenig at amd.com>
>>> ---
>>>  drivers/gpu/drm/ttm/ttm_tt.c | 54 ++++++++++++++++++++++++++++++++++++--------
>>>  include/drm/ttm/ttm_tt.h     |  2 ++
>>>  2 files changed, 47 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
>>> index 8e0b525cda00..971133106ec2 100644
>>> --- a/drivers/gpu/drm/ttm/ttm_tt.c
>>> +++ b/drivers/gpu/drm/ttm/ttm_tt.c
>>> @@ -108,6 +108,16 @@ static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
>>>      return 0;
>>>  }
>>>
>>> +static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
>>> +{
>>> +    ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
>>> +                      sizeof(*ttm->dma_address),
>>> +                      GFP_KERNEL | __GFP_ZERO);
>>> +    if (!ttm->dma_address)
>>> +        return -ENOMEM;
>>> +    return 0;
>>> +}
>>> +
>>>  #ifdef CONFIG_X86
>>>  static inline int ttm_tt_set_page_caching(struct page *p,
>>>                        enum ttm_caching_state c_old,
>>> @@ -227,8 +237,8 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
>>>      ttm->func->destroy(ttm);
>>>  }
>>>
>>> -int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
>>> -        unsigned long size, uint32_t page_flags)
>>> +void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
>>> +            unsigned long size, uint32_t page_flags)
>>>  {
>>>      ttm->bdev = bdev;
>>>      ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
>>> @@ -236,6 +246,12 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
>>>      ttm->page_flags = page_flags;
>>>      ttm->state = tt_unpopulated;
>>>      ttm->swap_storage = NULL;
>>> +}
>>> +
>>> +int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
>>> +        unsigned long size, uint32_t page_flags)
>>> +{
>>> +    ttm_tt_init_fields(ttm, bdev, size, page_flags);
>>>
>>>      if (ttm_tt_alloc_page_directory(ttm)) {
>>>          ttm_tt_destroy(ttm);
>>> @@ -258,12 +274,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
>>>  {
>>>      struct ttm_tt *ttm = &ttm_dma->ttm;
>>>
>>> -    ttm->bdev = bdev;
>>> -    ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
>>> -    ttm->caching_state = tt_cached;
>>> -    ttm->page_flags = page_flags;
>>> -    ttm->state = tt_unpopulated;
>>> -    ttm->swap_storage = NULL;
>>> +    ttm_tt_init_fields(ttm, bdev, size, page_flags);
>>>
>>>      INIT_LIST_HEAD(&ttm_dma->pages_list);
>>>      if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
>>> @@ -275,11 +286,36 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
>>>  }
>>>  EXPORT_SYMBOL(ttm_dma_tt_init);
>>>
>>> +int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
>>> +           unsigned long size, uint32_t page_flags)
>>> +{
>>> +    struct ttm_tt *ttm = &ttm_dma->ttm;
>>> +    int ret;
>>> +
>>> +    ttm_tt_init_fields(ttm, bdev, size, page_flags);
>>> +
>>> +    INIT_LIST_HEAD(&ttm_dma->pages_list);
>>> +    if (page_flags & TTM_PAGE_FLAG_SG)
>>> +        ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
>>> +    else
>>> +        ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
>>> +    if (ret) {
>>> +        ttm_tt_destroy(ttm);
>>> +        pr_err("Failed allocating page table\n");
>>> +        return -ENOMEM;
>>> +    }
>>> +    return 0;
>>> +}
>>> +EXPORT_SYMBOL(ttm_sg_tt_init);
>>> +
>>>  void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
>>>  {
>>>      struct ttm_tt *ttm = &ttm_dma->ttm;
>>>
>>> -    kvfree(ttm->pages);
>>> +    if (ttm->pages)
>>> +        kvfree(ttm->pages);
>>> +    else
>>> +        kvfree(ttm_dma->dma_address);
>>>      ttm->pages = NULL;
>>>      ttm_dma->dma_address = NULL;
>>>  }
>>> diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
>>> index 9c78556b488e..1cf316a4257c 100644
>>> --- a/include/drm/ttm/ttm_tt.h
>>> +++ b/include/drm/ttm/ttm_tt.h
>>> @@ -163,6 +163,8 @@ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
>>>          unsigned long size, uint32_t page_flags);
>>>  int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
>>>              unsigned long size, uint32_t page_flags);
>>> +int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
>>> +           unsigned long size, uint32_t page_flags);
>>>
>>>  /**
>>>   * ttm_tt_fini
>>
>>
>
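
To illustrate how a driver would pick up the new helper, here is a
minimal sketch of a ttm_tt creation path calling ttm_sg_tt_init()
instead of ttm_dma_tt_init().  The mydrv_* names are hypothetical and
the hook is only modelled loosely on the ttm_tt_create backend callback
of this era, so treat it as an assumption rather than a drop-in patch:

    #include <linux/slab.h>
    #include <drm/ttm/ttm_bo_driver.h>
    #include <drm/ttm/ttm_tt.h>

    /* The driver's backend vtable, assumed to be defined elsewhere. */
    static struct ttm_backend_func mydrv_backend_func;

    static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_bo_device *bdev,
                                              unsigned long size,
                                              uint32_t page_flags)
    {
            struct ttm_dma_tt *dma_tt;

            dma_tt = kzalloc(sizeof(*dma_tt), GFP_KERNEL);
            if (!dma_tt)
                    return NULL;

            dma_tt->ttm.func = &mydrv_backend_func;

            /*
             * For DMA-buf imports (TTM_PAGE_FLAG_SG set) only the
             * dma_address array is allocated; everything else behaves
             * exactly like ttm_dma_tt_init().
             */
            if (ttm_sg_tt_init(dma_tt, bdev, size, page_flags)) {
                    kfree(dma_tt);
                    return NULL;
            }
            return &dma_tt->ttm;
    }

The asymmetric kvfree() in ttm_dma_tt_fini() above follows from the
allocation scheme in this era's ttm_tt.c: ttm_dma_tt_alloc_page_directory()
puts the page and DMA address arrays into a single kvmalloc'ed block
owned by ttm->pages, while ttm_sg_tt_alloc_page_directory() allocates
only dma_address, so whichever pointer is non-NULL owns the memory to
be freed.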

