[PATCH v3 01/12] drm: Add dummy page per device or GEM object

Andrey Grodzovsky Andrey.Grodzovsky at amd.com
Mon Nov 23 04:54:11 UTC 2020


On 11/21/20 9:15 AM, Christian König wrote:
> Am 21.11.20 um 06:21 schrieb Andrey Grodzovsky:
>> Will be used to reroute CPU mapped BO's page faults once
>> device is removed.
>
> Uff, one page for each exported DMA-buf? That's not something we can do.
>
> We need to find a different approach here.
>
> Can't we call alloc_page() on each fault and link them together so they are 
> freed when the device is finally reaped?


For sure it is better to optimize and allocate on demand, only when we actually hit this corner
case, but why the linking?
Shouldn't drm_prime_gem_destroy() be a good enough place to free the page?
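
Roughly what I have in mind is the sketch below (untested, not part of the patch):
keep the dummy page on the GEM object, allocate it from the fault handler only when
a fault actually comes in after device removal, and free it in drm_prime_gem_destroy().
The drm_gem_dummy_fault name is made up, the hookup into the driver's vm_ops and any
locking against concurrent faults are left out, and it assumes vma->vm_private_data
points at the GEM object the way drm_gem_mmap() sets it up.

static vm_fault_t drm_gem_dummy_fault(struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vmf->vma->vm_private_data;

        /* Allocate the dummy page only on the first fault after the
         * device is gone, instead of up front for every import. */
        if (!obj->dummy_page) {
                obj->dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!obj->dummy_page)
                        return VM_FAULT_OOM;
        }

        /* Back every faulting address with the same zeroed page. */
        return vmf_insert_pfn(vmf->vma, vmf->address,
                              page_to_pfn(obj->dummy_page));
}

and then in drm_prime_gem_destroy():

        /* Free only what a fault actually allocated. */
        if (obj->dummy_page)
                __free_page(obj->dummy_page);

With the page hanging off the object like this there is nothing to link together,
the object teardown path already knows when to release it.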

Andrey


>
> Regards,
> Christian.
>
>>
>> Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky at amd.com>
>> ---
>>   drivers/gpu/drm/drm_file.c  |  8 ++++++++
>>   drivers/gpu/drm/drm_prime.c | 10 ++++++++++
>>   include/drm/drm_file.h      |  2 ++
>>   include/drm/drm_gem.h       |  2 ++
>>   4 files changed, 22 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
>> index 0ac4566..ff3d39f 100644
>> --- a/drivers/gpu/drm/drm_file.c
>> +++ b/drivers/gpu/drm/drm_file.c
>> @@ -193,6 +193,12 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
>>               goto out_prime_destroy;
>>       }
>>
>> +    file->dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
>> +    if (!file->dummy_page) {
>> +        ret = -ENOMEM;
>> +        goto out_prime_destroy;
>> +    }
>> +
>>       return file;
>>
>>   out_prime_destroy:
>> @@ -289,6 +295,8 @@ void drm_file_free(struct drm_file *file)
>>       if (dev->driver->postclose)
>>           dev->driver->postclose(dev, file);
>>
>> +    __free_page(file->dummy_page);
>> +
>>       drm_prime_destroy_file_private(&file->prime);
>>
>>       WARN_ON(!list_empty(&file->event_list));
>> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
>> index 1693aa7..987b45c 100644
>> --- a/drivers/gpu/drm/drm_prime.c
>> +++ b/drivers/gpu/drm/drm_prime.c
>> @@ -335,6 +335,13 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
>>
>>       ret = drm_prime_add_buf_handle(&file_priv->prime,
>>               dma_buf, *handle);
>> +
>> +    if (!ret) {
>> +        obj->dummy_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
>> +        if (!obj->dummy_page)
>> +            ret = -ENOMEM;
>> +    }
>> +
>>       mutex_unlock(&file_priv->prime.lock);
>>       if (ret)
>>           goto fail;
>> @@ -1020,6 +1027,9 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
>>           dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
>>       dma_buf = attach->dmabuf;
>>       dma_buf_detach(attach->dmabuf, attach);
>> +
>> +    __free_page(obj->dummy_page);
>> +
>>       /* remove the reference */
>>       dma_buf_put(dma_buf);
>>   }
>> diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
>> index 716990b..2a011fc 100644
>> --- a/include/drm/drm_file.h
>> +++ b/include/drm/drm_file.h
>> @@ -346,6 +346,8 @@ struct drm_file {
>>        */
>>       struct drm_prime_file_private prime;
>>
>> +    struct page *dummy_page;
>> +
>>       /* private: */
>>   #if IS_ENABLED(CONFIG_DRM_LEGACY)
>>       unsigned long lock_count; /* DRI1 legacy lock count */
>> diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
>> index 337a483..76a97a3 100644
>> --- a/include/drm/drm_gem.h
>> +++ b/include/drm/drm_gem.h
>> @@ -311,6 +311,8 @@ struct drm_gem_object {
>>        *
>>        */
>>       const struct drm_gem_object_funcs *funcs;
>> +
>> +    struct page *dummy_page;
>>   };
>>
>>   /**
>

