[PATCH v3 1/1] drm/ttm: Fix COW check

Christian König ckoenig.leichtzumerken at gmail.com
Fri Jul 23 09:09:05 UTC 2021


Am 23.07.21 um 11:00 schrieb Daniel Vetter:
> On Fri, Jul 23, 2021 at 10:33:48AM +0200, Christian König wrote:
>>
>> Am 23.07.21 um 10:21 schrieb Daniel Vetter:
>>> On Wed, Jul 14, 2021 at 10:51 AM Christian König
>>> <christian.koenig at amd.com> wrote:
>>>>
>>>> Am 13.07.21 um 17:28 schrieb Alex Deucher:
>>>>> On Tue, Jul 13, 2021 at 2:57 AM Christian König
>>>>> <ckoenig.leichtzumerken at gmail.com> wrote:
>>>>>> Am 13.07.21 um 00:06 schrieb Felix Kuehling:
>>>>>>> KFD Thunk maps invisible VRAM BOs with PROT_NONE, MAP_PRIVATE.
>>>>>>> is_cow_mapping returns true for these mappings. Add a check for
>>>>>>> vm_flags & VM_WRITE to avoid mmap failures on private read-only or
>>>>>>> PROT_NONE mappings.
>>>>>>>
>>>>>>> v2: protect against mprotect making a mapping writable after the fact
>>>>>>> v3: update driver-specific vm_operations_structs
>>>>>>>
>>>>>>> Fixes: f91142c62161 ("drm/ttm: nuke VM_MIXEDMAP on BO mappings v3")
>>>>>>> Signed-off-by: Felix Kuehling <Felix.Kuehling at amd.com>
>>>>>>> Signed-off-by: Alex Deucher <alexander.deucher at amd.com>
>>>>>> Reviewed-by: Christian König <christian.koenig at amd.com>
>>>>> Are you planning to push this to drm-misc?
>>>> Yes, just didn't find time yesterday.
>>> This is pushed to the wrong tree drm-misc-next-fixes, should have been
>>> in drm-misc-fixes. Please be careful with that because every time that
>>> goes wrong the script gets confused about which the current tree is,
>>> and pushes the wrong tree to linux-next branches.
>>>
>>> I'm going to hard-reset drm-misc-next-fixes now and hope that's good
>>> enough to fix things up (since Thomas is not around all the time for
>>> this merge window).
>> STOP! I'm about to push a revert for this patch.
>>
>> And yes that was pushed to the wrong branch, but it turned out that this was
>> fortunate since the patch doesn't work correctly.
> Well I just hard-reset, so you can push the right patch to the right
> branch now. The trouble is that outside of the merge window no one is
> allowed to push to drm-misc-next-fixes. If you do, then dim pushes
> drm-misc-next-fixes to for-linux-next instead of drm-misc-next, and we
> have bad surprises.

Could we then make the branch read-only for that time?

> Which unfortunately happens like every merge window a few times and always
> takes a few days/weeks to get caught.

Yeah, at least to me it's absolutely not obvious when the merge windows 
for a certain version start/end.

Christian.

> -Daniel
>
>> Christian.
>>
>>> -Daniel
>>>
>>>
>>>> Christian.
>>>>
>>>>> Alex
>>>>>
>>>>>>> ---
>>>>>>>      drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |  3 ++-
>>>>>>>      drivers/gpu/drm/nouveau/nouveau_gem.c    |  3 ++-
>>>>>>>      drivers/gpu/drm/radeon/radeon_gem.c      |  3 ++-
>>>>>>>      drivers/gpu/drm/ttm/ttm_bo_vm.c          | 14 +++++++++++++-
>>>>>>>      drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c |  1 +
>>>>>>>      include/drm/ttm/ttm_bo_api.h             |  4 ++++
>>>>>>>      6 files changed, 24 insertions(+), 4 deletions(-)
>>>>>>>
>>>>>>> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>>>>> index b3404c43a911..1aa750a6a5d2 100644
>>>>>>> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>>>>> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
>>>>>>> @@ -79,7 +79,8 @@ static const struct vm_operations_struct amdgpu_gem_vm_ops = {
>>>>>>>          .fault = amdgpu_gem_fault,
>>>>>>>          .open = ttm_bo_vm_open,
>>>>>>>          .close = ttm_bo_vm_close,
>>>>>>> -     .access = ttm_bo_vm_access
>>>>>>> +     .access = ttm_bo_vm_access,
>>>>>>> +     .mprotect = ttm_bo_vm_mprotect
>>>>>>>      };
>>>>>>>
>>>>>>>      static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
>>>>>>> diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
>>>>>>> index 5b27845075a1..164ea564bb7a 100644
>>>>>>> --- a/drivers/gpu/drm/nouveau/nouveau_gem.c
>>>>>>> +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
>>>>>>> @@ -70,7 +70,8 @@ static const struct vm_operations_struct nouveau_ttm_vm_ops = {
>>>>>>>          .fault = nouveau_ttm_fault,
>>>>>>>          .open = ttm_bo_vm_open,
>>>>>>>          .close = ttm_bo_vm_close,
>>>>>>> -     .access = ttm_bo_vm_access
>>>>>>> +     .access = ttm_bo_vm_access,
>>>>>>> +     .mprotect = ttm_bo_vm_mprotect
>>>>>>>      };
>>>>>>>
>>>>>>>      void
>>>>>>> diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
>>>>>>> index 458f92a70887..c19ad07eb7b5 100644
>>>>>>> --- a/drivers/gpu/drm/radeon/radeon_gem.c
>>>>>>> +++ b/drivers/gpu/drm/radeon/radeon_gem.c
>>>>>>> @@ -77,7 +77,8 @@ static const struct vm_operations_struct radeon_gem_vm_ops = {
>>>>>>>          .fault = radeon_gem_fault,
>>>>>>>          .open = ttm_bo_vm_open,
>>>>>>>          .close = ttm_bo_vm_close,
>>>>>>> -     .access = ttm_bo_vm_access
>>>>>>> +     .access = ttm_bo_vm_access,
>>>>>>> +     .mprotect = ttm_bo_vm_mprotect
>>>>>>>      };
>>>>>>>
>>>>>>>      static void radeon_gem_object_free(struct drm_gem_object *gobj)
>>>>>>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>>>>>>> index f56be5bc0861..fb325bad5db6 100644
>>>>>>> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
>>>>>>> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
>>>>>>> @@ -542,17 +542,29 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
>>>>>>>      }
>>>>>>>      EXPORT_SYMBOL(ttm_bo_vm_access);
>>>>>>>
>>>>>>> +int ttm_bo_vm_mprotect(struct vm_area_struct *vma, unsigned long start,
>>>>>>> +                    unsigned long end, unsigned long newflags)
>>>>>>> +{
>>>>>>> +     /* Enforce no COW since would have really strange behavior with it. */
>>>>>>> +     if (is_cow_mapping(newflags) && (newflags & VM_WRITE))
>>>>>>> +             return -EINVAL;
>>>>>>> +
>>>>>>> +     return 0;
>>>>>>> +}
>>>>>>> +EXPORT_SYMBOL(ttm_bo_vm_mprotect);
>>>>>>> +
>>>>>>>      static const struct vm_operations_struct ttm_bo_vm_ops = {
>>>>>>>          .fault = ttm_bo_vm_fault,
>>>>>>>          .open = ttm_bo_vm_open,
>>>>>>>          .close = ttm_bo_vm_close,
>>>>>>>          .access = ttm_bo_vm_access,
>>>>>>> +     .mprotect = ttm_bo_vm_mprotect,
>>>>>>>      };
>>>>>>>
>>>>>>>      int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
>>>>>>>      {
>>>>>>>          /* Enforce no COW since would have really strange behavior with it. */
>>>>>>> -     if (is_cow_mapping(vma->vm_flags))
>>>>>>> +     if (is_cow_mapping(vma->vm_flags) && (vma->vm_flags & VM_WRITE))
>>>>>>>                  return -EINVAL;
>>>>>>>
>>>>>>>          ttm_bo_get(bo);
>>>>>>> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
>>>>>>> index e6b1f98ec99f..e4bf7dc99320 100644
>>>>>>> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
>>>>>>> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
>>>>>>> @@ -61,6 +61,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
>>>>>>>                  .fault = vmw_bo_vm_fault,
>>>>>>>                  .open = ttm_bo_vm_open,
>>>>>>>                  .close = ttm_bo_vm_close,
>>>>>>> +             .mprotect = ttm_bo_vm_mprotect,
>>>>>>>      #ifdef CONFIG_TRANSPARENT_HUGEPAGE
>>>>>>>                  .huge_fault = vmw_bo_vm_huge_fault,
>>>>>>>      #endif
>>>>>>> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
>>>>>>> index f681bbdbc698..40eb95875355 100644
>>>>>>> --- a/include/drm/ttm/ttm_bo_api.h
>>>>>>> +++ b/include/drm/ttm/ttm_bo_api.h
>>>>>>> @@ -605,6 +605,10 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
>>>>>>>
>>>>>>>      int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
>>>>>>>                       void *buf, int len, int write);
>>>>>>> +
>>>>>>> +int ttm_bo_vm_mprotect(struct vm_area_struct *vma, unsigned long start,
>>>>>>> +                    unsigned long end, unsigned long newflags);
>>>>>>> +
>>>>>>>      bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
>>>>>>>
>>>>>>>      vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);



More information about the amd-gfx mailing list