[Intel-gfx] [PATCH v3 5/5] drm/i915: Add cpu fault handler for mmap_offset

Chris Wilson chris@chris-wilson.co.uk
Tue Oct 15 08:48:19 UTC 2019


Quoting Abdiel Janulgue (2019-10-15 09:37:24)
> Fault handler to handle missing pages for shmem-backed objects.
> 
> v2: bail out of inserting PTEs when failing to insert the
>     fault address
> 
> Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 125 ++++++++++++++++++-----
>  1 file changed, 100 insertions(+), 25 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index 5ee97c64c6d6..92327fa27ec8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -5,6 +5,7 @@
>   */
>  
>  #include <linux/mman.h>
> +#include <linux/pfn_t.h>
>  #include <linux/sizes.h>
>  
>  #include "gt/intel_gt.h"
> @@ -200,6 +201,67 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
>         return view;
>  }
>  
> +static vm_fault_t i915_error_to_vmf_fault(int err)
> +{
> +       switch (err) {
> +       default:
> +               WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
> +               /* fallthrough */
> +       case -EIO: /* shmemfs failure from swap device */
> +       case -EFAULT: /* purged object */
> +       case -ENODEV: /* bad object, how did you get here! */
> +               return VM_FAULT_SIGBUS;
> +
> +       case -ENOSPC: /* shmemfs allocation failure */
> +       case -ENOMEM: /* our allocation failure */
> +               return VM_FAULT_OOM;
> +
> +       case 0:
> +       case -EAGAIN:
> +       case -ERESTARTSYS:
> +       case -EINTR:
> +       case -EBUSY:
> +               /*
> +                * EBUSY is ok: this just means that another thread
> +                * already did the job.
> +                */
> +               return VM_FAULT_NOPAGE;
> +       }
> +}
> +
> +static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
> +{
> +       struct vm_area_struct *area = vmf->vma;
> +       struct i915_mmap_offset *priv = area->vm_private_data;
> +       struct drm_i915_gem_object *obj = priv->obj;
> +       vm_fault_t vmf_ret;
> +       unsigned long i, size = area->vm_end - area->vm_start;
> +       bool write = area->vm_flags & VM_WRITE;
> +       int ret;
> +
> +       /* Sanity check that we allow writing into this object */
> +       if (i915_gem_object_is_readonly(obj) && write)
> +               return VM_FAULT_SIGBUS;
> +
> +       ret = i915_gem_object_pin_pages(obj);
> +       if (ret)
> +               return i915_error_to_vmf_fault(ret);
> +
> +       for (i = 0; i < size >> PAGE_SHIFT; i++) {
> +               struct page *page = i915_gem_object_get_page(obj, i);
> +
> +               vmf_ret = vmf_insert_pfn(area,
> +                                        (unsigned long)area->vm_start + i * PAGE_SIZE,
> +                                        page_to_pfn(page));
> +               if (vmf_ret != VM_FAULT_NOPAGE)
> +                       break;
> +       }
> +
> +       i915_gem_object_unpin_pages(obj);

There's no page reference held here, nor any zapping of the PTEs when we
later release the pages. And no check that we even have a struct page to
insert?
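Roughly what I mean, as a sketch only (not the actual fix), reusing
helpers the driver already has (i915_gem_object_has_struct_page(),
drm_vma_node_unmap()); the i915_mmap_offset/vma_node naming is an
assumption about this series, not checked against it:

	/* In i915_gem_fault_cpu(), before pinning or inserting anything:
	 * refuse objects that have no struct pages backing them.
	 */
	if (!i915_gem_object_has_struct_page(obj))
		return VM_FAULT_SIGBUS;

	/*
	 * And when the backing store is released, zap the userspace PTEs
	 * so the next access refaults and repins the pages, mirroring what
	 * i915_gem_object_release_mmap() does for the GTT view.
	 */
	static void release_mmap_offset(struct drm_i915_gem_object *obj,
					struct i915_mmap_offset *mmo)
	{
		drm_vma_node_unmap(&mmo->vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}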

Another crazy test required (probably not so crazy: just the usual close
race, or forked mempressure, I think).
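By forked mempressure I mean something along these lines (plain libc
sketch, not actual IGT code; ptr/size are assumed to be the CPU mmap of
the object obtained via the new offset):

	#include <string.h>
	#include <sys/mman.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static void forked_mempressure(void *ptr, size_t size)
	{
		const size_t hog_size = 1ull << 30; /* "bigger than RAM" is the idea */
		int i;

		for (i = 0; i < 8; i++) {
			if (fork() == 0) {
				size_t ofs;

				/* fault in every page of the CPU mmap */
				for (ofs = 0; ofs < size; ofs += 4096)
					((volatile char *)ptr)[ofs]++;
				_exit(0);
			}
		}

		/* meanwhile, dirty a large anonymous mapping to force reclaim */
		void *hog = mmap(NULL, hog_size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (hog != MAP_FAILED)
			memset(hog, 0x5a, hog_size);

		while (wait(NULL) > 0)
			;
	}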
-Chris