[Intel-gfx] [PATCH 36/39] drm/i915: Use reservation_object to coordinate userptr get_pages()
Daniel Vetter
daniel at ffwll.ch
Thu Aug 8 07:30:49 UTC 2019
Adding a pile of people since there's lots of discussions going on around this.
-Daniel
On Wed, Aug 7, 2019 at 10:50 PM Daniel Vetter <daniel at ffwll.ch> wrote:
>
> On Fri, Jun 14, 2019 at 08:10:20AM +0100, Chris Wilson wrote:
> > Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
>
> Not sure this works, 2 thoughts way down ...
>
> > ---
> > drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 27 +--
> > .../gpu/drm/i915/gem/i915_gem_execbuffer.c | 3 -
> > drivers/gpu/drm/i915/gem/i915_gem_internal.c | 30 +--
> > .../gpu/drm/i915/gem/i915_gem_object_types.h | 11 +-
> > drivers/gpu/drm/i915/gem/i915_gem_pages.c | 137 ++++++++++-
> > drivers/gpu/drm/i915/gem/i915_gem_phys.c | 33 ++-
> > drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 30 +--
> > drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 25 +-
> > drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 221 ++++--------------
> > .../drm/i915/gem/selftests/huge_gem_object.c | 17 +-
> > .../gpu/drm/i915/gem/selftests/huge_pages.c | 45 ++--
> > drivers/gpu/drm/i915/gvt/dmabuf.c | 17 +-
> > drivers/gpu/drm/i915/i915_drv.h | 9 +-
> > drivers/gpu/drm/i915/i915_gem.c | 5 +-
> > drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 14 +-
> > 15 files changed, 300 insertions(+), 324 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > index 84992d590da5..a44d6d2ef7ed 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
> > @@ -225,33 +225,24 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
> > return drm_gem_dmabuf_export(dev, &exp_info);
> > }
> >
> > -static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +dmabuf_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > - struct sg_table *pages;
> > - unsigned int sg_page_sizes;
> > -
> > - pages = dma_buf_map_attachment(obj->base.import_attach,
> > - DMA_BIDIRECTIONAL);
> > - if (IS_ERR(pages))
> > - return PTR_ERR(pages);
> > -
> > - sg_page_sizes = i915_sg_page_sizes(pages->sgl);
> > -
> > - __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
> > -
> > - return 0;
> > + return dma_buf_map_attachment(ctx->object->base.import_attach,
> > + DMA_BIDIRECTIONAL);
> > }
> >
> > -static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages)
> > +static void dmabuf_put_pages(struct drm_i915_gem_object *obj,
> > + struct sg_table *pages)
> > {
> > dma_buf_unmap_attachment(obj->base.import_attach, pages,
> > DMA_BIDIRECTIONAL);
> > }
> >
> > static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
> > - .get_pages = i915_gem_object_get_pages_dmabuf,
> > - .put_pages = i915_gem_object_put_pages_dmabuf,
> > + .get_pages = dmabuf_get_pages,
> > + .put_pages = dmabuf_put_pages,
> > };
> >
> > struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> > index 44bcb681c168..68faf1a71c97 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
> > @@ -1784,9 +1784,6 @@ static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
> > goto out;
> > }
> >
> > - /* A frequent cause for EAGAIN are currently unavailable client pages */
> > - flush_workqueue(eb->i915->mm.userptr_wq);
> > -
> > err = i915_mutex_lock_interruptible(dev);
> > if (err) {
> > mutex_lock(&dev->struct_mutex);
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> > index 0c41e04ab8fa..aa0bd5de313b 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
> > @@ -32,11 +32,14 @@ static void internal_free_pages(struct sg_table *st)
> > kfree(st);
> > }
> >
> > -static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +internal_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > - struct sg_table *st;
> > struct scatterlist *sg;
> > + struct sg_table *st;
> > unsigned int sg_page_sizes;
> > unsigned int npages;
> > int max_order;
> > @@ -66,12 +69,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
> > create_st:
> > st = kmalloc(sizeof(*st), GFP_KERNEL);
> > if (!st)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > npages = obj->base.size / PAGE_SIZE;
> > if (sg_alloc_table(st, npages, GFP_KERNEL)) {
> > kfree(st);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > sg = st->sgl;
> > @@ -117,27 +120,26 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
> > goto err;
> > }
> >
> > - /* Mark the pages as dontneed whilst they are still pinned. As soon
> > + /*
> > + * Mark the pages as dontneed whilst they are still pinned. As soon
> > * as they are unpinned they are allowed to be reaped by the shrinker,
> > * and the caller is expected to repopulate - the contents of this
> > * object are only valid whilst active and pinned.
> > */
> > obj->mm.madv = I915_MADV_DONTNEED;
> >
> > - __i915_gem_object_set_pages(obj, st, sg_page_sizes);
> > -
> > - return 0;
> > + *sizes = sg_page_sizes;
> > + return st;
> >
> > err:
> > sg_set_page(sg, NULL, 0, 0);
> > sg_mark_end(sg);
> > internal_free_pages(st);
> > -
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > -static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages)
> > +static void internal_put_pages(struct drm_i915_gem_object *obj,
> > + struct sg_table *pages)
> > {
> > i915_gem_gtt_finish_pages(obj, pages);
> > internal_free_pages(pages);
> > @@ -149,8 +151,8 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
> > static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
> > .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
> > I915_GEM_OBJECT_IS_SHRINKABLE,
> > - .get_pages = i915_gem_object_get_pages_internal,
> > - .put_pages = i915_gem_object_put_pages_internal,
> > + .get_pages = internal_get_pages,
> > + .put_pages = internal_put_pages,
> > };
> >
> > /**
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> > index f792953b8a71..0ea404cfbc1c 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> > @@ -19,6 +19,7 @@
> >
> > struct drm_i915_gem_object;
> > struct intel_fronbuffer;
> > +struct task_struct;
> >
> > /*
> > * struct i915_lut_handle tracks the fast lookups from handle to vma used
> > @@ -32,6 +33,11 @@ struct i915_lut_handle {
> > u32 handle;
> > };
> >
> > +struct i915_gem_object_get_pages_context {
> > + struct drm_i915_gem_object *object;
> > + struct task_struct *task;
> > +};
> > +
> > struct drm_i915_gem_object_ops {
> > unsigned int flags;
> > #define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
> > @@ -52,9 +58,11 @@ struct drm_i915_gem_object_ops {
> > * being released or under memory pressure (where we attempt to
> > * reap pages for the shrinker).
> > */
> > - int (*get_pages)(struct drm_i915_gem_object *obj);
> > + struct sg_table *(*get_pages)(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes);
> > void (*put_pages)(struct drm_i915_gem_object *obj,
> > struct sg_table *pages);
> > +
> > void (*truncate)(struct drm_i915_gem_object *obj);
> > void (*writeback)(struct drm_i915_gem_object *obj);
> >
> > @@ -252,7 +260,6 @@ struct drm_i915_gem_object {
> >
> > struct i915_mm_struct *mm;
> > struct i915_mmu_object *mmu_object;
> > - struct work_struct *work;
> > } userptr;
> >
> > unsigned long scratch;
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> > index 6bec301cee79..f65a983248c6 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
> > @@ -8,16 +8,49 @@
> > #include "i915_gem_object.h"
> > #include "i915_scatterlist.h"
> >
> > -void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages,
> > - unsigned int sg_page_sizes)
> > +static DEFINE_SPINLOCK(fence_lock);
> > +
> > +struct get_pages_work {
> > + struct dma_fence dma; /* Must be first for dma_fence_free() */
> > + struct i915_sw_fence wait;
> > + struct work_struct work;
> > + struct i915_gem_object_get_pages_context ctx;
> > +};
> > +
> > +static const char *get_pages_work_driver_name(struct dma_fence *fence)
> > +{
> > + return DRIVER_NAME;
> > +}
> > +
> > +static const char *get_pages_work_timeline_name(struct dma_fence *fence)
> > +{
> > + return "allocation";
> > +}
> > +
> > +static void get_pages_work_release(struct dma_fence *fence)
> > +{
> > + struct get_pages_work *w = container_of(fence, typeof(*w), dma);
> > +
> > + i915_sw_fence_fini(&w->wait);
> > +
> > + BUILD_BUG_ON(offsetof(typeof(*w), dma));
> > + dma_fence_free(&w->dma);
> > +}
> > +
> > +static const struct dma_fence_ops get_pages_work_ops = {
> > + .get_driver_name = get_pages_work_driver_name,
> > + .get_timeline_name = get_pages_work_timeline_name,
> > + .release = get_pages_work_release,
> > +};
> > +
> > +static void __set_pages(struct drm_i915_gem_object *obj,
> > + struct sg_table *pages,
> > + unsigned int sg_page_sizes)
> > {
> > struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > unsigned long supported = INTEL_INFO(i915)->page_sizes;
> > int i;
> >
> > - lockdep_assert_held(&obj->mm.lock);
> > -
> > /* Make the pages coherent with the GPU (flushing any swapin). */
> > if (obj->cache_dirty) {
> > obj->write_domain = 0;
> > @@ -29,8 +62,6 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> > obj->mm.get_page.sg_pos = pages->sgl;
> > obj->mm.get_page.sg_idx = 0;
> >
> > - obj->mm.pages = pages;
> > -
> > if (i915_gem_object_is_tiled(obj) &&
> > i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
> > GEM_BUG_ON(obj->mm.quirked);
> > @@ -38,7 +69,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> > obj->mm.quirked = true;
> > }
> >
> > - GEM_BUG_ON(!sg_page_sizes);
> > + if (!sg_page_sizes)
> > + sg_page_sizes = i915_sg_page_sizes(pages->sgl);
> > obj->mm.page_sizes.phys = sg_page_sizes;
> >
> > /*
> > @@ -73,18 +105,105 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
> >
> > spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
> > }
> > +}
> >
> > +static void
> > +get_pages_worker(struct work_struct *_work)
> > +{
> > + struct get_pages_work *work = container_of(_work, typeof(*work), work);
> > + struct drm_i915_gem_object *obj = work->ctx.object;
> > + struct sg_table *pages;
> > + unsigned int sizes = 0;
> > +
> > + if (!work->dma.error) {
> > + pages = obj->ops->get_pages(&work->ctx, &sizes);
> > + if (!IS_ERR(pages))
> > + __set_pages(obj, pages, sizes);
> > + else
> > + dma_fence_set_error(&work->dma, PTR_ERR(pages));
> > + } else {
> > + pages = ERR_PTR(work->dma.error);
> > + }
> > +
> > + obj->mm.pages = pages;
> > complete_all(&obj->mm.completion);
> > + atomic_dec(&obj->mm.pages_pin_count);
> > +
> > + i915_gem_object_put(obj);
> > + put_task_struct(work->ctx.task);
> > +
> > + dma_fence_signal(&work->dma);
> > + dma_fence_put(&work->dma);
> > +}
> > +
> > +static int __i915_sw_fence_call
> > +get_pages_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
> > +{
> > + struct get_pages_work *w = container_of(fence, typeof(*w), wait);
> > +
> > + switch (state) {
> > + case FENCE_COMPLETE:
> > + if (fence->error)
> > + dma_fence_set_error(&w->dma, fence->error);
> > + queue_work(system_unbound_wq, &w->work);
> > + break;
> > +
> > + case FENCE_FREE:
> > + dma_fence_put(&w->dma);
> > + break;
> > + }
> > +
> > + return NOTIFY_DONE;
> > }
> >
> > int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> > {
> > + struct get_pages_work *work;
> > + int err;
> > +
> > if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
> > DRM_DEBUG("Attempting to obtain a purgeable object\n");
> > return -EFAULT;
> > }
> >
> > - return obj->ops->get_pages(obj);
> > + /* XXX inline? */
> > +
> > + work = kmalloc(sizeof(*work), GFP_KERNEL);
> > + if (!work)
> > + return -ENOMEM;
> > +
> > + dma_fence_init(&work->dma,
> > + &get_pages_work_ops,
> > + &fence_lock,
> > + to_i915(obj->base.dev)->mm.unordered_timeline,
> > + 0);
>
> With all the shrinkers we have (across the tree, not just i915) that just
> blindly assume you can wait for a fence (at least if you manage to acquire
> the reservation lock), I expect this to deadlock. I think the rule is that
> no published fence can depend on any memory allocation anywhere.
>
> Yes it sucks real hard.
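>
> To spell out the cycle I'm worried about, a rough sketch (the shrinker
> entry point here is hypothetical, and the kmalloc() stands in for e.g.
> the sg_table allocations in the get_pages hooks above):
>
>   /* get_pages worker, after the fence has been published: */
>   reservation_object_add_excl_fence(obj->resv, &work->dma);
>   ...
>   st = kmalloc(sizeof(*st), GFP_KERNEL);
>     -> direct reclaim
>       -> some_shrinker_scan()   /* any driver's shrinker */
>         -> reservation_object_wait_timeout_rcu(resv, true, ...)
>            /* may end up waiting on work->dma, which only signals
>             * after the kmalloc() above returns: deadlock */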
>
> > + i915_sw_fence_init(&work->wait, get_pages_notify);
> > +
> > + work->ctx.object = i915_gem_object_get(obj);
> > +
> > + work->ctx.task = current;
> > + get_task_struct(work->ctx.task);
> > +
> > + INIT_WORK(&work->work, get_pages_worker);
> > +
> > + i915_gem_object_lock(obj);
>
> The other bit is nesting the reservation_object lock within obj->mm.lock.
>
> obj->mm.lock is a very neat & tidy answer to dealing with bo alloc vs.
> shrinker madness. But looking at where dma-buf is headed, we'll need to
> have the reservation_object nest outside of obj->mm.lock (because the
> importer is going to acquire that for us, before calling down into
> exporter code).
>
> Of course on the importer side we're currently nesting the other way
> round, and Christian König already set CI on fire figuring that out.
>
> I think what could work is pulling the reservation_object out of
> obj->mm.lock, and entirely bypassing it for imported dma-buf (since in
> that case dealing with shrinker lolz isn't our problem but the
> exporter's, so we don't need the special shrinker lock).
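>
> Roughly, as lock-ordering chains (a sketch; the importer side is where
> dma-buf is headed per the above, not what's in the tree today):
>
>   /* dma-buf importer: */
>   reservation_object_lock(obj->resv, NULL)
>     -> exporter's ->map_dma_buf()
>       -> mutex_lock(&obj->mm.lock)        /* resv outside mm.lock */
>
>   /* this patch: */
>   mutex_lock(&obj->mm.lock)               /* __i915_gem_object_get_pages */
>     -> i915_gem_object_lock(obj)          /* takes obj->resv, so
>                                            * mm.lock outside resv */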
>
> The 3rd part is just me irrationally freaking out when we hide userptr
> behind a wait_completion; that didn't work out well last time around
> because lockdep fails us. But that's really just an aside.
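>
> The classic shape lockdep can't see, sketched (purely hypothetical, just
> to illustrate the completion worry):
>
>   thread A: mutex_lock(&lock); wait_for_completion(&done);
>   thread B: mutex_lock(&lock); ...; complete(&done);
>
> B blocks on A's mutex, A waits on B's completion, and since completions
> carry no lockdep dependencies we only find out at runtime.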
> -Daniel
>
> > + GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv, true));
> > + err = i915_sw_fence_await_reservation(&work->wait,
> > + obj->resv, NULL,
> > + true, I915_FENCE_TIMEOUT,
> > + I915_FENCE_GFP);
> > + if (err == 0) {
> > + reservation_object_add_excl_fence(obj->resv, &work->dma);
> > + atomic_inc(&obj->mm.pages_pin_count);
> > + } else {
> > + dma_fence_set_error(&work->dma, err);
> > + }
> > + i915_gem_object_unlock(obj);
> > +
> > + dma_fence_get(&work->dma);
> > + i915_sw_fence_commit(&work->wait);
> > +
> > + return err;
> > }
> >
> > /* Ensure that the associated pages are gathered from the backing storage
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> > index 2deac933cf59..6b4a5fb52055 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
> > @@ -17,18 +17,21 @@
> > #include "i915_gem_object.h"
> > #include "i915_scatterlist.h"
> >
> > -static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +phys_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct address_space *mapping = obj->base.filp->f_mapping;
> > struct drm_dma_handle *phys;
> > - struct sg_table *st;
> > struct scatterlist *sg;
> > + struct sg_table *st;
> > char *vaddr;
> > int i;
> > int err;
> >
> > if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
> > - return -EINVAL;
> > + return ERR_PTR(-EINVAL);
> >
> > /* Always aligning to the object size, allows a single allocation
> > * to handle all possible callers, and given typical object sizes,
> > @@ -38,7 +41,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
> > roundup_pow_of_two(obj->base.size),
> > roundup_pow_of_two(obj->base.size));
> > if (!phys)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > vaddr = phys->vaddr;
> > for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> > @@ -83,19 +86,16 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
> >
> > obj->phys_handle = phys;
> >
> > - __i915_gem_object_set_pages(obj, st, sg->length);
> > -
> > - return 0;
> > + *sizes = sg->length;
> > + return st;
> >
> > err_phys:
> > drm_pci_free(obj->base.dev, phys);
> > -
> > - return err;
> > + return ERR_PTR(err);
> > }
> >
> > static void
> > -i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages)
> > +phys_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
> > {
> > __i915_gem_object_release_shmem(obj, pages, false);
> >
> > @@ -139,8 +139,8 @@ i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
> > }
> >
> > static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
> > - .get_pages = i915_gem_object_get_pages_phys,
> > - .put_pages = i915_gem_object_put_pages_phys,
> > + .get_pages = phys_get_pages,
> > + .put_pages = phys_put_pages,
> > .release = i915_gem_object_release_phys,
> > };
> >
> > @@ -193,15 +193,12 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
> > if (!IS_ERR_OR_NULL(pages))
> > i915_gem_shmem_ops.put_pages(obj, pages);
> > mutex_unlock(&obj->mm.lock);
> > +
> > + wait_for_completion(&obj->mm.completion);
> > return 0;
> >
> > err_xfer:
> > obj->ops = &i915_gem_shmem_ops;
> > - if (!IS_ERR_OR_NULL(pages)) {
> > - unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
> > -
> > - __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
> > - }
> > err_unlock:
> > mutex_unlock(&obj->mm.lock);
> > return err;
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> > index 19d9ecdb2894..c43304e3bada 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
> > @@ -22,11 +22,13 @@ static void check_release_pagevec(struct pagevec *pvec)
> > cond_resched();
> > }
> >
> > -static int shmem_get_pages(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +shmem_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > const unsigned long page_count = obj->base.size / PAGE_SIZE;
> > - unsigned long i;
> > struct address_space *mapping;
> > struct sg_table *st;
> > struct scatterlist *sg;
> > @@ -37,31 +39,24 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
> > unsigned int sg_page_sizes;
> > struct pagevec pvec;
> > gfp_t noreclaim;
> > + unsigned long i;
> > int ret;
> >
> > - /*
> > - * Assert that the object is not currently in any GPU domain. As it
> > - * wasn't in the GTT, there shouldn't be any way it could have been in
> > - * a GPU cache
> > - */
> > - GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
> > - GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
> > -
> > /*
> > * If there's no chance of allocating enough pages for the whole
> > * object, bail early.
> > */
> > if (page_count > totalram_pages())
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > st = kmalloc(sizeof(*st), GFP_KERNEL);
> > if (!st)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > rebuild_st:
> > if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
> > kfree(st);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > /*
> > @@ -179,9 +174,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
> > if (i915_gem_object_needs_bit17_swizzle(obj))
> > i915_gem_object_do_bit_17_swizzle(obj, st);
> >
> > - __i915_gem_object_set_pages(obj, st, sg_page_sizes);
> > -
> > - return 0;
> > + *sizes = sg_page_sizes;
> > + return st;
> >
> > err_sg:
> > sg_mark_end(sg);
> > @@ -209,7 +203,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
> > if (ret == -ENOSPC)
> > ret = -ENOMEM;
> >
> > - return ret;
> > + return ERR_PTR(ret);
> > }
> >
> > static void
> > @@ -276,8 +270,6 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
> > struct sg_table *pages,
> > bool needs_clflush)
> > {
> > - GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
> > -
> > if (obj->mm.madv == I915_MADV_DONTNEED)
> > obj->mm.dirty = false;
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
> > index 7066044d63cf..28ea06e667cd 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
> > @@ -499,22 +499,19 @@ i915_pages_create_for_stolen(struct drm_device *dev,
> > return st;
> > }
> >
> > -static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +stolen_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > - struct sg_table *pages =
> > - i915_pages_create_for_stolen(obj->base.dev,
> > - obj->stolen->start,
> > - obj->stolen->size);
> > - if (IS_ERR(pages))
> > - return PTR_ERR(pages);
> > -
> > - __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
> > + struct drm_i915_gem_object *obj = ctx->object;
> >
> > - return 0;
> > + return i915_pages_create_for_stolen(obj->base.dev,
> > + obj->stolen->start,
> > + obj->stolen->size);
> > }
> >
> > -static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages)
> > +static void
> > +stolen_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
> > {
> > /* Should only be called from i915_gem_object_release_stolen() */
> > sg_free_table(pages);
> > @@ -536,8 +533,8 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
> > }
> >
> > static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
> > - .get_pages = i915_gem_object_get_pages_stolen,
> > - .put_pages = i915_gem_object_put_pages_stolen,
> > + .get_pages = stolen_get_pages,
> > + .put_pages = stolen_put_pages,
> > .release = i915_gem_object_release_stolen,
> > };
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> > index f093deaeb5c0..6748e15bf89a 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
> > @@ -378,7 +378,7 @@ __i915_mm_struct_free(struct kref *kref)
> > mutex_unlock(&mm->i915->mm_lock);
> >
> > INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
> > - queue_work(mm->i915->mm.userptr_wq, &mm->work);
> > + schedule_work(&mm->work);
> > }
> >
> > static void
> > @@ -393,19 +393,12 @@ i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
> > obj->userptr.mm = NULL;
> > }
> >
> > -struct get_pages_work {
> > - struct work_struct work;
> > - struct drm_i915_gem_object *obj;
> > - struct task_struct *task;
> > -};
> > -
> > static struct sg_table *
> > __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
> > struct page **pvec, int num_pages)
> > {
> > unsigned int max_segment = i915_sg_segment_size();
> > struct sg_table *st;
> > - unsigned int sg_page_sizes;
> > int ret;
> >
> > st = kmalloc(sizeof(*st), GFP_KERNEL);
> > @@ -435,131 +428,23 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
> > return ERR_PTR(ret);
> > }
> >
> > - sg_page_sizes = i915_sg_page_sizes(st->sgl);
> > -
> > - __i915_gem_object_set_pages(obj, st, sg_page_sizes);
> > -
> > return st;
> > }
> >
> > -static void
> > -__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
> > -{
> > - struct get_pages_work *work = container_of(_work, typeof(*work), work);
> > - struct drm_i915_gem_object *obj = work->obj;
> > - const int npages = obj->base.size >> PAGE_SHIFT;
> > - struct page **pvec;
> > - int pinned, ret;
> > -
> > - ret = -ENOMEM;
> > - pinned = 0;
> > -
> > - pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
> > - if (pvec != NULL) {
> > - struct mm_struct *mm = obj->userptr.mm->mm;
> > - unsigned int flags = 0;
> > -
> > - if (!i915_gem_object_is_readonly(obj))
> > - flags |= FOLL_WRITE;
> > -
> > - ret = -EFAULT;
> > - if (mmget_not_zero(mm)) {
> > - down_read(&mm->mmap_sem);
> > - while (pinned < npages) {
> > - ret = get_user_pages_remote
> > - (work->task, mm,
> > - obj->userptr.ptr + pinned * PAGE_SIZE,
> > - npages - pinned,
> > - flags,
> > - pvec + pinned, NULL, NULL);
> > - if (ret < 0)
> > - break;
> > -
> > - pinned += ret;
> > - }
> > - up_read(&mm->mmap_sem);
> > - mmput(mm);
> > - }
> > - }
> > -
> > - mutex_lock(&obj->mm.lock);
> > - if (obj->userptr.work == &work->work) {
> > - struct sg_table *pages = ERR_PTR(ret);
> > -
> > - if (pinned == npages) {
> > - pages = __i915_gem_userptr_alloc_pages(obj, pvec,
> > - npages);
> > - if (!IS_ERR(pages)) {
> > - pinned = 0;
> > - pages = NULL;
> > - }
> > - }
> > -
> > - obj->userptr.work = ERR_CAST(pages);
> > - if (IS_ERR(pages))
> > - __i915_gem_userptr_set_active(obj, false);
> > - }
> > - mutex_unlock(&obj->mm.lock);
> > -
> > - release_pages(pvec, pinned);
> > - kvfree(pvec);
> > -
> > - i915_gem_object_put(obj);
> > - put_task_struct(work->task);
> > - kfree(work);
> > -}
> > -
> > static struct sg_table *
> > -__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
> > -{
> > - struct get_pages_work *work;
> > -
> > - /* Spawn a worker so that we can acquire the
> > - * user pages without holding our mutex. Access
> > - * to the user pages requires mmap_sem, and we have
> > - * a strict lock ordering of mmap_sem, struct_mutex -
> > - * we already hold struct_mutex here and so cannot
> > - * call gup without encountering a lock inversion.
> > - *
> > - * Userspace will keep on repeating the operation
> > - * (thanks to EAGAIN) until either we hit the fast
> > - * path or the worker completes. If the worker is
> > - * cancelled or superseded, the task is still run
> > - * but the results ignored. (This leads to
> > - * complications that we may have a stray object
> > - * refcount that we need to be wary of when
> > - * checking for existing objects during creation.)
> > - * If the worker encounters an error, it reports
> > - * that error back to this function through
> > - * obj->userptr.work = ERR_PTR.
> > - */
> > - work = kmalloc(sizeof(*work), GFP_KERNEL);
> > - if (work == NULL)
> > - return ERR_PTR(-ENOMEM);
> > -
> > - obj->userptr.work = &work->work;
> > -
> > - work->obj = i915_gem_object_get(obj);
> > -
> > - work->task = current;
> > - get_task_struct(work->task);
> > -
> > - INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
> > - queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
> > -
> > - return ERR_PTR(-EAGAIN);
> > -}
> > -
> > -static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
> > +userptr_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > const int num_pages = obj->base.size >> PAGE_SHIFT;
> > struct mm_struct *mm = obj->userptr.mm->mm;
> > - struct page **pvec;
> > struct sg_table *pages;
> > - bool active;
> > + struct page **pvec;
> > int pinned;
> > + int ret;
> >
> > - /* If userspace should engineer that these pages are replaced in
> > + /*
> > + * If userspace should engineer that these pages are replaced in
> > * the vma between us binding this page into the GTT and completion
> > * of rendering... Their loss. If they change the mapping of their
> > * pages they need to create a new bo to point to the new vma.
> > @@ -576,59 +461,58 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
> > * egregious cases from causing harm.
> > */
> >
> > - if (obj->userptr.work) {
> > - /* active flag should still be held for the pending work */
> > - if (IS_ERR(obj->userptr.work))
> > - return PTR_ERR(obj->userptr.work);
> > - else
> > - return -EAGAIN;
> > - }
> > + pvec = kvmalloc_array(num_pages, sizeof(struct page *),
> > + GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
> > + if (!pvec)
> > + return ERR_PTR(-ENOMEM);
> > +
> > + __i915_gem_userptr_set_active(obj, true);
> >
> > - pvec = NULL;
> > pinned = 0;
> > + ret = -EFAULT;
> > + if (mmget_not_zero(mm)) {
> > + unsigned int flags;
> >
> > - if (mm == current->mm) {
> > - pvec = kvmalloc_array(num_pages, sizeof(struct page *),
> > - GFP_KERNEL |
> > - __GFP_NORETRY |
> > - __GFP_NOWARN);
> > - if (pvec) /* defer to worker if malloc fails */
> > - pinned = __get_user_pages_fast(obj->userptr.ptr,
> > - num_pages,
> > - !i915_gem_object_is_readonly(obj),
> > - pvec);
> > - }
> > + flags = 0;
> > + if (!i915_gem_object_is_readonly(obj))
> > + flags |= FOLL_WRITE;
> >
> > - active = false;
> > - if (pinned < 0) {
> > - pages = ERR_PTR(pinned);
> > - pinned = 0;
> > - } else if (pinned < num_pages) {
> > - pages = __i915_gem_userptr_get_pages_schedule(obj);
> > - active = pages == ERR_PTR(-EAGAIN);
> > - } else {
> > - pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
> > - active = !IS_ERR(pages);
> > + down_read(&mm->mmap_sem);
> > + while (pinned < num_pages) {
> > + ret = get_user_pages_remote
> > + (ctx->task, mm,
> > + obj->userptr.ptr + pinned * PAGE_SIZE,
> > + num_pages - pinned,
> > + flags,
> > + pvec + pinned, NULL, NULL);
> > + if (ret < 0)
> > + break;
> > +
> > + pinned += ret;
> > + }
> > + up_read(&mm->mmap_sem);
> > + mmput(mm);
> > }
> > - if (active)
> > - __i915_gem_userptr_set_active(obj, true);
> >
> > - if (IS_ERR(pages))
> > + if (ret)
> > + pages = ERR_PTR(ret);
> > + else
> > + pages = __i915_gem_userptr_alloc_pages(obj, pvec, num_pages);
> > + if (IS_ERR(pages)) {
> > release_pages(pvec, pinned);
> > + __i915_gem_userptr_set_active(obj, false);
> > + }
> > kvfree(pvec);
> >
> > - return PTR_ERR_OR_ZERO(pages);
> > + return pages;
> > }
> >
> > static void
> > -i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
> > - struct sg_table *pages)
> > +userptr_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
> > {
> > struct sgt_iter sgt_iter;
> > struct page *page;
> >
> > - /* Cancel any inflight work and force them to restart their gup */
> > - obj->userptr.work = NULL;
> > __i915_gem_userptr_set_active(obj, false);
> > if (!pages)
> > return;
> > @@ -669,8 +553,8 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
> > .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
> > I915_GEM_OBJECT_IS_SHRINKABLE |
> > I915_GEM_OBJECT_ASYNC_CANCEL,
> > - .get_pages = i915_gem_userptr_get_pages,
> > - .put_pages = i915_gem_userptr_put_pages,
> > + .get_pages = userptr_get_pages,
> > + .put_pages = userptr_put_pages,
> > .dmabuf_export = i915_gem_userptr_dmabuf_export,
> > .release = i915_gem_userptr_release,
> > };
> > @@ -786,22 +670,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
> > return 0;
> > }
> >
> > -int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
> > +void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
> > {
> > mutex_init(&dev_priv->mm_lock);
> > hash_init(dev_priv->mm_structs);
> > -
> > - dev_priv->mm.userptr_wq =
> > - alloc_workqueue("i915-userptr-acquire",
> > - WQ_HIGHPRI | WQ_UNBOUND,
> > - 0);
> > - if (!dev_priv->mm.userptr_wq)
> > - return -ENOMEM;
> > -
> > - return 0;
> > }
> >
> > void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv)
> > {
> > - destroy_workqueue(dev_priv->mm.userptr_wq);
> > + mutex_destroy(&dev_priv->mm_lock);
> > }
> > diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
> > index 3c5d17b2b670..02e6edce715e 100644
> > --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
> > +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
> > @@ -21,9 +21,12 @@ static void huge_free_pages(struct drm_i915_gem_object *obj,
> > kfree(pages);
> > }
> >
> > -static int huge_get_pages(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +huge_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
> > + struct drm_i915_gem_object *obj = ctx->object;
> > const unsigned long nreal = obj->scratch / PAGE_SIZE;
> > const unsigned long npages = obj->base.size / PAGE_SIZE;
> > struct scatterlist *sg, *src, *end;
> > @@ -32,11 +35,11 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
> >
> > pages = kmalloc(sizeof(*pages), GFP);
> > if (!pages)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > if (sg_alloc_table(pages, npages, GFP)) {
> > kfree(pages);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > sg = pages->sgl;
> > @@ -64,14 +67,12 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
> > if (i915_gem_gtt_prepare_pages(obj, pages))
> > goto err;
> >
> > - __i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
> > -
> > - return 0;
> > + *sizes = PAGE_SIZE;
> > + return pages;
> >
> > err:
> > huge_free_pages(obj, pages);
> > -
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > #undef GFP
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> > index b03a29366bd1..3b93a7337f27 100644
> > --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> > +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
> > @@ -51,9 +51,12 @@ static void huge_pages_free_pages(struct sg_table *st)
> > kfree(st);
> > }
> >
> > -static int get_huge_pages(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +get_huge_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
> > + struct drm_i915_gem_object *obj = ctx->object;
> > unsigned int page_mask = obj->mm.page_mask;
> > struct sg_table *st;
> > struct scatterlist *sg;
> > @@ -62,11 +65,11 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
> >
> > st = kmalloc(sizeof(*st), GFP);
> > if (!st)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
> > kfree(st);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > rem = obj->base.size;
> > @@ -114,16 +117,14 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
> > obj->mm.madv = I915_MADV_DONTNEED;
> >
> > GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
> > - __i915_gem_object_set_pages(obj, st, sg_page_sizes);
> > -
> > - return 0;
> > + *sizes = sg_page_sizes;
> > + return st;
> >
> > err:
> > sg_set_page(sg, NULL, 0, 0);
> > sg_mark_end(sg);
> > huge_pages_free_pages(st);
> > -
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > static void put_huge_pages(struct drm_i915_gem_object *obj,
> > @@ -175,8 +176,11 @@ huge_pages_object(struct drm_i915_private *i915,
> > return obj;
> > }
> >
> > -static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +fake_get_huge_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > const u64 max_len = rounddown_pow_of_two(UINT_MAX);
> > struct sg_table *st;
> > @@ -186,11 +190,11 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
> >
> > st = kmalloc(sizeof(*st), GFP);
> > if (!st)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > if (sg_alloc_table(st, obj->base.size >> PAGE_SHIFT, GFP)) {
> > kfree(st);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > /* Use optimal page sized chunks to fill in the sg table */
> > @@ -227,13 +231,15 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
> >
> > obj->mm.madv = I915_MADV_DONTNEED;
> >
> > - __i915_gem_object_set_pages(obj, st, sg_page_sizes);
> > -
> > - return 0;
> > + *sizes = sg_page_sizes;
> > + return st;
> > }
> >
> > -static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +fake_get_huge_pages_single(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct drm_i915_private *i915 = to_i915(obj->base.dev);
> > struct sg_table *st;
> > struct scatterlist *sg;
> > @@ -241,11 +247,11 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
> >
> > st = kmalloc(sizeof(*st), GFP);
> > if (!st)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > if (sg_alloc_table(st, 1, GFP)) {
> > kfree(st);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > sg = st->sgl;
> > @@ -261,9 +267,8 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
> >
> > obj->mm.madv = I915_MADV_DONTNEED;
> >
> > - __i915_gem_object_set_pages(obj, st, sg->length);
> > -
> > - return 0;
> > + *sizes = sg->length;
> > + return st;
> > #undef GFP
> > }
> >
> > diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
> > index 41c8ebc60c63..c2a81939f698 100644
> > --- a/drivers/gpu/drm/i915/gvt/dmabuf.c
> > +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
> > @@ -36,9 +36,11 @@
> >
> > #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
> >
> > -static int vgpu_gem_get_pages(
> > - struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +vgpu_gem_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
> > struct sg_table *st;
> > struct scatterlist *sg;
> > @@ -49,17 +51,17 @@ static int vgpu_gem_get_pages(
> >
> > fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
> > if (WARN_ON(!fb_info))
> > - return -ENODEV;
> > + return ERR_PTR(-ENODEV);
> >
> > st = kmalloc(sizeof(*st), GFP_KERNEL);
> > if (unlikely(!st))
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > page_num = obj->base.size >> PAGE_SHIFT;
> > ret = sg_alloc_table(st, page_num, GFP_KERNEL);
> > if (ret) {
> > kfree(st);
> > - return ret;
> > + return ERR_PTR(ret);
> > }
> > gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
> > (fb_info->start >> PAGE_SHIFT);
> > @@ -71,9 +73,8 @@ static int vgpu_gem_get_pages(
> > sg_dma_len(sg) = PAGE_SIZE;
> > }
> >
> > - __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
> > -
> > - return 0;
> > + *sizes = PAGE_SIZE;
> > + return st;
> > }
> >
> > static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> > index 9a1ec487e8b1..ef9e4cb49b4a 100644
> > --- a/drivers/gpu/drm/i915/i915_drv.h
> > +++ b/drivers/gpu/drm/i915/i915_drv.h
> > @@ -786,13 +786,6 @@ struct i915_gem_mm {
> > struct notifier_block vmap_notifier;
> > struct shrinker shrinker;
> >
> > - /**
> > - * Workqueue to fault in userptr pages, flushed by the execbuf
> > - * when required but otherwise left to userspace to try again
> > - * on EAGAIN.
> > - */
> > - struct workqueue_struct *userptr_wq;
> > -
> > u64 unordered_timeline;
> >
> > /* the indicator for dispatch video commands on two BSD rings */
> > @@ -2514,7 +2507,7 @@ static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
> > }
> >
> > /* i915_gem.c */
> > -int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
> > +void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
> > void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
> > void i915_gem_sanitize(struct drm_i915_private *i915);
> > int i915_gem_init_early(struct drm_i915_private *dev_priv);
> > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> > index 21416b87905e..1571c707ad15 100644
> > --- a/drivers/gpu/drm/i915/i915_gem.c
> > +++ b/drivers/gpu/drm/i915/i915_gem.c
> > @@ -1544,10 +1544,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
> > dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
> >
> > i915_timelines_init(dev_priv);
> > -
> > - ret = i915_gem_init_userptr(dev_priv);
> > - if (ret)
> > - return ret;
> > + i915_gem_init_userptr(dev_priv);
> >
> > ret = intel_uc_init_misc(dev_priv);
> > if (ret)
> > diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> > index 664b7e3e1d7c..dae1f8634a38 100644
> > --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> > +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
> > @@ -54,10 +54,13 @@ static void fake_free_pages(struct drm_i915_gem_object *obj,
> > kfree(pages);
> > }
> >
> > -static int fake_get_pages(struct drm_i915_gem_object *obj)
> > +static struct sg_table *
> > +fake_get_pages(struct i915_gem_object_get_pages_context *ctx,
> > + unsigned int *sizes)
> > {
> > #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
> > #define PFN_BIAS 0x1000
> > + struct drm_i915_gem_object *obj = ctx->object;
> > struct sg_table *pages;
> > struct scatterlist *sg;
> > unsigned int sg_page_sizes;
> > @@ -65,12 +68,12 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
> >
> > pages = kmalloc(sizeof(*pages), GFP);
> > if (!pages)
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> >
> > rem = round_up(obj->base.size, BIT(31)) >> 31;
> > if (sg_alloc_table(pages, rem, GFP)) {
> > kfree(pages);
> > - return -ENOMEM;
> > + return ERR_PTR(-ENOMEM);
> > }
> >
> > sg_page_sizes = 0;
> > @@ -90,9 +93,8 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
> >
> > obj->mm.madv = I915_MADV_DONTNEED;
> >
> > - __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
> > -
> > - return 0;
> > + *sizes = sg_page_sizes;
> > + return pages;
> > #undef GFP
> > }
> >
> > --
> > 2.20.1
> >
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch
--
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch