[PATCH v2 03/17] misc/habana: Stop using frame_vector helpers

Oded Gabbay oded.gabbay at gmail.com
Sat Oct 10 21:47:39 UTC 2020


On Sun, Oct 11, 2020 at 12:41 AM Daniel Vetter <daniel.vetter at ffwll.ch> wrote:
>
> On Sat, Oct 10, 2020 at 11:32 PM Daniel Vetter <daniel.vetter at ffwll.ch> wrote:
> >
> > On Sat, Oct 10, 2020 at 10:27 PM Oded Gabbay <oded.gabbay at gmail.com> wrote:
> > >
> > > On Fri, Oct 9, 2020 at 10:59 AM Daniel Vetter <daniel.vetter at ffwll.ch> wrote:
> > > >
> > > > All we need are a pages array, pin_user_pages_fast can give us that
> > > > directly. Plus this avoids the entire raw pfn side of get_vaddr_frames.
> > > >
> > > Thanks for the patch Daniel.
> > >
> > > > Signed-off-by: Daniel Vetter <daniel.vetter at intel.com>
> > > > Cc: Jason Gunthorpe <jgg at ziepe.ca>
> > > > Cc: Andrew Morton <akpm at linux-foundation.org>
> > > > Cc: John Hubbard <jhubbard at nvidia.com>
> > > > Cc: Jérôme Glisse <jglisse at redhat.com>
> > > > Cc: Jan Kara <jack at suse.cz>
> > > > Cc: Dan Williams <dan.j.williams at intel.com>
> > > > Cc: linux-mm at kvack.org
> > > > Cc: linux-arm-kernel at lists.infradead.org
> > > > Cc: linux-samsung-soc at vger.kernel.org
> > > > Cc: linux-media at vger.kernel.org
> > > > Cc: Oded Gabbay <oded.gabbay at gmail.com>
> > > > Cc: Omer Shpigelman <oshpigelman at habana.ai>
> > > > Cc: Ofir Bitton <obitton at habana.ai>
> > > > Cc: Tomer Tayar <ttayar at habana.ai>
> > > > Cc: Moti Haimovski <mhaimovski at habana.ai>
> > > > Cc: Daniel Vetter <daniel.vetter at ffwll.ch>
> > > > Cc: Greg Kroah-Hartman <gregkh at linuxfoundation.org>
> > > > Cc: Pawel Piskorski <ppiskorski at habana.ai>
> > > > --
> > > > v2: Use unpin_user_pages_dirty_lock (John)
> > > > ---
> > > >  drivers/misc/habanalabs/Kconfig             |  1 -
> > > >  drivers/misc/habanalabs/common/habanalabs.h |  3 +-
> > > >  drivers/misc/habanalabs/common/memory.c     | 49 ++++++++-------------
> > > >  3 files changed, 20 insertions(+), 33 deletions(-)
> > > >
> > > > diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
> > > > index 8eb5d38c618e..2f04187f7167 100644
> > > > --- a/drivers/misc/habanalabs/Kconfig
> > > > +++ b/drivers/misc/habanalabs/Kconfig
> > > > @@ -6,7 +6,6 @@
> > > >  config HABANA_AI
> > > >         tristate "HabanaAI accelerators (habanalabs)"
> > > >         depends on PCI && HAS_IOMEM
> > > > -       select FRAME_VECTOR
> > > >         select DMA_SHARED_BUFFER
> > > >         select GENERIC_ALLOCATOR
> > > >         select HWMON
> > > > diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
> > > > index edbd627b29d2..c1b3ad613b15 100644
> > > > --- a/drivers/misc/habanalabs/common/habanalabs.h
> > > > +++ b/drivers/misc/habanalabs/common/habanalabs.h
> > > > @@ -881,7 +881,8 @@ struct hl_ctx_mgr {
> > > >  struct hl_userptr {
> > > >         enum vm_type_t          vm_type; /* must be first */
> > > >         struct list_head        job_node;
> > > > -       struct frame_vector     *vec;
> > > > +       struct page             **pages;
> > > > +       unsigned int            npages;
> > > Can you please update the kerneldoc comment section of this structure
> > > according to your changes ?
> >
> > Apologies I missed the nice kerneldoc. I'll fix that in the next round.
> >
> >
> > > >         struct sg_table         *sgt;
> > > >         enum dma_data_direction dir;
> > > >         struct list_head        debugfs_list;
> > > > diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
> > > > index 5ff4688683fd..327b64479f97 100644
> > > > --- a/drivers/misc/habanalabs/common/memory.c
> > > > +++ b/drivers/misc/habanalabs/common/memory.c
> > > > @@ -1281,45 +1281,41 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
> > > >                 return -EFAULT;
> > > >         }
> > > >
> > > > -       userptr->vec = frame_vector_create(npages);
> > > > -       if (!userptr->vec) {
> > > > +       userptr->pages = kvmalloc_array(npages, sizeof(*userptr->pages),
> > > > +                                       GFP_KERNEL);
> > > > +       if (!userptr->pages) {
> > > >                 dev_err(hdev->dev, "Failed to create frame vector\n");
> > > >                 return -ENOMEM;
> > > >         }
> > > >
> > > > -       rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
> > > > -                               userptr->vec);
> > > > +       rc = pin_user_pages_fast(start, npages, FOLL_FORCE | FOLL_WRITE,
> > > > +                                userptr->pages);
> > > >
> > > >         if (rc != npages) {
> > > >                 dev_err(hdev->dev,
> > > >                         "Failed to map host memory, user ptr probably wrong\n");
> > > >                 if (rc < 0)
> > > > -                       goto destroy_framevec;
> > > > +                       goto destroy_pages;
> > > > +               npages = rc;
> > > >                 rc = -EFAULT;
> > > > -               goto put_framevec;
> > > > -       }
> > > > -
> > > > -       if (frame_vector_to_pages(userptr->vec) < 0) {
> > > > -               dev_err(hdev->dev,
> > > > -                       "Failed to translate frame vector to pages\n");
> > > > -               rc = -EFAULT;
> > > > -               goto put_framevec;
> > > > +               goto put_pages;
> > > >         }
> > > > +       userptr->npages = npages;
> > > >
> > > >         rc = sg_alloc_table_from_pages(userptr->sgt,
> > > > -                                       frame_vector_pages(userptr->vec),
> > > > -                                       npages, offset, size, GFP_ATOMIC);
> > > > +                                      userptr->pages,
> > > > +                                      npages, offset, size, GFP_ATOMIC);
> > > I think that because the call to kvmalloc_array() is done with
> > > GFP_KERNEL, there is no point in using GFP_ATOMIC here.
> > > And actually, this path only needs to avoid yielding when using a
> > > special debug mode.
> > > So I suggest putting here GFP_KERNEL.
> >
> > Huh, I didn't even notice the GFP_ATOMIC here. This looks indeed
> > strange and GFP_KERNEL should be perfectly fine in a function that
> > also calls pin_user_pages (since that one can allocate and do worse
> > stuff like userspace pagefaults).
> >
> > But since that GFP_ATOMIC is there already I'll do that in a separate patch.
>
> Ok I read up on your usage of GFP_ATOMIC in habanalabs, and I'm not
> going to touch this. But I'm pretty sure it's broken.
>
> You seem to have some requirement of not allocating memory with
> blocking (see hl_cb_alloc()), and that seems to be why you allocate
> tons of structures with GFP_ATOMIC. There's 2 pretty tough problems
> with that:
> - GFP_ATOMIC can fail, even when the system hasn't run out of memory
> yet. You _must_ have a fallback path to handle allocation failures for
> these. Quick survey shows you a ton of GFP_ATOMIC callsites, and very
> little fallback code - I've found none, but I didn't check the failure
> handlers all going up the possible callchains.
> - pin_user_pages can allocate memory, so you're breaking your own "no
> sleeping in these paths" rules.
>
> This isn't going to get fixed with a quick oneliner patch, depending
> what's needed you're looking at a driver rearchitecture here :-/ Hence
> I'm not going to touch this in the next patch, but leave it all as-is.
>
Most of those requirements come from code that is only relevant in
initial bringup and in our first ASIC (GOYA) for the first few months
we had it.
I'm going to remove all that code from the upstream driver as it's not
needed there.

Then, I'll go and look at all other uses of GFP_ATOMIC to see how they
can be improved/removed, maybe with pre-allocated stuff.
Thanks for pointing this out.
Oded

> Cheers, Daniel
>
> >
> > > In the meanwhile, I'll run this patch (coupled with the next patch) in
> > > our C/I to make sure there are no regressions.
> >
> > Excellent. I'll wait with v3 until that's done, just in case you hit a
> > snag I need to fix.
> >
> > Cheers, Daniel
> >
> > > Thanks,
> > > Oded
> > >
> > > >         if (rc < 0) {
> > > >                 dev_err(hdev->dev, "failed to create SG table from pages\n");
> > > > -               goto put_framevec;
> > > > +               goto put_pages;
> > > >         }
> > > >
> > > >         return 0;
> > > >
> > > > -put_framevec:
> > > > -       put_vaddr_frames(userptr->vec);
> > > > -destroy_framevec:
> > > > -       frame_vector_destroy(userptr->vec);
> > > > +put_pages:
> > > > +       unpin_user_pages(userptr->pages, npages);
> > > > +destroy_pages:
> > > > +       kvfree(userptr->pages);
> > > >         return rc;
> > > >  }
> > > >
> > > > @@ -1405,8 +1401,6 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
> > > >   */
> > > >  void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
> > > >  {
> > > > -       struct page **pages;
> > > > -
> > > >         hl_debugfs_remove_userptr(hdev, userptr);
> > > >
> > > >         if (userptr->dma_mapped)
> > > > @@ -1414,15 +1408,8 @@ void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
> > > >                                                         userptr->sgt->nents,
> > > >                                                         userptr->dir);
> > > >
> > > > -       pages = frame_vector_pages(userptr->vec);
> > > > -       if (!IS_ERR(pages)) {
> > > > -               int i;
> > > > -
> > > > -               for (i = 0; i < frame_vector_count(userptr->vec); i++)
> > > > -                       set_page_dirty_lock(pages[i]);
> > > > -       }
> > > > -       put_vaddr_frames(userptr->vec);
> > > > -       frame_vector_destroy(userptr->vec);
> > > > +       unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
> > > > +       kvfree(userptr->pages);
> > > >
> > > >         list_del(&userptr->job_node);
> > > >
> > > > --
> > > > 2.28.0
> > > >
> >
> >
> >
> > --
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > http://blog.ffwll.ch
>
>
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch


More information about the dri-devel mailing list