[PATCH v2 13/15] drm/amdgpu: Use mmu_range_insert instead of hmm_mirror
Jason Gunthorpe
jgg at mellanox.com
Tue Oct 29 23:09:46 UTC 2019
On Tue, Oct 29, 2019 at 10:14:29PM +0000, Kuehling, Felix wrote:
> > +static const struct mmu_range_notifier_ops amdgpu_mn_hsa_ops = {
> > + .invalidate = amdgpu_mn_invalidate_hsa,
> > +};
> > +
> > +static int amdgpu_mn_sync_pagetables(struct hmm_mirror *mirror,
> > + const struct mmu_notifier_range *update)
> > {
> > struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
> > - unsigned long start = update->start;
> > - unsigned long end = update->end;
> > - bool blockable = mmu_notifier_range_blockable(update);
> > - struct interval_tree_node *it;
> >
> > - /* notification is exclusive, but interval is inclusive */
> > - end -= 1;
> > -
> > - if (amdgpu_mn_read_lock(amn, blockable))
> > - return -EAGAIN;
> > -
> > - it = interval_tree_iter_first(&amn->objects, start, end);
> > - while (it) {
> > - struct amdgpu_mn_node *node;
> > - struct amdgpu_bo *bo;
> > -
> > - if (!blockable) {
> > - amdgpu_mn_read_unlock(amn);
> > - return -EAGAIN;
> > - }
> > -
> > - node = container_of(it, struct amdgpu_mn_node, it);
> > - it = interval_tree_iter_next(it, start, end);
> > -
> > - list_for_each_entry(bo, &node->bos, mn_list) {
> > - struct kgd_mem *mem = bo->kfd_bo;
> > -
> > - if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
> > - start, end))
> > - amdgpu_amdkfd_evict_userptr(mem, amn->mm);
> > - }
> > - }
> > -
> > - amdgpu_mn_read_unlock(amn);
> > + if (!mmu_notifier_range_blockable(update))
> > + return false;
>
> This should return -EAGAIN. Not sure it matters much, because this whole
> function disappears in the next commit in the series. It seems to be
> only vestigial at this point.
Right, the only reason it is still here is that I couldn't really tell
if this:
> > + down_read(&amn->lock);
> > + up_read(&amn->lock);
> > return 0;
> > }
Was serving as the 'driver lock' in the hmm scheme... If not, then the
whole thing should just be deleted at this point.
I fixed the EAGAIN, though.
Jason
More information about the amd-gfx
mailing list