[PATCH hmm 05/15] RDMA/odp: Use mmu_range_notifier_insert()
Jason Gunthorpe
jgg at ziepe.ca
Mon Nov 4 20:25:00 UTC 2019
On Tue, Oct 15, 2019 at 03:12:32PM -0300, Jason Gunthorpe wrote:
> @@ -250,26 +85,15 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
> ret = -ENOMEM;
> goto out_page_list;
> }
> - }
>
> - mn = mmu_notifier_get(&ib_umem_notifiers, umem_odp->umem.owning_mm);
> - if (IS_ERR(mn)) {
> - ret = PTR_ERR(mn);
> - goto out_dma_list;
> - }
> - umem_odp->per_mm = per_mm =
> - container_of(mn, struct ib_ucontext_per_mm, mn);
> -
> - mutex_init(&umem_odp->umem_mutex);
> - init_completion(&umem_odp->notifier_completion);
> + ret = mmu_range_notifier_insert(&umem_odp->notifier, start,
> + end - start, current->mm);
> + if (ret)
> + goto out_dma_list;
It turns out 'current' can't be used here, as this can be called from the
page fault work queue; it should be 'umem_odp->umem.owning_mm'.
The same problem applies to the tgid a few lines below.
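To illustrate the work queue point (a made-up example, not from this
series; 'odp_fault_work' and 'struct odp_fault' are invented names):
inside a work queue callback 'current' is a kworker kernel thread, so
current->mm is NULL and current->group_leader is the kworker, not the
process that registered the umem. Only what was recorded at creation
time identifies the owner:

#include <linux/workqueue.h>
#include <rdma/ib_umem_odp.h>

struct odp_fault {                      /* invented container, example only */
        struct work_struct work;
        struct ib_umem_odp *umem_odp;
};

static void odp_fault_work(struct work_struct *work)
{
        struct odp_fault *pfault = container_of(work, struct odp_fault, work);
        struct ib_umem_odp *umem_odp = pfault->umem_odp;

        /* kworker context: no user mm, and group_leader is the kworker */
        WARN_ON(current->mm);

        /* the owner is whatever was captured when the umem was created */
        pr_debug("owner mm %p tgid %p\n",
                 umem_odp->umem.owning_mm, umem_odp->tgid);
}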
It also seems there is a pre-existing problem here: this code does not
guarantee it holds a mmget() on the mm for the non-current case when it
called mmu_notifier_get(), or now mmu_range_notifier_insert().
I'll fix this in a dedicated patch.
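Roughly the kind of guarantee that dedicated patch needs to add inside
ib_init_umem_odp() for the non-current path (a sketch only; the real fix
may well take the reference in the callers instead, and the error code
is an arbitrary choice):

        struct mm_struct *mm = umem_odp->umem.owning_mm;

        /*
         * Nothing prevents the owning mm from exiting underneath us when
         * we are not running in its context, so pin it across the insert.
         * mmget_not_zero() fails if the mm has already been released.
         */
        if (!mmget_not_zero(mm)) {
                ret = -EFAULT;
                goto out_dma_list;
        }

        ret = mmu_range_notifier_insert(&umem_odp->notifier, start,
                                        end - start, mm);
        mmput(mm);
        if (ret)
                goto out_dma_list;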
This incremental sorts it out; I'll squash it into this patch:
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6132b8127e8435..0768bb60ce1662 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -87,12 +87,10 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp)
}
ret = mmu_range_notifier_insert(&umem_odp->notifier, start,
- end - start, current->mm);
+ end - start,
+ umem_odp->umem.owning_mm);
if (ret)
goto out_dma_list;
-
- umem_odp->tgid =
- get_task_pid(current->group_leader, PIDTYPE_PID);
}
return 0;
@@ -140,8 +138,10 @@ ib_umem_odp_alloc_implicit(struct ib_udata *udata, int access)
umem_odp->is_implicit_odp = 1;
umem_odp->page_shift = PAGE_SHIFT;
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
ret = ib_init_umem_odp(umem_odp);
if (ret) {
+ put_pid(umem_odp->tgid);
kfree(umem_odp);
return ERR_PTR(ret);
}
@@ -185,8 +185,10 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
odp_data->page_shift = PAGE_SHIFT;
odp_data->notifier.ops = ops;
+ odp_data->tgid = get_pid(root->tgid);
ret = ib_init_umem_odp(odp_data);
if (ret) {
+ put_pid(odp_data->tgid);
kfree(odp_data);
return ERR_PTR(ret);
}
@@ -254,11 +256,14 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
up_read(&mm->mmap_sem);
}
+ umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
ret = ib_init_umem_odp(umem_odp);
if (ret)
- goto err_free;
+ goto err_put_pid;
return umem_odp;
+err_put_pid:
+ put_pid(umem_odp->tgid);
err_free:
kfree(umem_odp);
return ERR_PTR(ret);