[PATCH v4 hmm 12/12] mm/hmm: Fix error flows in hmm_invalidate_range_start
Jason Gunthorpe
jgg at mellanox.com
Thu Jun 27 16:06:41 UTC 2019
On Wed, Jun 26, 2019 at 11:18:23AM -0700, Ralph Campbell wrote:
> > diff --git a/mm/hmm.c b/mm/hmm.c
> > index b224ea635a7716..89549eac03d506 100644
> > --- a/mm/hmm.c
> > +++ b/mm/hmm.c
> > @@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
> > init_rwsem(&hmm->mirrors_sem);
> > hmm->mmu_notifier.ops = NULL;
> > INIT_LIST_HEAD(&hmm->ranges);
> > - mutex_init(&hmm->lock);
> > + spin_lock_init(&hmm->ranges_lock);
> > kref_init(&hmm->kref);
> > hmm->notifiers = 0;
> > hmm->mm = mm;
> > @@ -144,6 +144,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
> > hmm_put(hmm);
> > }
> > +static void notifiers_decrement(struct hmm *hmm)
> > +{
> > + lockdep_assert_held(&hmm->ranges_lock);
> > +
>
> Why not acquire the lock here and release at the end instead
> of asserting the lock is held?
> It looks like every caller of notifiers_decrement() does that.
Yes, this is just a leftover mistake, thanks
From aa371c720a9e3c632dcd9a6a2c73e325b9b2b98c Mon Sep 17 00:00:00 2001
From: Jason Gunthorpe <jgg at mellanox.com>
Date: Fri, 7 Jun 2019 12:10:33 -0300
Subject: [PATCH] mm/hmm: Fix error flows in hmm_invalidate_range_start
If the trylock on hmm->mirrors_sem fails, the function returns without
decrementing the notifiers count it has just incremented. Since the
caller will not call invalidate_range_end() on EAGAIN, this leaves the
notifiers count permanently incremented and results in deadlock.
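
Roughly, the pre-patch error path looks like this (a simplified
sketch reconstructed from the hunks below, not the verbatim code):

        hmm->notifiers++;
        ...
        if (mmu_notifier_range_blockable(nrange))
                down_read(&hmm->mirrors_sem);
        else if (!down_read_trylock(&hmm->mirrors_sem)) {
                ret = -EAGAIN;
                goto out;       /* notifiers is never decremented */
        }
        ...
out:
        hmm_put(hmm);
        return ret;

Once notifiers is stuck non-zero, no range on hmm->ranges can be
marked valid again, so anything waiting on hmm->wq waits forever.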
If sync_cpu_device_pagetables() required blocking, the function will
not return EAGAIN even though the device continues to touch the
pages. This is a violation of the mmu notifier contract.
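
The missed EAGAIN comes from a shadowed variable in the mirror loop;
per the hunk below, the pre-patch loop was:

        list_for_each_entry(mirror, &hmm->mirrors, list) {
                int ret;        /* shadows the function-level ret */

                ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
                if (!update.blockable && ret == -EAGAIN)
                        break;
        }

The inner ret is discarded when the loop exits, so the function still
returns 0. The fix renames the inner variable to rc and propagates the
failure into the function-level ret.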
Switch hmm->lock to a spinlock, renamed to ranges_lock, so it can be
reliably acquired without blocking during error unwind; unlike
mutex_trylock(), acquiring a spinlock cannot fail.
The error unwind is necessary since the notifiers count must be held
incremented across the call to sync_cpu_device_pagetables(): we cannot
allow the range to become marked valid by a parallel
invalidate_start/end() pair while sync_cpu_device_pagetables() is
still running.
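
As an illustration, if start() dropped the count before calling the
mirrors, a parallel pair could mark the range valid mid-invalidation:

        CPU0                            CPU1
        notifiers++ (== 1)
        notifiers-- (== 0)
                                        invalidate_start: notifiers++ (== 1)
        sync_cpu_device_pagetables()
                                        invalidate_end: notifiers-- (== 0),
                                          ranges marked valid, waiters woken
        ... device invalidation still
            in progress ...

Holding the count elevated until sync_cpu_device_pagetables() finishes
(or until the EAGAIN unwind calls notifiers_decrement()) closes this
window.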
Signed-off-by: Jason Gunthorpe <jgg at mellanox.com>
Reviewed-by: Ralph Campbell <rcampbell at nvidia.com>
Reviewed-by: Christoph Hellwig <hch at lst.de>
Tested-by: Philip Yang <Philip.Yang at amd.com>
---
v4
- Move lock into notifiers_decrement() (Ralph)
---
include/linux/hmm.h | 2 +-
mm/hmm.c | 69 ++++++++++++++++++++++++++-------------------
2 files changed, 41 insertions(+), 30 deletions(-)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index bf013e96525771..0fa8ea34ccef6d 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -86,7 +86,7 @@
struct hmm {
struct mm_struct *mm;
struct kref kref;
- struct mutex lock;
+ spinlock_t ranges_lock;
struct list_head ranges;
struct list_head mirrors;
struct mmu_notifier mmu_notifier;
diff --git a/mm/hmm.c b/mm/hmm.c
index b224ea635a7716..de35289df20d43 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -64,7 +64,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
init_rwsem(&hmm->mirrors_sem);
hmm->mmu_notifier.ops = NULL;
INIT_LIST_HEAD(&hmm->ranges);
- mutex_init(&hmm->lock);
+ spin_lock_init(&hmm->ranges_lock);
kref_init(&hmm->kref);
hmm->notifiers = 0;
hmm->mm = mm;
@@ -144,6 +144,25 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
hmm_put(hmm);
}
+static void notifiers_decrement(struct hmm *hmm)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hmm->ranges_lock, flags);
+ hmm->notifiers--;
+ if (!hmm->notifiers) {
+ struct hmm_range *range;
+
+ list_for_each_entry(range, &hmm->ranges, list) {
+ if (range->valid)
+ continue;
+ range->valid = true;
+ }
+ wake_up_all(&hmm->wq);
+ }
+ spin_unlock_irqrestore(&hmm->ranges_lock, flags);
+}
+
static int hmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *nrange)
{
@@ -151,6 +170,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
struct hmm_mirror *mirror;
struct hmm_update update;
struct hmm_range *range;
+ unsigned long flags;
int ret = 0;
if (!kref_get_unless_zero(&hmm->kref))
@@ -161,12 +181,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
update.event = HMM_UPDATE_INVALIDATE;
update.blockable = mmu_notifier_range_blockable(nrange);
- if (mmu_notifier_range_blockable(nrange))
- mutex_lock(&hmm->lock);
- else if (!mutex_trylock(&hmm->lock)) {
- ret = -EAGAIN;
- goto out;
- }
+ spin_lock_irqsave(&hmm->ranges_lock, flags);
hmm->notifiers++;
list_for_each_entry(range, &hmm->ranges, list) {
if (update.end < range->start || update.start >= range->end)
@@ -174,7 +189,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
range->valid = false;
}
- mutex_unlock(&hmm->lock);
+ spin_unlock_irqrestore(&hmm->ranges_lock, flags);
if (mmu_notifier_range_blockable(nrange))
down_read(&hmm->mirrors_sem);
@@ -182,16 +197,23 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
ret = -EAGAIN;
goto out;
}
+
list_for_each_entry(mirror, &hmm->mirrors, list) {
- int ret;
+ int rc;
- ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
- if (!update.blockable && ret == -EAGAIN)
+ rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
+ if (rc) {
+ if (WARN_ON(update.blockable || rc != -EAGAIN))
+ continue;
+ ret = -EAGAIN;
break;
+ }
}
up_read(&hmm->mirrors_sem);
out:
+ if (ret)
+ notifiers_decrement(hmm);
hmm_put(hmm);
return ret;
}
@@ -204,20 +226,7 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
if (!kref_get_unless_zero(&hmm->kref))
return;
- mutex_lock(&hmm->lock);
- hmm->notifiers--;
- if (!hmm->notifiers) {
- struct hmm_range *range;
-
- list_for_each_entry(range, &hmm->ranges, list) {
- if (range->valid)
- continue;
- range->valid = true;
- }
- wake_up_all(&hmm->wq);
- }
- mutex_unlock(&hmm->lock);
-
+ notifiers_decrement(hmm);
hmm_put(hmm);
}
@@ -868,6 +877,7 @@ int hmm_range_register(struct hmm_range *range,
{
unsigned long mask = ((1UL << page_shift) - 1UL);
struct hmm *hmm = mirror->hmm;
+ unsigned long flags;
range->valid = false;
range->hmm = NULL;
@@ -886,7 +896,7 @@ int hmm_range_register(struct hmm_range *range,
return -EFAULT;
/* Initialize range to track CPU page table updates. */
- mutex_lock(&hmm->lock);
+ spin_lock_irqsave(&hmm->ranges_lock, flags);
range->hmm = hmm;
kref_get(&hmm->kref);
@@ -898,7 +908,7 @@ int hmm_range_register(struct hmm_range *range,
*/
if (!hmm->notifiers)
range->valid = true;
- mutex_unlock(&hmm->lock);
+ spin_unlock_irqrestore(&hmm->ranges_lock, flags);
return 0;
}
@@ -914,10 +924,11 @@ EXPORT_SYMBOL(hmm_range_register);
void hmm_range_unregister(struct hmm_range *range)
{
struct hmm *hmm = range->hmm;
+ unsigned long flags;
- mutex_lock(&hmm->lock);
+ spin_lock_irqsave(&hmm->ranges_lock, flags);
list_del_init(&range->list);
- mutex_unlock(&hmm->lock);
+ spin_unlock_irqrestore(&hmm->ranges_lock, flags);
/* Drop reference taken by hmm_range_register() */
mmput(hmm->mm);
--
2.22.0
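
For driver authors, the contract the new WARN_ON encodes is that a
mirror's sync_cpu_device_pagetables() may only fail with -EAGAIN, and
only when update->blockable is false. A hypothetical callback honoring
that contract might look like this (my_dev, pt_lock and
my_dev_invalidate() are illustrative names, not from any real driver):

static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
                                         const struct hmm_update *update)
{
        struct my_dev *mdev = container_of(mirror, struct my_dev, mirror);

        if (update->blockable)
                mutex_lock(&mdev->pt_lock);
        else if (!mutex_trylock(&mdev->pt_lock))
                return -EAGAIN;         /* cannot sleep; core will unwind */

        /* shoot down device mappings covering [start, end) */
        my_dev_invalidate(mdev, update->start, update->end);

        mutex_unlock(&mdev->pt_lock);
        return 0;
}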