[PATCH v2 12/11] mm/hmm: Fix error flows in hmm_invalidate_range_start
Ralph Campbell
rcampbell at nvidia.com
Fri Jun 7 23:52:58 UTC 2019
On 6/7/19 9:05 AM, Jason Gunthorpe wrote:
> If the trylock on hmm->mirrors_sem fails, the function returns without
> decrementing the notifier count it has already incremented. Since the
> caller will not call invalidate_range_end() on EAGAIN, this leaves the
> count permanently elevated and leads to deadlock.
>
> If sync_cpu_device_pagetables() fails because it would have to block,
> the function still does not return EAGAIN, even though the device
> continues to touch the pages. This is a violation of the mmu notifier
> contract.
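
(For context, the caller side of that contract: the oom reaper is, I
believe, the only non-blocking caller today, and it never calls
invalidate_range_end() when range_start() fails. Paraphrasing
__oom_reap_task_mm(), heavily abridged, not the exact mm code:

	if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
		/* a notifier needed to block: give up, no _end() call */
		return false;
	}
	unmap_page_range(&tlb, vma, range.start, range.end, NULL);
	mmu_notifier_invalidate_range_end(&range);

So range_start() has to fully unwind its own state on the EAGAIN path.)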
>
> Switch the lock to a spinlock and rename it to ranges_lock so it can
> be reliably acquired without blocking during error unwind.
>
> The error unwind is necessary since the notifiers count must be held
> incremented across the call to sync_cpu_device_pagetables(): we cannot
> allow the range to become marked valid by a parallel
> invalidate_start/end() pair while sync_cpu_device_pagetables() is
> still running.
>
> Signed-off-by: Jason Gunthorpe <jgg at mellanox.com>
Reviewed-by: Ralph Campbell <rcampbell at nvidia.com>
> ---
>  include/linux/hmm.h |  2 +-
>  mm/hmm.c            | 77 +++++++++++++++++++++++++++------------------
>  2 files changed, 48 insertions(+), 31 deletions(-)
>
> I almost lost this patch - it is part of the series, hasn't been
> posted before, and wasn't sent with the rest, sorry.
>
> diff --git a/include/linux/hmm.h b/include/linux/hmm.h
> index bf013e96525771..0fa8ea34ccef6d 100644
> --- a/include/linux/hmm.h
> +++ b/include/linux/hmm.h
> @@ -86,7 +86,7 @@
>  struct hmm {
>  	struct mm_struct *mm;
>  	struct kref kref;
> -	struct mutex lock;
> +	spinlock_t ranges_lock;
>  	struct list_head ranges;
>  	struct list_head mirrors;
>  	struct mmu_notifier mmu_notifier;
> diff --git a/mm/hmm.c b/mm/hmm.c
> index 4215edf737ef5b..10103a24e9b7b3 100644
> --- a/mm/hmm.c
> +++ b/mm/hmm.c
> @@ -68,7 +68,7 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
>  	init_rwsem(&hmm->mirrors_sem);
>  	hmm->mmu_notifier.ops = NULL;
>  	INIT_LIST_HEAD(&hmm->ranges);
> -	mutex_init(&hmm->lock);
> +	spin_lock_init(&hmm->ranges_lock);
>  	kref_init(&hmm->kref);
>  	hmm->notifiers = 0;
>  	hmm->mm = mm;
> @@ -114,18 +114,19 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
>  {
>  	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
>  	struct hmm_mirror *mirror;
> +	unsigned long flags;
>
>  	/* Bail out if hmm is in the process of being freed */
>  	if (!kref_get_unless_zero(&hmm->kref))
>  		return;
>
> -	mutex_lock(&hmm->lock);
> +	spin_lock_irqsave(&hmm->ranges_lock, flags);
>  	/*
>  	 * Since hmm_range_register() holds the mmget() lock hmm_release() is
>  	 * prevented as long as a range exists.
>  	 */
>  	WARN_ON(!list_empty(&hmm->ranges));
> -	mutex_unlock(&hmm->lock);
> +	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
>
>  	down_read(&hmm->mirrors_sem);
>  	list_for_each_entry(mirror, &hmm->mirrors, list) {
> @@ -141,6 +142,23 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
>  	hmm_put(hmm);
>  }
>
> +static void notifiers_decrement(struct hmm *hmm)
> +{
> +	lockdep_assert_held(&hmm->ranges_lock);
> +
> +	hmm->notifiers--;
> +	if (!hmm->notifiers) {
> +		struct hmm_range *range;
> +
> +		list_for_each_entry(range, &hmm->ranges, list) {
> +			if (range->valid)
> +				continue;
> +			range->valid = true;
> +		}
This effectively just sets all ranges back to valid.
I'm not sure that is best.
Shouldn't hmm_range_register() start with range.valid = true, and then
hmm_invalidate_range_start() set the affected ranges to false?
Then this function reduces to a wake_up_all() when --notifiers == 0,
and hmm_range_wait_until_valid() would wait for notifiers == 0.
Otherwise, range.valid doesn't really mean the range is valid.
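
Roughly what I mean, as an untested sketch; it keeps the notifiers
counter and hmm->wq from this series but drops the revalidation loop:

	static void notifiers_decrement(struct hmm *hmm)
	{
		lockdep_assert_held(&hmm->ranges_lock);

		/* just wake waiters once all invalidations have finished */
		if (!--hmm->notifiers)
			wake_up_all(&hmm->wq);
	}

hmm_range_wait_until_valid() would then wait on the counter, e.g.
wait_event_timeout(hmm->wq, !READ_ONCE(hmm->notifiers), timeout),
rather than polling range->valid.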
> +		wake_up_all(&hmm->wq);
> +	}
> +}
> +
>  static int hmm_invalidate_range_start(struct mmu_notifier *mn,
>  			const struct mmu_notifier_range *nrange)
>  {
> @@ -148,6 +166,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
>  	struct hmm_mirror *mirror;
>  	struct hmm_update update;
>  	struct hmm_range *range;
> +	unsigned long flags;
>  	int ret = 0;
>
>  	if (!kref_get_unless_zero(&hmm->kref))
> @@ -158,12 +177,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
>  	update.event = HMM_UPDATE_INVALIDATE;
>  	update.blockable = mmu_notifier_range_blockable(nrange);
>
> -	if (mmu_notifier_range_blockable(nrange))
> -		mutex_lock(&hmm->lock);
> -	else if (!mutex_trylock(&hmm->lock)) {
> -		ret = -EAGAIN;
> -		goto out;
> -	}
> +	spin_lock_irqsave(&hmm->ranges_lock, flags);
>  	hmm->notifiers++;
>  	list_for_each_entry(range, &hmm->ranges, list) {
>  		if (update.end < range->start || update.start >= range->end)
> @@ -171,7 +185,7 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
>
>  		range->valid = false;
>  	}
> -	mutex_unlock(&hmm->lock);
> +	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
>
>  	if (mmu_notifier_range_blockable(nrange))
>  		down_read(&hmm->mirrors_sem);
> @@ -179,16 +193,26 @@ static int hmm_invalidate_range_start(struct mmu_notifier *mn,
>  		ret = -EAGAIN;
>  		goto out;
>  	}
> +
>  	list_for_each_entry(mirror, &hmm->mirrors, list) {
> -		int ret;
> +		int rc;
>
> -		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
> -		if (!update.blockable && ret == -EAGAIN)
> +		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
> +		if (rc) {
> +			if (WARN_ON(update.blockable || rc != -EAGAIN))
> +				continue;
> +			ret = -EAGAIN;
>  			break;
> +		}
>  	}
>  	up_read(&hmm->mirrors_sem);
>
>  out:
> +	if (ret) {
> +		spin_lock_irqsave(&hmm->ranges_lock, flags);
> +		notifiers_decrement(hmm);
> +		spin_unlock_irqrestore(&hmm->ranges_lock, flags);
> +	}
>  	hmm_put(hmm);
>  	return ret;
>  }
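
As an aside, the WARN_ON() above documents the other half of the
contract for mirror drivers: with update.blockable set the callback
must not fail, and with it clear the only legal failure is -EAGAIN. A
conforming mirror would look roughly like this (mydev, pt_lock and
mydev_invalidate() are made-up names; only the hmm_mirror_ops hook and
the struct hmm_update fields are from this series):

	static int mydev_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					const const struct hmm_update *update)
	{
		struct mydev *dev = container_of(mirror, struct mydev, mirror);

		if (update->blockable)
			mutex_lock(&dev->pt_lock);
		else if (!mutex_trylock(&dev->pt_lock))
			return -EAGAIN;	/* only failure range_start() accepts */

		/* hypothetical helper: tear down device PTEs in [start, end) */
		mydev_invalidate(dev, update->start, update->end);
		mutex_unlock(&dev->pt_lock);
		return 0;
	}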
> @@ -197,23 +221,14 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
>  			const struct mmu_notifier_range *nrange)
>  {
>  	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
> +	unsigned long flags;
>
>  	if (!kref_get_unless_zero(&hmm->kref))
>  		return;
>
> -	mutex_lock(&hmm->lock);
> -	hmm->notifiers--;
> -	if (!hmm->notifiers) {
> -		struct hmm_range *range;
> -
> -		list_for_each_entry(range, &hmm->ranges, list) {
> -			if (range->valid)
> -				continue;
> -			range->valid = true;
> -		}
> -		wake_up_all(&hmm->wq);
> -	}
> -	mutex_unlock(&hmm->lock);
> +	spin_lock_irqsave(&hmm->ranges_lock, flags);
> +	notifiers_decrement(hmm);
> +	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
>
>  	hmm_put(hmm);
>  }
> @@ -866,6 +881,7 @@ int hmm_range_register(struct hmm_range *range,
>  {
>  	unsigned long mask = ((1UL << page_shift) - 1UL);
>  	struct hmm *hmm = mirror->hmm;
> +	unsigned long flags;
>
>  	range->valid = false;
>  	range->hmm = NULL;
> @@ -887,7 +903,7 @@ int hmm_range_register(struct hmm_range *range,
>  	kref_get(&hmm->kref);
>
>  	/* Initialize range to track CPU page table updates. */
> -	mutex_lock(&hmm->lock);
> +	spin_lock_irqsave(&hmm->ranges_lock, flags);
>
>  	range->hmm = hmm;
>  	list_add(&range->list, &hmm->ranges);
> @@ -898,7 +914,7 @@ int hmm_range_register(struct hmm_range *range,
>  	 */
>  	if (!hmm->notifiers)
>  		range->valid = true;
> -	mutex_unlock(&hmm->lock);
> +	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
>
>  	return 0;
>  }
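
For reference, this is the register/wait pattern the notifiers check
above is protecting, roughly following Documentation/vm/hmm.rst (error
handling abridged; the timeout is arbitrary and range->pfns setup is
omitted):

	struct hmm_range range = {};
	long ret;

	ret = hmm_range_register(&range, &mirror, start, end, PAGE_SHIFT);
	if (ret)
		return ret;

	/* sleeps until no invalidation is in flight for this range */
	if (!hmm_range_wait_until_valid(&range, msecs_to_jiffies(1000))) {
		hmm_range_unregister(&range);
		return -EBUSY;
	}

	down_read(&mm->mmap_sem);
	ret = hmm_range_snapshot(&range);	/* or hmm_range_fault() */
	up_read(&mm->mmap_sem);
	hmm_range_unregister(&range);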
> @@ -914,13 +930,14 @@ EXPORT_SYMBOL(hmm_range_register);
>  void hmm_range_unregister(struct hmm_range *range)
>  {
>  	struct hmm *hmm = range->hmm;
> +	unsigned long flags;
>
>  	if (WARN_ON(range->end <= range->start))
>  		return;
>
> -	mutex_lock(&hmm->lock);
> +	spin_lock_irqsave(&hmm->ranges_lock, flags);
>  	list_del(&range->list);
> -	mutex_unlock(&hmm->lock);
> +	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
>
>  	/* Drop reference taken by hmm_range_register() */
>  	range->valid = false;
>