[Intel-gfx] [PATCH 2/2] drm/i915: Serialise userptr gup with mmu-notifier
Chris Wilson
chris at chris-wilson.co.uk
Thu Jul 10 11:21:44 CEST 2014
Jerome Glisse pointed out that get_user_pages() does not synchronize
with concurrent invalidations of the VMA. As such, if the backing VMA is
changed whilst the pages for the object are being grabbed for use by the
GPU, we may end up holding a random mixture of page references. Worse
still, the mmu-notifier will believe that the VMA invalidation is
complete and that the old page references have been dropped.
In order to serialise gup with the mmu-notifier, we use a spinlock-protected
serial counter (in the manner of a seqlock) to detect when an invalidation
has occurred in parallel with our gup and, if so, cancel the gup. The
detection is a little coarse, but hopefully we never see contention here!
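
For reference, the check the new helpers perform boils down to the pattern
below. This is only an illustrative sketch: example_notifier, example_pin_pages
and example_gup are made-up names, with example_gup standing in for the call
to get_user_pages() on the object's range.

#include <linux/errno.h>
#include <linux/spinlock.h>

struct example_notifier {
	spinlock_t lock;
	unsigned long invalidate_serial; /* bumped on every invalidate_range_start */
};

static int example_gup(void); /* stand-in for get_user_pages() on the range */

static int example_pin_pages(struct example_notifier *n)
{
	unsigned long serial;
	int pinned;

	/* Sample the serial under the lock before starting the gup. */
	spin_lock(&n->lock);
	serial = n->invalidate_serial;
	spin_unlock(&n->lock);

	pinned = example_gup(); /* may race with an invalidation */

	/*
	 * If invalidate_range_start ran meanwhile, the serial has changed;
	 * discard the freshly pinned pages and let the caller retry.
	 */
	spin_lock(&n->lock);
	if (serial != n->invalidate_serial)
		pinned = -EAGAIN;
	spin_unlock(&n->lock);

	return pinned;
}

On top of this, the patch's i915_mmu_object_get() also refuses to start the
gup at all while the object's interval overlaps an invalidation that is still
in flight (tracked via invalidate_start/invalidate_end).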
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/i915_gem_userptr.c | 108 +++++++++++++++++++++++++++++++-
1 file changed, 105 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 7c38f50..8ed670f 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -46,6 +46,10 @@ struct i915_mmu_notifier {
struct work_struct work;
unsigned long count;
unsigned long serial;
+ unsigned long invalidate;
+ unsigned long invalidate_serial;
+ unsigned long invalidate_start;
+ unsigned long invalidate_end;
bool is_linear;
};
@@ -131,6 +135,19 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
unsigned long next = start;
unsigned long serial = 0;
+ spin_lock(&mn->lock);
+ if (mn->invalidate++ == 0) {
+ mn->invalidate_start = start;
+ mn->invalidate_end = end;
+ } else {
+ if (mn->invalidate_start > start)
+ mn->invalidate_start = start;
+ if (mn->invalidate_end < end)
+ mn->invalidate_end = end;
+ }
+ mn->invalidate_serial++;
+ spin_unlock(&mn->lock);
+
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
while (next < end) {
struct drm_i915_gem_object *obj;
@@ -156,8 +173,24 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
}
}
+static void i915_gem_userptr_mn_invalidate_range_end(struct mmu_notifier *_mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
+
+ spin_lock(&mn->lock);
+ if (--mn->invalidate == 0) {
+ mn->invalidate_start = ~0UL;
+ mn->invalidate_end = 0UL;
+ }
+ spin_unlock(&mn->lock);
+}
+
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
.invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+ .invalidate_range_end = i915_gem_userptr_mn_invalidate_range_end,
};
static struct i915_mmu_notifier *
@@ -201,6 +234,10 @@ i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm)
INIT_LIST_HEAD(&mmu->linear);
mmu->is_linear = false;
+ mmu->invalidate = 0;
+ mmu->invalidate_start = ~0UL;
+ mmu->invalidate_end = 0UL;
+
/* Protected by mmap_sem (write-lock) */
ret = __mmu_notifier_register(&mmu->mn, mm);
if (ret) {
@@ -394,6 +431,44 @@ destroy_mmu:
return ret;
}
+static bool
+i915_mmu_object_get(struct i915_mmu_object *mn, unsigned *serial)
+{
+ struct i915_mmu_notifier *mmu;
+ bool valid;
+
+ if (mn == NULL)
+ return true;
+
+ mmu = mn->mmu;
+
+ spin_lock(&mmu->lock);
+ valid = (mn->it.last <= mmu->invalidate_start ||
+ mn->it.start >= mmu->invalidate_end);
+ *serial = mmu->invalidate_serial;
+ spin_unlock(&mmu->lock);
+
+ return valid;
+}
+
+static bool
+i915_mmu_object_put(struct i915_mmu_object *mn, unsigned serial)
+{
+ struct i915_mmu_notifier *mmu;
+ bool valid;
+
+ if (mn == NULL)
+ return true;
+
+ mmu = mn->mmu;
+
+ spin_lock(&mmu->lock);
+ valid = serial == mmu->invalidate_serial;
+ spin_unlock(&mmu->lock);
+
+ return valid;
+}
+
#else
static void
@@ -413,6 +488,20 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return 0;
}
+
+static bool
+i915_mmu_object_get(struct i915_mmu_object *mmu, unsigned *serial)
+{
+ *serial = 0;
+ return true;
+}
+
+static bool
+i915_mmu_object_put(struct i915_mmu_object *mmu, unsigned serial)
+{
+ return true;
+}
+
#endif
struct get_pages_work {
@@ -469,8 +558,14 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
struct drm_device *dev = obj->base.dev;
const int num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ unsigned serial;
int pinned, ret;
+ if (!i915_mmu_object_get(obj->userptr.mn, &serial)) {
+ schedule_work(&work->work);
+ return;
+ }
+
ret = -ENOMEM;
pinned = 0;
@@ -497,7 +592,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_lock(&dev->struct_mutex);
- if (obj->userptr.work != &work->work) {
+ if (obj->userptr.work != &work->work ||
+ !i915_mmu_object_put(obj->userptr.mn, serial)) {
ret = 0;
} else if (pinned == num_pages) {
ret = st_set_pages(&obj->pages, pvec, num_pages);
@@ -522,8 +618,9 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const int num_pages = obj->base.size >> PAGE_SHIFT;
+ unsigned num_pages = obj->base.size >> PAGE_SHIFT;
struct page **pvec;
+ unsigned serial;
int pinned, ret;
/* If userspace should engineer that these pages are replaced in
@@ -545,7 +642,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pvec = NULL;
pinned = 0;
- if (obj->userptr.mm == current->mm) {
+ if (obj->userptr.mm == current->mm &&
+ i915_mmu_object_get(obj->userptr.mn, &serial)) {
pvec = kmalloc(num_pages*sizeof(struct page *),
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (pvec == NULL) {
@@ -556,6 +654,10 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
!obj->userptr.read_only, pvec);
+
+ /* And check that the mmu remained valid */
+ if (!i915_mmu_object_put(obj->userptr.mn, serial))
+ num_pages = ~0;
}
if (pinned < num_pages) {
if (pinned < 0) {
--
1.9.1