[Intel-gfx] [PATCH 16/38] drm/i915: Pass around sg_table to get_pages/put_pages backend

Chris Wilson <chris at chris-wilson.co.uk>
Tue Sep 20 08:29:50 UTC 2016


The plan is to move obj->mm.pages out from under the struct_mutex and
protect it with its own per-object lock. As a first step we need to
prune any assumption of the struct_mutex from the get_pages/put_pages
backends; to make that easier, we pass the sg_table to operate on
directly to the backends rather than having them reach into the object.
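
As a sketch of the new contract (the hunks below are authoritative),
a backend now builds and returns its own sg_table, reporting failure
via ERR_PTR(), and is handed the same table back when the pages are
released:

	struct sg_table *(*get_pages)(struct drm_i915_gem_object *obj);
	void (*put_pages)(struct drm_i915_gem_object *obj,
			  struct sg_table *pages);

The caller installs the result into obj->mm.pages via
__i915_gem_object_set_pages() and fetch_and_zero()s it again before
calling put_pages(), so a backend never reads or writes obj->mm.pages
itself.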

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
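
Reviewer note (below the ---, so it won't land in the commit message):
__i915_gem_object_put_pages() now returns void. Callers that used to
act on its return value instead inspect obj->mm.pages afterwards; the
pattern used throughout this patch is roughly:

	__i915_gem_object_put_pages(obj);
	if (obj->mm.pages)
		return -EBUSY; /* pages are still pinned somewhere */
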
 drivers/gpu/drm/i915/i915_drv.h          |  36 +++++--
 drivers/gpu/drm/i915/i915_gem.c          | 174 +++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c   |  20 ++--
 drivers/gpu/drm/i915/i915_gem_fence.c    |  16 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c      |  19 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.h      |   6 +-
 drivers/gpu/drm/i915/i915_gem_internal.c |  22 ++--
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  11 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  43 ++++----
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  88 ++++++++--------
 10 files changed, 226 insertions(+), 209 deletions(-)
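
Also of note: the new lockdep_assert_held_unless() helper relaxes the
struct_mutex assertion on page pin/unpin, so they may be called on a
dead object (refcount == 0) without holding the mutex:

	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
				   i915_gem_object_is_dead(obj));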

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d51a4ec512fa..e267f25fca6e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2157,8 +2157,8 @@ struct drm_i915_gem_object_ops {
 	 * being released or under memory pressure (where we attempt to
 	 * reap pages for the shrinker).
 	 */
-	int (*get_pages)(struct drm_i915_gem_object *);
-	void (*put_pages)(struct drm_i915_gem_object *);
+	struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
+	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
 
 	int (*dmabuf_export)(struct drm_i915_gem_object *);
 	void (*release)(struct drm_i915_gem_object *);
@@ -2300,8 +2300,6 @@ struct drm_i915_gem_object {
 		struct i915_gem_userptr {
 			uintptr_t ptr;
 			unsigned read_only :1;
-			unsigned workers :4;
-#define I915_GEM_USERPTR_MAX_WORKERS 15
 
 			struct i915_mm_struct *mm;
 			struct i915_mmu_object *mmu_object;
@@ -2361,6 +2359,19 @@ __deprecated
 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
 static inline bool
+i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+{
+	return atomic_read(&obj->base.refcount.refcount) == 0;
+}
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+#define lockdep_assert_held_unless(lock, cond) \
+	GEM_BUG_ON(!lockdep_is_held(lock) && !(cond))
+#else
+#define lockdep_assert_held_unless(lock, cond)
+#endif
+
+static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
@@ -3172,6 +3183,8 @@ dma_addr_t
 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 				unsigned long n);
 
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
@@ -3187,7 +3200,8 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+				   i915_gem_object_is_dead(obj));
 	GEM_BUG_ON(!obj->mm.pages);
 	obj->mm.pages_pin_count++;
 }
@@ -3201,7 +3215,8 @@ i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	lockdep_assert_held_unless(&obj->base.dev->struct_mutex,
+				   i915_gem_object_is_dead(obj));
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 	GEM_BUG_ON(!obj->mm.pages);
 	obj->mm.pages_pin_count--;
@@ -3212,7 +3227,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 
 enum i915_map_type {
 	I915_MAP_WB = 0,
@@ -3435,8 +3451,10 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 void i915_gem_restore_fences(struct drm_device *dev);
 
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				       struct sg_table *pages);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+					 struct sg_table *pages);
 
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 83d68e069382..88ed07575f5c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -169,7 +169,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static int
+static struct sg_table *
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
@@ -179,7 +179,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	int i;
 
 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
@@ -187,7 +187,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 		page = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(page))
-			return PTR_ERR(page);
+			return ERR_CAST(page);
 
 		src = kmap_atomic(page);
 		memcpy(vaddr, src, PAGE_SIZE);
@@ -202,11 +202,11 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 		kfree(st);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	sg = st->sgl;
@@ -216,28 +216,30 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	sg_dma_address(sg) = obj->phys_handle->busaddr;
 	sg_dma_len(sg) = obj->base.size;
 
-	obj->mm.pages = st;
-	return 0;
+	return st;
 }
 
 static void
-i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
 {
-	int ret;
-
-	BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (WARN_ON(ret)) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
 	if (obj->mm.madv == I915_MADV_DONTNEED)
 		obj->mm.dirty = false;
 
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		i915_gem_clflush_object(obj, false);
+
+	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
+{
+	__i915_gem_object_release_shmem(obj);
+
 	if (obj->mm.dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
@@ -265,8 +267,8 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
 		obj->mm.dirty = false;
 	}
 
-	sg_free_table(obj->mm.pages);
-	kfree(obj->mm.pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void
@@ -517,9 +519,9 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = __i915_gem_object_put_pages(obj);
-	if (ret)
-		return ret;
+	__i915_gem_object_put_pages(obj);
+	if (obj->mm.pages)
+		return -EBUSY;
 
 	/* create a new object */
 	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
@@ -535,7 +537,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 static int
 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_device *dev = obj->base.dev;
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
@@ -551,7 +553,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 				   I915_WAIT_LOCKED |
 				   I915_WAIT_ALL,
 				   MAX_SCHEDULE_TIMEOUT,
-				   to_rps_client(file_priv));
+				   to_rps_client(file));
 	if (ret)
 		return ret;
 
@@ -2225,8 +2227,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 }
 
 /* Try to discard unwanted pages */
-static void
-i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
+void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping;
 
@@ -2245,32 +2246,20 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
+			      struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
-	int ret;
 
-	BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
-
-	ret = i915_gem_object_set_to_cpu_domain(obj, true);
-	if (WARN_ON(ret)) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj, true);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
+	__i915_gem_object_release_shmem(obj);
 
-	i915_gem_gtt_finish_object(obj);
+	i915_gem_gtt_finish_pages(obj, pages);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_save_bit_17_swizzle(obj);
-
-	if (obj->mm.madv == I915_MADV_DONTNEED)
-		obj->mm.dirty = false;
+		i915_gem_object_save_bit_17_swizzle(obj, pages);
 
-	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
 			set_page_dirty(page);
 
@@ -2281,8 +2270,8 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	}
 	obj->mm.dirty = false;
 
-	sg_free_table(obj->mm.pages);
-	kfree(obj->mm.pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
@@ -2294,24 +2283,22 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 {
-	const struct drm_i915_gem_object_ops *ops = obj->ops;
+	struct sg_table *pages;
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	if (!obj->mm.pages)
-		return 0;
-
 	if (i915_gem_object_has_pinned_pages(obj))
-		return -EBUSY;
+		return;
 
 	GEM_BUG_ON(obj->bind_count);
 
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
 	 * lists early. */
-	list_del(&obj->global_list);
+	pages = fetch_and_zero(&obj->mm.pages);
+	GEM_BUG_ON(!pages);
 
 	if (obj->mm.mapping) {
 		void *ptr;
@@ -2327,15 +2314,10 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 
 	__i915_gem_object_reset_page_iter(obj);
 
-	ops->put_pages(obj);
-	obj->mm.pages = NULL;
-
-	i915_gem_object_invalidate(obj);
-
-	return 0;
+	obj->ops->put_pages(obj, pages);
 }
 
-static int
+static struct sg_table *
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2353,17 +2335,17 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
 	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (st == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	page_count = obj->base.size / PAGE_SIZE;
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/* Get the list of pages out of our struct file.  They'll be pinned
@@ -2423,20 +2405,19 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (!swiotlb_nr_tbl())
 #endif
 		sg_mark_end(sg);
-	obj->mm.pages = st;
 
-	ret = i915_gem_gtt_prepare_object(obj);
+	ret = i915_gem_gtt_prepare_pages(obj, st);
 	if (ret)
 		goto err_pages;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
-		i915_gem_object_do_bit_17_swizzle(obj);
+		i915_gem_object_do_bit_17_swizzle(obj, st);
 
 	if (i915_gem_object_is_tiled(obj) &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
 		__i915_gem_object_pin_pages(obj);
 
-	return 0;
+	return st;
 
 err_pages:
 	sg_mark_end(sg);
@@ -2456,7 +2437,35 @@ err_pages:
 	if (ret == -ENOSPC)
 		ret = -ENOMEM;
 
-	return ret;
+	return ERR_PTR(ret);
+}
+
+void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
+				 struct sg_table *pages)
+{
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	obj->mm.get_page.sg_pos = pages->sgl;
+	obj->mm.get_page.sg_idx = 0;
+
+	obj->mm.pages = pages;
+}
+
+static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+{
+	struct sg_table *pages;
+
+	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+		DRM_DEBUG("Attempting to obtain a purgeable object\n");
+		return -EFAULT;
+	}
+
+	pages = obj->ops->get_pages(obj);
+	if (unlikely(IS_ERR(pages)))
+		return PTR_ERR(pages);
+
+	__i915_gem_object_set_pages(obj, pages);
+	return 0;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2468,33 +2477,18 @@ err_pages:
  */
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	const struct drm_i915_gem_object_ops *ops = obj->ops;
-	int ret;
+	int err;
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
 	if (obj->mm.pages)
 		return 0;
 
-	if (obj->mm.madv != I915_MADV_WILLNEED) {
-		DRM_DEBUG("Attempting to obtain a purgeable object\n");
-		__i915_gem_object_unpin_pages(obj);
-		return -EFAULT;
-	}
-
-	ret = ops->get_pages(obj);
-	if (ret) {
+	err = ____i915_gem_object_get_pages(obj);
+	if (err)
 		__i915_gem_object_unpin_pages(obj);
-		return ret;
-	}
 
-	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
-
-	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-	obj->mm.get_page.sg_idx = 0;
-
-	return 0;
+	return err;
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dfb803168317..a4c90e915051 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -289,22 +289,18 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 	return dma_buf;
 }
 
-static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
-	struct sg_table *sg;
-
-	sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg))
-		return PTR_ERR(sg);
-
-	obj->mm.pages = sg;
-	return 0;
+	return dma_buf_map_attachment(obj->base.import_attach,
+				      DMA_BIDIRECTIONAL);
 }
 
-static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
+					     struct sg_table *pages)
 {
-	dma_buf_unmap_attachment(obj->base.import_attach,
-				 obj->mm.pages, DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment(obj->base.import_attach, pages,
+				 DMA_BIDIRECTIONAL);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 398856160656..b79bcd6288e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -638,7 +638,8 @@ i915_gem_swizzle_page(struct page *page)
  * by swapping them out and back in again).
  */
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				  struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
@@ -648,10 +649,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		return;
 
 	i = 0;
-	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj->bit_17) != 0)) {
+		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
@@ -668,15 +668,15 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
  * be called before the backing storage can be unpinned.
  */
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
+				    struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
-	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (obj->bit_17 == NULL) {
-		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
+		obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
 				      sizeof(long), GFP_KERNEL);
 		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
@@ -687,7 +687,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 
 	i = 0;
 
-	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		if (page_to_phys(page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 19108a311547..3ac6a1d995ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2292,14 +2292,15 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 	i915_ggtt_flush(dev_priv);
 }
 
-int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
 {
-	if (!dma_map_sg(&obj->base.dev->pdev->dev,
-			obj->mm.pages->sgl, obj->mm.pages->nents,
-			PCI_DMA_BIDIRECTIONAL))
-		return -ENOSPC;
+	if (dma_map_sg(&obj->base.dev->pdev->dev,
+		       pages->sgl, pages->nents,
+		       PCI_DMA_BIDIRECTIONAL))
+		return 0;
 
-	return 0;
+	return -ENOSPC;
 }
 
 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
@@ -2671,7 +2672,8 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 					 true);
 }
 
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct device *kdev = &dev_priv->drm.pdev->dev;
@@ -2685,8 +2687,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
-		     PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
 }
 
 static void i915_gtt_color_adjust(struct drm_mm_node *node,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a90999fe2e57..cfa41a887f2d 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -629,8 +629,10 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
 void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
 
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
+					    struct sg_table *pages);
+void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
+			       struct sg_table *pages);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK		BIT(0)
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 15ef610b3b41..38ae92c983f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -38,7 +38,8 @@ static void __i915_gem_object_free_pages(struct sg_table *st)
 	kfree(st);
 }
 
-static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	const unsigned int npages = obj->base.size / PAGE_SIZE;
 	struct sg_table *st;
@@ -49,11 +50,11 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
 	if (!st)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
 		kfree(st);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	sg = st->sgl;
@@ -95,25 +96,24 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	if (!swiotlb_nr_tbl())
 #endif
 		sg_mark_end(sg);
-	obj->mm.pages = st;
 
-	if (i915_gem_gtt_prepare_object(obj)) {
-		obj->mm.pages = NULL;
+	if (i915_gem_gtt_prepare_pages(obj, st))
 		goto err;
-	}
 
 	obj->mm.madv = I915_MADV_DONTNEED;
-	return 0;
+	return st;
 
 err:
 	sg_mark_end(sg);
 	__i915_gem_object_free_pages(st);
-	return -ENOMEM;
+	return ERR_PTR(-ENOMEM);
 }
 
-static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
+					       struct sg_table *pages)
 {
-	__i915_gem_object_free_pages(obj->mm.pages);
+	i915_gem_gtt_finish_pages(obj, pages);
+	__i915_gem_object_free_pages(pages);
 
 	obj->mm.dirty = false;
 	obj->mm.madv = I915_MADV_WILLNEED;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index e0e6d68fe470..a8c3d37b95b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -91,6 +91,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
 }
 
+static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
+{
+	if (i915_gem_object_unbind(obj) == 0)
+		__i915_gem_object_put_pages(obj);
+	return !obj->mm.pages;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -191,9 +198,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			i915_gem_object_get(obj);
 
-			/* For the unbound phase, this should be a no-op! */
-			i915_gem_object_unbind(obj);
-			if (__i915_gem_object_put_pages(obj) == 0)
+			if (unsafe_drop_pages(obj))
 				count += obj->base.size >> PAGE_SHIFT;
 
 			i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 6d139d6b4609..4166eafa25dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -545,17 +545,20 @@ i915_pages_create_for_stolen(struct drm_device *dev,
 	return st;
 }
 
-static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
+static struct sg_table *
+i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 {
-	BUG();
-	return -EINVAL;
+	return i915_pages_create_for_stolen(obj->base.dev,
+					    obj->stolen->start,
+					    obj->stolen->size);
 }
 
-static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
+static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
+					     struct sg_table *pages)
 {
 	/* Should only be called during free */
-	sg_free_table(obj->mm.pages);
-	kfree(obj->mm.pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void
@@ -590,21 +593,13 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
 	drm_gem_private_object_init(dev, &obj->base, stolen->size);
 	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
-	obj->mm.pages = i915_pages_create_for_stolen(dev,
-						     stolen->start,
-						     stolen->size);
-	if (!obj->mm.pages)
-		goto cleanup;
-
-	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-	obj->mm.get_page.sg_idx = 0;
-
-	__i915_gem_object_pin_pages(obj);
 	obj->stolen = stolen;
-
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
 
+	if (i915_gem_object_pin_pages(obj))
+		goto cleanup;
+
 	return obj;
 
 cleanup:
@@ -699,10 +694,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	if (gtt_offset == I915_GTT_OFFSET_NONE)
 		return obj;
 
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		goto err;
+
 	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
-		goto err;
+		goto err_pages;
 	}
 
 	/* To simplify the initialisation sequence between KMS and GTT,
@@ -716,20 +715,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
 	if (ret) {
 		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-		goto err;
+		goto err_pages;
 	}
 
 	vma->pages = obj->mm.pages;
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	obj->bind_count++;
 
-	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	__i915_gem_object_pin_pages(obj);
-
 	return obj;
 
+err_pages:
+	i915_gem_object_unpin_pages(obj);
 err:
 	i915_gem_object_put(obj);
 	return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index cb6a186602d8..e97c7b589439 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -73,11 +73,14 @@ static void cancel_userptr(struct work_struct *work)
 	/* Cancel any active worker and force us to re-evaluate gup */
 	obj->userptr.work = NULL;
 
-	if (obj->mm.pages != NULL) {
-		/* We are inside a kthread context and can't be interrupted */
-		WARN_ON(i915_gem_object_unbind(obj));
-		WARN_ON(__i915_gem_object_put_pages(obj));
-	}
+	/* We are inside a kthread context and can't be interrupted */
+	if (i915_gem_object_unbind(obj) == 0)
+		__i915_gem_object_put_pages(obj);
+	WARN_ONCE(obj->mm.pages,
+		  "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_display=%d\n",
+		  obj->bind_count,
+		  obj->mm.pages_pin_count,
+		  obj->pin_display);
 
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
@@ -426,24 +429,25 @@ err:
 	return ret;
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
 			     struct page **pvec, int num_pages)
 {
+	struct sg_table *pages;
 	int ret;
 
-	ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
+	ret = st_set_pages(&pages, pvec, num_pages);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
-	ret = i915_gem_gtt_prepare_object(obj);
+	ret = i915_gem_gtt_prepare_pages(obj, pages);
 	if (ret) {
-		sg_free_table(obj->mm.pages);
-		kfree(obj->mm.pages);
-		obj->mm.pages = NULL;
+		sg_free_table(pages);
+		kfree(pages);
+		return ERR_PTR(ret);
 	}
 
-	return ret;
+	return pages;
 }
 
 static int
@@ -521,20 +525,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 
 	mutex_lock(&dev->struct_mutex);
 	if (obj->userptr.work == &work->work) {
+		struct sg_table *pages = ERR_PTR(ret);
+
 		if (pinned == npages) {
-			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
-			if (ret == 0) {
-				list_add_tail(&obj->global_list,
-					      &to_i915(dev)->mm.unbound_list);
-				obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
-				obj->mm.get_page.sg_idx = 0;
+			pages = __i915_gem_userptr_set_pages(obj, pvec, npages);
+			if (!IS_ERR(pages)) {
+				__i915_gem_object_set_pages(obj, pages);
 				pinned = 0;
+				pages = NULL;
 			}
 		}
-		obj->userptr.work = ERR_PTR(ret);
+
+		obj->userptr.work = ERR_CAST(pages);
 	}
 
-	obj->userptr.workers--;
 	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -545,7 +549,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	kfree(work);
 }
 
-static int
+static struct sg_table *
 __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 				      bool *active)
 {
@@ -570,15 +574,11 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	 * that error back to this function through
 	 * obj->userptr.work = ERR_PTR.
 	 */
-	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
-		return -EAGAIN;
-
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (work == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	obj->userptr.work = &work->work;
-	obj->userptr.workers++;
 
 	work->obj = i915_gem_object_get(obj);
 
@@ -589,14 +589,15 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	schedule_work(&work->work);
 
 	*active = true;
-	return -EAGAIN;
+	return ERR_PTR(-EAGAIN);
 }
 
-static int
+static struct sg_table *
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const int num_pages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
+	struct sg_table *pages;
 	int pinned, ret;
 	bool active;
 
@@ -620,15 +621,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	if (obj->userptr.work) {
 		/* active flag should still be held for the pending work */
 		if (IS_ERR(obj->userptr.work))
-			return PTR_ERR(obj->userptr.work);
+			return ERR_CAST(obj->userptr.work);
 		else
-			return -EAGAIN;
+			return ERR_PTR(-EAGAIN);
 	}
 
 	/* Let the mmu-notifier know that we have begun and need cancellation */
 	ret = __i915_gem_userptr_set_active(obj, true);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	pvec = NULL;
 	pinned = 0;
@@ -637,7 +638,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 				      GFP_TEMPORARY);
 		if (pvec == NULL) {
 			__i915_gem_userptr_set_active(obj, false);
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 		}
 
 		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
@@ -646,21 +647,22 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 
 	active = false;
 	if (pinned < 0)
-		ret = pinned, pinned = 0;
+		pages = ERR_PTR(pinned), pinned = 0;
 	else if (pinned < num_pages)
-		ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
+		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
 	else
-		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-	if (ret) {
+		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
+	if (IS_ERR(pages)) {
 		__i915_gem_userptr_set_active(obj, active);
 		release_pages(pvec, pinned, 0);
 	}
 	drm_free_large(pvec);
-	return ret;
+	return pages;
 }
 
 static void
-i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
+i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
+			   struct sg_table *pages)
 {
 	struct sgt_iter sgt_iter;
 	struct page *page;
@@ -671,9 +673,9 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		obj->mm.dirty = false;
 
-	i915_gem_gtt_finish_object(obj);
+	i915_gem_gtt_finish_pages(obj, pages);
 
-	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
+	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
 			set_page_dirty(page);
 
@@ -682,8 +684,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
 	}
 	obj->mm.dirty = false;
 
-	sg_free_table(obj->mm.pages);
-	kfree(obj->mm.pages);
+	sg_free_table(pages);
+	kfree(pages);
 }
 
 static void
-- 
2.9.3