[PATCH 39/39] drm/i915/gem: Split out backing pages from the GEM object

Chris Wilson <chris@chris-wilson.co.uk>
Mon Jul 8 11:58:17 UTC 2019


The end goal is to decouple the object from the vma so that we can treat
i915_vma as a first-class, independently referenced object. Doing that
directly would currently introduce a reference cycle between the GEM
object and the i915_vma, so as a first step we separate out the backing
page store into its own structure that can then be shared between the
i915_vma and the object.

Note that this does not yet create proper backing-store classes; for now
the object simply allocates a separate i915_mm_pages container to hold
the pages.
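
The new container is declared in mm/i915_mm_pages.h and
mm/i915_mm_pages_types.h, which are not reproduced in this excerpt.
Reconstructed from its users in the diff below, it looks roughly like the
sketch here; the exact layout, the kref and the create/put helpers are
inferred from i915_mm_pages_create()/i915_mm_pages_put() and may differ
in detail (GEM_BUG_ON and I915_SELFTEST_DECLARE are the usual i915
macros):

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/scatterlist.h>

struct i915_mm_pages {
	struct kref kref;		/* shared by the object and, later, each vma */

	struct mutex lock;		/* protects the pages and their use */
	atomic_t pin_count;

	struct sg_table *pages;
	void *mapping;

	struct i915_page_sizes {
		unsigned int phys;	/* sg mask of the backing store */
		unsigned int sg;	/* gtt page sizes we may use */
		unsigned int gtt;	/* only read/written through the vma */
	} page_sizes;

	I915_SELFTEST_DECLARE(unsigned int page_mask);

	struct i915_page_iter {
		struct scatterlist *sg_pos;
		unsigned int sg_idx;	/* in pages, but 32bit eek! */

		struct radix_tree_root radix;
		struct mutex lock;	/* protects this cache */
	} get_page;

	unsigned int madv:2;		/* are the backing pages purgeable? */
	bool dirty:1;			/* written to since pages were acquired */
	bool quirked:1;			/* pinned due to unknown swizzling */

	/* Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;
};

enum i915_mm_subclass { /* lockdep subclass for mm->lock */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

struct i915_mm_pages *i915_mm_pages_create(void);
void i915_mm_pages_put(struct i915_mm_pages *mm);
void __i915_mm_pages_reset_page_iter(struct i915_mm_pages *mm);

static inline void i915_mm_pages_pin(struct i915_mm_pages *mm)
{
	atomic_inc(&mm->pin_count);
}

static inline void i915_mm_pages_unpin(struct i915_mm_pages *mm)
{
	GEM_BUG_ON(!atomic_read(&mm->pin_count));
	atomic_dec(&mm->pin_count);
}

static inline bool i915_mm_pages_is_pinned(const struct i915_mm_pages *mm)
{
	return atomic_read(&mm->pin_count);
}

With this, i915_gem_object_pin_pages() returns the pinned i915_mm_pages
(or an ERR_PTR) instead of an int, and callers drop their pin with
i915_mm_pages_unpin(), as seen throughout the conversion below.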

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |   6 +
 drivers/gpu/drm/i915/display/intel_display.c  |  11 +-
 drivers/gpu/drm/i915/display/intel_overlay.c  |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_clflush.c   |  11 +-
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    |  41 +++---
 drivers/gpu/drm/i915/gem/i915_gem_domain.c    |  55 ++++----
 drivers/gpu/drm/i915/gem/i915_gem_internal.c  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |  32 ++---
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  29 ++---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |  77 ++----------
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 113 ++++++++---------
 drivers/gpu/drm/i915/gem/i915_gem_phys.c      |  21 ++--
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   4 +-
 drivers/gpu/drm/i915/gem/i915_gem_shmem.c     |  23 ++--
 drivers/gpu/drm/i915/gem/i915_gem_shrinker.c  |  20 +--
 drivers/gpu/drm/i915/gem/i915_gem_tiling.c    |  32 ++---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c   |   9 +-
 .../drm/i915/gem/selftests/huge_gem_object.c  |   4 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   | 118 +++++++++++-------
 .../i915/gem/selftests/i915_gem_client_blt.c  |   4 +-
 .../drm/i915/gem/selftests/i915_gem_dmabuf.c  |   6 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |   8 +-
 .../drm/i915/gem/selftests/i915_gem_object.c  |   8 +-
 .../drm/i915/gem/selftests/i915_gem_phys.c    |   2 +-
 drivers/gpu/drm/i915/gt/intel_context.c       |   2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pool.c   |   2 +-
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |   2 +-
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |   2 +-
 drivers/gpu/drm/i915/i915_cmd_parser.c        |   2 +-
 drivers/gpu/drm/i915/i915_debugfs.c           |   6 +-
 drivers/gpu/drm/i915/i915_gem.c               |  56 +++++----
 drivers/gpu/drm/i915/i915_gem_fence_reg.c     |  19 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c           |  12 +-
 drivers/gpu/drm/i915/i915_gpu_error.c         |   6 +-
 drivers/gpu/drm/i915/i915_vma.c               |  15 +--
 drivers/gpu/drm/i915/intel_guc_fw.c           |   2 +-
 drivers/gpu/drm/i915/intel_huc_fw.c           |   2 +-
 drivers/gpu/drm/i915/intel_uc_fw.c            |   4 +-
 drivers/gpu/drm/i915/mm/Makefile              |   5 +
 drivers/gpu/drm/i915/mm/Makefile.header-test  |  16 +++
 drivers/gpu/drm/i915/mm/i915_mm_pages.c       |  54 ++++++++
 drivers/gpu/drm/i915/mm/i915_mm_pages.h       |  49 ++++++++
 drivers/gpu/drm/i915/mm/i915_mm_pages_types.h |  90 +++++++++++++
 .../gpu/drm/i915/selftests/i915_gem_evict.c   |  12 +-
 drivers/gpu/drm/i915/selftests/i915_gem_gtt.c |  58 ++++++---
 drivers/gpu/drm/i915/selftests/i915_vma.c     |   6 +-
 47 files changed, 664 insertions(+), 412 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/mm/Makefile
 create mode 100644 drivers/gpu/drm/i915/mm/Makefile.header-test
 create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages.c
 create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages.h
 create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages_types.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1ae546df284a..7b73c193ba9f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -96,6 +96,12 @@ gt-$(CONFIG_DRM_I915_SELFTEST) += \
 	gt/mock_engine.o
 i915-y += $(gt-y)
 
+# Memory management/integration code
+obj-y += mm/
+mm-y += \
+	mm/i915_mm_pages.o
+i915-y += $(mm-y)
+
 # GEM (Graphics Execution Management) code
 obj-y += gem/
 gem-y += \
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 767f1dc197a8..47a032c8d3fb 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14379,6 +14379,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	struct drm_framebuffer *fb = new_state->fb;
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
 	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+	struct i915_mm_pages *mm;
 	int ret;
 
 	if (old_obj) {
@@ -14419,20 +14420,20 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	if (!obj)
 		return 0;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm))
+		return PTR_ERR(mm);
 
 	ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
 	if (ret) {
-		i915_gem_object_unpin_pages(obj);
+		i915_mm_pages_unpin(mm);
 		return ret;
 	}
 
 	ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
 
 	mutex_unlock(&dev_priv->drm.struct_mutex);
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 1a15fa34205c..028c1b963f65 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1324,7 +1324,7 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
 	}
 
 	if (use_phys)
-		overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+		overlay->flip_addr = sg_dma_address(vma->pages->sgl);
 	else
 		overlay->flip_addr = i915_ggtt_offset(vma);
 	overlay->regs = i915_vma_pin_iomap(vma);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index a65d401f891c..27dcd6f57935 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -47,7 +47,7 @@ static const struct dma_fence_ops i915_clflush_ops = {
 static void __i915_do_clflush(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	drm_clflush_sg(obj->mm.pages);
+	drm_clflush_sg(obj->mm->pages);
 	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 }
 
@@ -55,16 +55,17 @@ static void i915_clflush_work(struct work_struct *work)
 {
 	struct clflush *clflush = container_of(work, typeof(*clflush), work);
 	struct drm_i915_gem_object *obj = clflush->obj;
+	struct i915_mm_pages *p;
 
-	if (i915_gem_object_pin_pages(obj)) {
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p)) {
 		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
 		goto out;
 	}
 
 	__i915_do_clflush(obj);
 
-	i915_gem_object_unpin_pages(obj);
-
+	i915_mm_pages_unpin(p);
 out:
 	i915_gem_object_put(obj);
 
@@ -151,7 +152,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 						  &clflush->dma);
 
 		i915_sw_fence_commit(&clflush->wait);
-	} else if (obj->mm.pages) {
+	} else if (obj->mm->pages) {
 		__i915_do_clflush(obj);
 	} else {
 		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index a15d4d9e3a7b..f6b8cd4fdb43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -21,12 +21,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 					     enum dma_data_direction dir)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-	struct sg_table *st;
 	struct scatterlist *src, *dst;
+	struct i915_mm_pages *p;
+	struct sg_table *st;
 	int ret, i;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
 		goto err;
 
 	/* Copy sg so that we make an independent mapping */
@@ -36,13 +37,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err_unpin_pages;
 	}
 
-	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+	ret = sg_alloc_table(st, p->pages->nents, GFP_KERNEL);
 	if (ret)
 		goto err_free;
 
-	src = obj->mm.pages->sgl;
+	src = p->pages->sgl;
 	dst = st->sgl;
-	for (i = 0; i < obj->mm.pages->nents; i++) {
+	for (i = 0; i < p->pages->nents; i++) {
 		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
@@ -53,6 +54,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 		goto err_free_sg;
 	}
 
+	/* return with pages still pinned */
 	return st;
 
 err_free_sg:
@@ -60,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 err_free:
 	kfree(st);
 err_unpin_pages:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 err:
 	return ERR_PTR(ret);
 }
@@ -96,6 +98,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct i915_mm_pages *p;
 	struct page *page;
 
 	if (page_num >= obj->base.size >> PAGE_SHIFT)
@@ -104,7 +107,8 @@ static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_nu
 	if (!i915_gem_object_has_struct_page(obj))
 		return NULL;
 
-	if (i915_gem_object_pin_pages(obj))
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
 		return NULL;
 
 	/* Synchronisation is left to the caller (via .begin_cpu_access()) */
@@ -112,10 +116,11 @@ static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_nu
 	if (IS_ERR(page))
 		goto err_unpin;
 
+	/* returns with pages still pinned */
 	return kmap(page);
 
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 	return NULL;
 }
 
@@ -152,11 +157,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+	struct i915_mm_pages *p;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 
 	err = i915_gem_object_lock_interruptible(obj);
 	if (err)
@@ -166,18 +172,19 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
 	i915_gem_object_unlock(obj);
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 	return err;
 }
 
 static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+	struct i915_mm_pages *p;
 	int err;
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
-		return err;
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 
 	err = i915_gem_object_lock_interruptible(obj);
 	if (err)
@@ -187,7 +194,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct
 	i915_gem_object_unlock(obj);
 
 out:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index a1afc2690e9e..14949fd765eb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -46,6 +46,7 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	struct i915_mm_pages *p;
 	int ret;
 
 	assert_object_held(obj);
@@ -68,9 +69,9 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 
 	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
 
@@ -89,10 +90,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 	if (write) {
 		obj->read_domains = I915_GEM_DOMAIN_WC;
 		obj->write_domain = I915_GEM_DOMAIN_WC;
-		obj->mm.dirty = true;
+		p->dirty = true;
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 	return 0;
 }
 
@@ -107,6 +108,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	struct i915_mm_pages *p;
 	int ret;
 
 	assert_object_held(obj);
@@ -129,9 +131,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ret;
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
 
 	i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
 
@@ -150,10 +152,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (write) {
 		obj->read_domains = I915_GEM_DOMAIN_GTT;
 		obj->write_domain = I915_GEM_DOMAIN_GTT;
-		obj->mm.dirty = true;
+		p->dirty = true;
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 	return 0;
 }
 
@@ -481,8 +483,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-		if (obj->mm.madv == I915_MADV_WILLNEED)
-			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+		if (obj->mm->madv == I915_MADV_WILLNEED)
+			list_move_tail(&obj->mm_link, &i915->mm.shrink_list);
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
@@ -566,6 +568,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	u32 read_domains = args->read_domains;
 	u32 write_domain = args->write_domain;
+	struct i915_mm_pages *p;
 	int err;
 
 	/* Only handle setting domains to types used by the CPU. */
@@ -634,9 +637,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * continue to assume that the obj remained out of the CPU cached
 	 * domain.
 	 */
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p)) {
+		err = PTR_ERR(p);
 		goto out;
+	}
 
 	err = i915_gem_object_lock_interruptible(obj);
 	if (err)
@@ -658,7 +663,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 
 out_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 out:
 	i915_gem_object_put(obj);
 	return err;
@@ -672,6 +677,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 				 unsigned int *needs_clflush)
 {
+	struct i915_mm_pages *p;
 	int ret;
 
 	*needs_clflush = 0;
@@ -688,9 +694,11 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		goto err_unlock;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p)) {
+		ret = PTR_ERR(p);
 		goto err_unlock;
+	}
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -717,7 +725,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 err_unlock:
 	i915_gem_object_unlock(obj);
 	return ret;
@@ -726,6 +734,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 				  unsigned int *needs_clflush)
 {
+	struct i915_mm_pages *p;
 	int ret;
 
 	*needs_clflush = 0;
@@ -743,9 +752,11 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		goto err_unlock;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p)) {
+		ret = PTR_ERR(p);
 		goto err_unlock;
+	}
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
 	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -776,12 +787,12 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 
 out:
 	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
-	obj->mm.dirty = true;
+	p->dirty = true;
 	/* return with the pages pinned */
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 err_unlock:
 	i915_gem_object_unlock(obj);
 	return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index a23637222af2..2059ee173c0d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -35,6 +35,7 @@ static void internal_free_pages(struct sg_table *st)
 static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_mm_pages *p = obj->mm;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -122,7 +123,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	 * and the caller is expected to repopulate - the contents of this
 	 * object are only valid whilst active and pinned.
 	 */
-	obj->mm.madv = I915_MADV_DONTNEED;
+	p->madv = I915_MADV_DONTNEED;
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
@@ -139,11 +140,13 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
 					       struct sg_table *pages)
 {
+	struct i915_mm_pages *p = obj->mm;
+
 	i915_gem_gtt_finish_pages(obj, pages);
 	internal_free_pages(pages);
 
-	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
+	p->dirty = false;
+	p->madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 71d10ae90922..7567055a0101 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -225,6 +225,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = &i915->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	intel_wakeref_t wakeref;
+	struct i915_mm_pages *mm;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
 	int srcu;
@@ -239,9 +240,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm)) {
+		ret = PTR_ERR(mm);
 		goto err;
+	}
 
 	wakeref = intel_runtime_pm_get(rpm);
 
@@ -327,7 +330,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_gt_reset_unlock(ggtt->vm.gt, srcu);
 err_rpm:
 	intel_runtime_pm_put(rpm, wakeref);
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 err:
 	switch (ret) {
 	case -EIO:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index e184480e6abd..d2f2f27049de 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -24,6 +24,8 @@
 
 #include "display/intel_frontbuffer.h"
 #include "gt/intel_gt.h"
+#include "mm/i915_mm_pages.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
@@ -35,12 +37,15 @@ static struct i915_global_object {
 	struct kmem_cache *slab_objects;
 } global;
 
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+	return kmem_cache_free(global.slab_objects, obj);
+}
+
 static struct drm_i915_gem_object *
 i915_gem_object_init(struct drm_i915_gem_object *obj,
 		     const struct drm_i915_gem_object_ops *ops)
 {
-	mutex_init(&obj->mm.lock);
-
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
 
@@ -50,10 +55,6 @@ i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	obj->ops = ops;
 
-	obj->mm.madv = I915_MADV_WILLNEED;
-	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
-	mutex_init(&obj->mm.get_page.lock);
-
 	return obj;
 }
 
@@ -66,12 +67,13 @@ i915_gem_object_alloc(const struct drm_i915_gem_object_ops *ops)
 	if (!obj)
 		return NULL;
 
-	return i915_gem_object_init(obj, ops);
-}
+	obj->mm = i915_mm_pages_create();
+	if (!obj->mm) {
+		i915_gem_object_free(obj);
+		return NULL;
+	}
 
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
-	return kmem_cache_free(global.slab_objects, obj);
+	return i915_gem_object_init(obj, ops);
 }
 
 /**
@@ -163,6 +165,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	llist_for_each_entry_safe(obj, on, freed, freed) {
 		struct i915_vma *vma, *vn;
+		struct i915_mm_pages *mm;
 
 		trace_i915_gem_object_destroy(obj);
 
@@ -182,10 +185,11 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(obj->userfault_count);
 		GEM_BUG_ON(!list_empty(&obj->lut_list));
 
-		atomic_set(&obj->mm.pages_pin_count, 0);
+		mm = obj->mm;
+		atomic_set(&mm->pin_count, 0);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		GEM_BUG_ON(i915_gem_object_has_pages(obj));
-		bitmap_free(obj->bit_17);
+		i915_mm_pages_put(mm);
 
 		if (obj->base.import_attach)
 			drm_prime_gem_destroy(&obj->base, NULL);
@@ -274,7 +278,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		unsigned long flags;
 
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		list_del_init(&obj->mm.link);
+		list_del_init(&obj->mm_link);
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index db8537c9314c..a4f855eedf6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -13,6 +13,8 @@
 
 #include <drm/i915_drm.h>
 
+#include "mm/i915_mm_pages.h"
+
 #include "i915_gem_object_types.h"
 
 #include "i915_gem_gtt.h"
@@ -230,15 +232,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 unsigned int sg_page_sizes);
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+struct i915_mm_pages *
+__i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
-static inline int __must_check
+static inline struct i915_mm_pages * __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	might_lock(&obj->mm.lock);
+	might_lock(&obj->mm->lock);
 
-	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
-		return 0;
+	if (atomic_inc_not_zero(&obj->mm->pin_count))
+		return obj->mm;
 
 	return __i915_gem_object_get_pages(obj);
 }
@@ -246,30 +249,27 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
 {
-	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm->pages));
 }
 
 static inline void
 __i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
-	atomic_inc(&obj->mm.pages_pin_count);
+	i915_mm_pages_pin(obj->mm);
 }
 
 static inline bool
 i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
 {
-	return atomic_read(&obj->mm.pages_pin_count);
+	return i915_mm_pages_is_pinned(obj->mm);
 }
 
 static inline void
 __i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
-	atomic_dec(&obj->mm.pages_pin_count);
+	i915_mm_pages_unpin(obj->mm);
 }
 
 static inline void
@@ -278,11 +278,6 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 	__i915_gem_object_unpin_pages(obj);
 }
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
-};
-
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 				enum i915_mm_subclass subclass);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ede0eb4218a8..5e1299c90aec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -9,6 +9,8 @@
 
 #include <drm/drm_gem.h>
 
+#include "mm/i915_mm_pages.h"
+
 #include "i915_active.h"
 #include "i915_selftest.h"
 
@@ -155,76 +157,13 @@ struct drm_i915_gem_object {
 	/** Count of how many global VMA are currently pinned for use by HW */
 	unsigned int pin_global;
 
-	struct {
-		struct mutex lock; /* protects the pages and their use */
-		atomic_t pages_pin_count;
-
-		struct sg_table *pages;
-		void *mapping;
-
-		/* TODO: whack some of this into the error state */
-		struct i915_page_sizes {
-			/**
-			 * The sg mask of the pages sg_table. i.e the mask of
-			 * of the lengths for each sg entry.
-			 */
-			unsigned int phys;
-
-			/**
-			 * The gtt page sizes we are allowed to use given the
-			 * sg mask and the supported page sizes. This will
-			 * express the smallest unit we can use for the whole
-			 * object, as well as the larger sizes we may be able
-			 * to use opportunistically.
-			 */
-			unsigned int sg;
-
-			/**
-			 * The actual gtt page size usage. Since we can have
-			 * multiple vma associated with this object we need to
-			 * prevent any trampling of state, hence a copy of this
-			 * struct also lives in each vma, therefore the gtt
-			 * value here should only be read/write through the vma.
-			 */
-			unsigned int gtt;
-		} page_sizes;
-
-		I915_SELFTEST_DECLARE(unsigned int page_mask);
-
-		struct i915_gem_object_page_iter {
-			struct scatterlist *sg_pos;
-			unsigned int sg_idx; /* in pages, but 32bit eek! */
-
-			struct radix_tree_root radix;
-			struct mutex lock; /* protects this cache */
-		} get_page;
-
-		/**
-		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
-		 * locked by i915->mm.obj_lock.
-		 */
-		struct list_head link;
-
-		/**
-		 * Advice: are the backing pages purgeable?
-		 */
-		unsigned int madv:2;
-
-		/**
-		 * This is set if the object has been written to since the
-		 * pages were last acquired.
-		 */
-		bool dirty:1;
-
-		/**
-		 * This is set if the object has been pinned due to unknown
-		 * swizzling.
-		 */
-		bool quirked:1;
-	} mm;
+	struct i915_mm_pages *mm;
 
-	/** Record of address bit 17 of each page at last unbind. */
-	unsigned long *bit_17;
+	/**
+	 * Element within i915->mm.unbound_list or i915->mm.bound_list,
+	 * locked by i915->mm.obj_lock.
+	 */
+	struct list_head mm_link;
 
 	union {
 		struct i915_gem_userptr {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..b2b030684e88 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -14,9 +14,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
+	struct i915_mm_pages *mm = obj->mm;
 	int i;
 
-	lockdep_assert_held(&obj->mm.lock);
+	lockdep_assert_held(&mm->lock);
 
 	/* Make the pages coherent with the GPU (flushing any swapin). */
 	if (obj->cache_dirty) {
@@ -26,20 +27,20 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		obj->cache_dirty = false;
 	}
 
-	obj->mm.get_page.sg_pos = pages->sgl;
-	obj->mm.get_page.sg_idx = 0;
+	mm->get_page.sg_pos = pages->sgl;
+	mm->get_page.sg_idx = 0;
 
-	obj->mm.pages = pages;
+	mm->pages = pages;
 
 	if (i915_gem_object_is_tiled(obj) &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		GEM_BUG_ON(obj->mm.quirked);
-		__i915_gem_object_pin_pages(obj);
-		obj->mm.quirked = true;
+		GEM_BUG_ON(mm->quirked);
+		i915_mm_pages_pin(mm);
+		mm->quirked = true;
 	}
 
 	GEM_BUG_ON(!sg_page_sizes);
-	obj->mm.page_sizes.phys = sg_page_sizes;
+	mm->page_sizes.phys = sg_page_sizes;
 
 	/*
 	 * Calculate the supported page-sizes which fit into the given
@@ -49,12 +50,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	 * 64K or 4K pages, although in practice this will depend on a number of
 	 * other factors.
 	 */
-	obj->mm.page_sizes.sg = 0;
+	mm->page_sizes.sg = 0;
 	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
-		if (obj->mm.page_sizes.phys & ~0u << i)
-			obj->mm.page_sizes.sg |= BIT(i);
+		if (mm->page_sizes.phys & ~0u << i)
+			mm->page_sizes.sg |= BIT(i);
 	}
-	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, mm->page_sizes.sg));
 
 	if (i915_gem_object_is_shrinkable(obj)) {
 		struct list_head *list;
@@ -65,11 +66,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		i915->mm.shrink_count++;
 		i915->mm.shrink_memory += obj->base.size;
 
-		if (obj->mm.madv != I915_MADV_WILLNEED)
+		if (mm->madv != I915_MADV_WILLNEED)
 			list = &i915->mm.purge_list;
 		else
 			list = &i915->mm.shrink_list;
-		list_add_tail(&obj->mm.link, list);
+		list_add_tail(&obj->mm_link, list);
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
@@ -79,7 +80,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	int err;
 
-	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+	if (unlikely(obj->mm->madv != I915_MADV_WILLNEED)) {
 		DRM_DEBUG("Attempting to obtain a purgeable object\n");
 		return -EFAULT;
 	}
@@ -97,16 +98,18 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
  * either as a result of memory pressure (reaping pages under the shrinker)
  * or as the object is itself released.
  */
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+struct i915_mm_pages *
+__i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	int err;
 
-	err = mutex_lock_interruptible(&obj->mm.lock);
+	err = mutex_lock_interruptible(&mm->lock);
 	if (err)
-		return err;
+		return ERR_PTR(err);
 
-	if (unlikely(!i915_gem_object_has_pages(obj))) {
-		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+	if (unlikely(!mm->pages)) {
+		GEM_BUG_ON(atomic_read(&mm->pin_count));
 
 		err = ____i915_gem_object_get_pages(obj);
 		if (err)
@@ -114,11 +117,11 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 		smp_mb__before_atomic();
 	}
-	atomic_inc(&obj->mm.pages_pin_count);
+	atomic_inc(&mm->pin_count);
 
 unlock:
-	mutex_unlock(&obj->mm.lock);
-	return err;
+	mutex_unlock(&mm->lock);
+	return err ? ERR_PTR(err) : mm;
 }
 
 /* Immediately discard the backing storage */
@@ -132,31 +135,21 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 /* Try to discard unwanted pages */
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
 {
-	lockdep_assert_held(&obj->mm.lock);
+	lockdep_assert_held(&obj->mm->lock);
 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
 	if (obj->ops->writeback)
 		obj->ops->writeback(obj);
 }
 
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
-	struct radix_tree_iter iter;
-	void __rcu **slot;
-
-	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
-		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
-	rcu_read_unlock();
-}
-
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_mm_pages *mm = obj->mm;
 	struct sg_table *pages;
 
-	pages = fetch_and_zero(&obj->mm.pages);
+	pages = fetch_and_zero(&mm->pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;
 
@@ -165,27 +158,28 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-		list_del(&obj->mm.link);
+		list_del(&obj->mm_link);
 		i915->mm.shrink_count--;
 		i915->mm.shrink_memory -= obj->base.size;
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 
-	if (obj->mm.mapping) {
+	if (mm->mapping) {
 		void *ptr;
 
-		ptr = page_mask_bits(obj->mm.mapping);
+		ptr = page_mask_bits(mm->mapping);
 		if (is_vmalloc_addr(ptr))
 			vunmap(ptr);
 		else
 			kunmap(kmap_to_page(ptr));
 
-		obj->mm.mapping = NULL;
+		mm->mapping = NULL;
 	}
 
-	__i915_gem_object_reset_page_iter(obj);
-	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+	__i915_mm_pages_reset_page_iter(mm);
+	mm->page_sizes.phys = 0;
+	mm->page_sizes.sg = 0;
 
 	return pages;
 }
@@ -193,6 +187,7 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 				enum i915_mm_subclass subclass)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	struct sg_table *pages;
 	int err;
 
@@ -202,8 +197,8 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(atomic_read(&obj->bind_count));
 
 	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock_nested(&obj->mm.lock, subclass);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+	mutex_lock_nested(&mm->lock, subclass);
+	if (unlikely(atomic_read(&mm->pin_count))) {
 		err = -EBUSY;
 		goto unlock;
 	}
@@ -229,7 +224,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 
 	err = 0;
 unlock:
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&mm->lock);
 
 	return err;
 }
@@ -239,7 +234,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
-	struct sg_table *sgt = obj->mm.pages;
+	struct sg_table *sgt = obj->mm->pages;
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	struct page *stack_pages[32];
@@ -288,6 +283,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	enum i915_map_type has_type;
 	bool pinned;
 	void *ptr;
@@ -296,14 +292,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (unlikely(!i915_gem_object_has_struct_page(obj)))
 		return ERR_PTR(-ENXIO);
 
-	err = mutex_lock_interruptible(&obj->mm.lock);
+	err = mutex_lock_interruptible(&mm->lock);
 	if (err)
 		return ERR_PTR(err);
 
 	pinned = !(type & I915_MAP_OVERRIDE);
 	type &= ~I915_MAP_OVERRIDE;
 
-	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+	if (!atomic_inc_not_zero(&mm->pin_count)) {
 		if (unlikely(!i915_gem_object_has_pages(obj))) {
 			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
 
@@ -313,12 +309,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 
 			smp_mb__before_atomic();
 		}
-		atomic_inc(&obj->mm.pages_pin_count);
+		atomic_inc(&mm->pin_count);
 		pinned = false;
 	}
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
-	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	ptr = page_unpack_bits(mm->mapping, &has_type);
 	if (ptr && has_type != type) {
 		if (pinned) {
 			err = -EBUSY;
@@ -330,7 +326,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 		else
 			kunmap(kmap_to_page(ptr));
 
-		ptr = obj->mm.mapping = NULL;
+		ptr = mm->mapping = NULL;
 	}
 
 	if (!ptr) {
@@ -340,15 +336,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			goto err_unpin;
 		}
 
-		obj->mm.mapping = page_pack_bits(ptr, type);
+		mm->mapping = page_pack_bits(ptr, type);
 	}
 
 out_unlock:
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&mm->lock);
 	return ptr;
 
 err_unpin:
-	atomic_dec(&obj->mm.pages_pin_count);
+	atomic_dec(&mm->pin_count);
 err_unlock:
 	ptr = ERR_PTR(err);
 	goto out_unlock;
@@ -359,18 +355,19 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
 				 unsigned long size)
 {
 	enum i915_map_type has_type;
+	struct i915_mm_pages *mm = obj->mm;
 	void *ptr;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
 				     offset, size, obj->base.size));
 
-	obj->mm.dirty = true;
+	mm->dirty = true;
 
 	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
 		return;
 
-	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+	ptr = page_unpack_bits(mm->mapping, &has_type);
 	if (has_type == I915_MAP_WC)
 		return;
 
@@ -386,7 +383,7 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
 		       unsigned int n,
 		       unsigned int *offset)
 {
-	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+	struct i915_page_iter *iter = &obj->mm->get_page;
 	struct scatterlist *sg;
 	unsigned int idx, count;
 
@@ -514,7 +511,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
 	struct page *page;
 
 	page = i915_gem_object_get_page(obj, n);
-	if (!obj->mm.dirty)
+	if (!obj->mm->dirty)
 		set_page_dirty(page);
 
 	return page;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 102fd7a23d3d..f5c5a8923c35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -98,9 +98,11 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	__i915_gem_object_release_shmem(obj, pages, false);
 
-	if (obj->mm.dirty) {
+	if (mm->dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
 		char *vaddr = obj->phys_handle->vaddr;
 		int i;
@@ -119,12 +121,12 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			kunmap_atomic(dst);
 
 			set_page_dirty(page);
-			if (obj->mm.madv == I915_MADV_WILLNEED)
+			if (mm->madv == I915_MADV_WILLNEED)
 				mark_page_accessed(page);
 			put_page(page);
 			vaddr += PAGE_SIZE;
 		}
-		obj->mm.dirty = false;
+		mm->dirty = false;
 	}
 
 	sg_free_table(pages);
@@ -140,6 +142,7 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 
 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	struct sg_table *pages;
 	int err;
 
@@ -156,19 +159,19 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (err)
 		return err;
 
-	mutex_lock(&obj->mm.lock);
+	mutex_lock(&mm->lock);
 
-	if (obj->mm.madv != I915_MADV_WILLNEED) {
+	if (mm->madv != I915_MADV_WILLNEED) {
 		err = -EFAULT;
 		goto err_unlock;
 	}
 
-	if (obj->mm.quirked) {
+	if (mm->quirked) {
 		err = -EFAULT;
 		goto err_unlock;
 	}
 
-	if (obj->mm.mapping) {
+	if (mm->mapping) {
 		err = -EBUSY;
 		goto err_unlock;
 	}
@@ -186,7 +189,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 
 	if (!IS_ERR_OR_NULL(pages))
 		i915_gem_shmem_ops.put_pages(obj, pages);
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&mm->lock);
 	return 0;
 
 err_xfer:
@@ -197,7 +200,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 	}
 err_unlock:
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&mm->lock);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8e2eeaec06cb..2aa1efbedc7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -178,7 +178,7 @@ static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
 {
 	return list_first_entry_or_null(list,
 					struct drm_i915_gem_object,
-					mm.link);
+					mm_link);
 }
 
 void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -216,7 +216,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
 		LIST_HEAD(keep);
 
 		while ((obj = first_mm_object(*phase))) {
-			list_move_tail(&obj->mm.link, &keep);
+			list_move_tail(&obj->mm_link, &keep);
 
 			/* Beware the background _i915_gem_free_objects */
 			if (!kref_get_unless_zero(&obj->base.refcount))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index f7982e5789d1..680bd6416a9a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -215,6 +215,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 static void
 shmem_truncate(struct drm_i915_gem_object *obj)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	/*
 	 * Our goal here is to return as much of the memory as
 	 * is possible back to the system as we are called from OOM.
@@ -222,8 +224,8 @@ shmem_truncate(struct drm_i915_gem_object *obj)
 	 * backing pages, *now*.
 	 */
 	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
-	obj->mm.madv = __I915_MADV_PURGED;
-	obj->mm.pages = ERR_PTR(-EFAULT);
+	mm->madv = __I915_MADV_PURGED;
+	mm->pages = ERR_PTR(-EFAULT);
 }
 
 static void
@@ -276,10 +278,12 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
 				bool needs_clflush)
 {
-	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
+	struct i915_mm_pages *mm = obj->mm;
+
+	GEM_BUG_ON(mm->madv == __I915_MADV_PURGED);
 
-	if (obj->mm.madv == I915_MADV_DONTNEED)
-		obj->mm.dirty = false;
+	if (mm->madv == I915_MADV_DONTNEED)
+		mm->dirty = false;
 
 	if (needs_clflush &&
 	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
@@ -292,6 +296,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 static void
 shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	struct sgt_iter sgt_iter;
 	struct pagevec pvec;
 	struct page *page;
@@ -307,10 +312,10 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
 
 	pagevec_init(&pvec);
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (obj->mm.dirty)
+		if (mm->dirty)
 			set_page_dirty(page);
 
-		if (obj->mm.madv == I915_MADV_WILLNEED)
+		if (mm->madv == I915_MADV_WILLNEED)
 			mark_page_accessed(page);
 
 		if (!pagevec_add(&pvec, page))
@@ -318,7 +323,7 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
 	}
 	if (pagevec_count(&pvec))
 		check_release_pagevec(&pvec);
-	obj->mm.dirty = false;
+	mm->dirty = false;
 
 	sg_free_table(pages);
 	kfree(pages);
@@ -348,7 +353,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_has_pages(obj))
 		return -ENODEV;
 
-	if (obj->mm.madv != I915_MADV_WILLNEED)
+	if (obj->mm->madv != I915_MADV_WILLNEED)
 		return -EFAULT;
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 3f4c6bdcc3c3..86a11dcef367 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -57,6 +57,8 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	/* Consider only shrinkable ojects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
@@ -69,7 +71,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * to the GPU, simply unbinding from the GPU is not going to succeed
 	 * in releasing our pin count on the pages themselves.
 	 */
-	if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
+	if (atomic_read(&mm->pin_count) > atomic_read(&obj->bind_count))
 		return false;
 
 	/* If any vma are "permanently" pinned, it will prevent us from
@@ -85,7 +87,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * discard the contents (because the user has marked them as being
 	 * purgeable) or if we can move their contents out to swap.
 	 */
-	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+	return swap_available() || mm->madv == I915_MADV_DONTNEED;
 }
 
 static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
@@ -106,7 +108,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
 static void try_to_writeback(struct drm_i915_gem_object *obj,
 			     unsigned int flags)
 {
-	switch (obj->mm.madv) {
+	switch (obj->mm->madv) {
 	case I915_MADV_DONTNEED:
 		i915_gem_object_truncate(obj);
 	case __I915_MADV_PURGED:
@@ -229,11 +231,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
-						       mm.link))) {
-			list_move_tail(&obj->mm.link, &still_in_list);
+						       mm_link))) {
+			list_move_tail(&obj->mm_link, &still_in_list);
 
 			if (shrink & I915_SHRINK_VMAPS &&
-			    !is_vmalloc_addr(obj->mm.mapping))
+			    !is_vmalloc_addr(obj->mm->mapping))
 				continue;
 
 			if (!(shrink & I915_SHRINK_ACTIVE) &&
@@ -254,13 +256,13 @@ i915_gem_shrink(struct drm_i915_private *i915,
 
 			if (unsafe_drop_pages(obj, shrink)) {
 				/* May arrive from get_pages on another bo */
-				mutex_lock_nested(&obj->mm.lock,
+				mutex_lock_nested(&obj->mm->lock,
 						  I915_MM_SHRINKER);
 				if (!i915_gem_object_has_pages(obj)) {
 					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
-				mutex_unlock(&obj->mm.lock);
+				mutex_unlock(&obj->mm->lock);
 			}
 
 			scanned += obj->base.size >> PAGE_SHIFT;
@@ -401,7 +403,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 */
 	available = unevictable = 0;
 	spin_lock_irqsave(&i915->mm.obj_lock, flags);
-	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+	list_for_each_entry(obj, &i915->mm.shrink_list, mm_link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index ca0c2f451742..deeb8e0b536a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -204,6 +204,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 			   unsigned int tiling, unsigned int stride)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_mm_pages *mm = obj->mm;
 	struct i915_vma *vma;
 	int err;
 
@@ -247,22 +248,22 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	 * pages to prevent them being swapped out and causing corruption
 	 * due to the change in swizzling.
 	 */
-	mutex_lock(&obj->mm.lock);
-	if (i915_gem_object_has_pages(obj) &&
-	    obj->mm.madv == I915_MADV_WILLNEED &&
+	mutex_lock(&mm->lock);
+	if (mm->pages &&
+	    mm->madv == I915_MADV_WILLNEED &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		if (tiling == I915_TILING_NONE) {
-			GEM_BUG_ON(!obj->mm.quirked);
-			__i915_gem_object_unpin_pages(obj);
-			obj->mm.quirked = false;
+			GEM_BUG_ON(!mm->quirked);
+			i915_mm_pages_unpin(mm);
+			mm->quirked = false;
 		}
 		if (!i915_gem_object_is_tiled(obj)) {
-			GEM_BUG_ON(obj->mm.quirked);
-			__i915_gem_object_pin_pages(obj);
-			obj->mm.quirked = true;
+			GEM_BUG_ON(mm->quirked);
+			i915_mm_pages_pin(mm);
+			mm->quirked = true;
 		}
 	}
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&mm->lock);
 
 	for_each_ggtt_vma(vma, obj) {
 		vma->fence_size =
@@ -283,13 +284,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 
 	/* Try to preallocate memory required to save swizzling on put-pages */
 	if (i915_gem_object_needs_bit17_swizzle(obj)) {
-		if (!obj->bit_17) {
-			obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
-						    GFP_KERNEL);
+		if (!obj->mm->bit_17) {
+			obj->mm->bit_17 =
+				bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+					      GFP_KERNEL);
 		}
 	} else {
-		bitmap_free(obj->bit_17);
-		obj->bit_17 = NULL;
+		bitmap_free(obj->mm->bit_17);
+		obj->mm->bit_17 = NULL;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 95d6355ce684..de0dc502f6d9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -509,7 +509,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		}
 	}
 
-	mutex_lock(&obj->mm.lock);
+	mutex_lock(&obj->mm->lock);
 	if (obj->userptr.work == &work->work) {
 		struct sg_table *pages = ERR_PTR(ret);
 
@@ -526,7 +526,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		if (IS_ERR(pages))
 			__i915_gem_userptr_set_active(obj, false);
 	}
-	mutex_unlock(&obj->mm.lock);
+	mutex_unlock(&obj->mm->lock);
 
 	release_pages(pvec, pinned);
 	kvfree(pvec);
@@ -651,6 +651,7 @@ static void
 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 			   struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	struct sgt_iter sgt_iter;
 	struct page *page;
 
@@ -664,13 +665,13 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (obj->mm.dirty)
+		if (mm->dirty)
 			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		put_page(page);
 	}
-	obj->mm.dirty = false;
+	mm->dirty = false;
 
 	sg_free_table(pages);
 	kfree(pages);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index ac958a4c947d..743dfe426a74 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -78,10 +78,12 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
 static void huge_put_pages(struct drm_i915_gem_object *obj,
 			   struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	i915_gem_gtt_finish_pages(obj, pages);
 	huge_free_pages(obj, pages);
 
-	obj->mm.dirty = false;
+	mm->dirty = false;
 }
 
 static const struct drm_i915_gem_object_ops huge_ops = {
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 016bdf7ad4ab..e1ebf81cd060 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -56,7 +56,8 @@ static void huge_pages_free_pages(struct sg_table *st)
 static int get_huge_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
-	unsigned int page_mask = obj->mm.page_mask;
+	struct i915_mm_pages *mm = obj->mm;
+	unsigned int page_mask = mm->page_mask;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -113,9 +114,9 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_gtt_prepare_pages(obj, st))
 		goto err;
 
-	obj->mm.madv = I915_MADV_DONTNEED;
+	mm->madv = I915_MADV_DONTNEED;
 
-	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+	GEM_BUG_ON(sg_page_sizes != mm->page_mask);
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
 	return 0;
@@ -131,11 +132,13 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 static void put_huge_pages(struct drm_i915_gem_object *obj,
 			   struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	i915_gem_gtt_finish_pages(obj, pages);
 	huge_pages_free_pages(pages);
 
-	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
+	mm->dirty = false;
+	mm->madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -171,7 +174,7 @@ huge_pages_object(struct drm_i915_private *i915,
 	obj->read_domains = I915_GEM_DOMAIN_CPU;
 	obj->cache_level = I915_CACHE_NONE;
 
-	obj->mm.page_mask = page_mask;
+	obj->mm->page_mask = page_mask;
 
 	return obj;
 }
@@ -180,9 +183,10 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
-	struct sg_table *st;
-	struct scatterlist *sg;
+	struct i915_mm_pages *mm = obj->mm;
 	unsigned int sg_page_sizes;
+	struct scatterlist *sg;
+	struct sg_table *st;
 	u64 rem;
 
 	st = kmalloc(sizeof(*st), GFP);
@@ -226,7 +230,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 
 	i915_sg_trim(st);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
+	mm->madv = I915_MADV_DONTNEED;
 
 	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
@@ -236,9 +240,10 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct sg_table *st;
-	struct scatterlist *sg;
+	struct i915_mm_pages *mm = obj->mm;
 	unsigned int page_size;
+	struct scatterlist *sg;
+	struct sg_table *st;
 
 	st = kmalloc(sizeof(*st), GFP);
 	if (!st)
@@ -260,7 +265,7 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 	sg_dma_address(sg) = page_size;
 
-	obj->mm.madv = I915_MADV_DONTNEED;
+	mm->madv = I915_MADV_DONTNEED;
 
 	__i915_gem_object_set_pages(obj, st, sg->length);
 
@@ -278,9 +283,11 @@ static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
 static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
 				struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	fake_free_huge_pages(obj, pages);
-	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
+	mm->dirty = false;
+	mm->madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -327,6 +334,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
 	struct drm_i915_private *i915 = vma->vm->i915;
 	unsigned int supported = INTEL_INFO(i915)->page_sizes;
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct i915_mm_pages *mm = obj->mm;
 	int err = 0;
 
 	if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
@@ -341,21 +349,21 @@ static int igt_check_page_sizes(struct i915_vma *vma)
 		err = -EINVAL;
 	}
 
-	if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+	if (vma->page_sizes.phys != mm->page_sizes.phys) {
 		pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
-		       vma->page_sizes.phys, obj->mm.page_sizes.phys);
+		       vma->page_sizes.phys, mm->page_sizes.phys);
 		err = -EINVAL;
 	}
 
-	if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+	if (vma->page_sizes.sg != mm->page_sizes.sg) {
 		pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
-		       vma->page_sizes.sg, obj->mm.page_sizes.sg);
+		       vma->page_sizes.sg, mm->page_sizes.sg);
 		err = -EINVAL;
 	}
 
-	if (obj->mm.page_sizes.gtt) {
+	if (mm->page_sizes.gtt) {
 		pr_err("obj->page_sizes.gtt(%u) should never be set\n",
-		       obj->mm.page_sizes.gtt);
+		       mm->page_sizes.gtt);
 		err = -EINVAL;
 	}
 
@@ -467,6 +475,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 		unsigned int size =
 			round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
 		struct i915_vma *vma;
+		struct i915_mm_pages *mm;
 
 		obj = fake_huge_pages_object(i915, size, true);
 		if (IS_ERR(obj))
@@ -479,12 +488,14 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 			goto out_put;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
 			goto out_put;
+		}
 
 		/* Force the page size for this object */
-		obj->mm.page_sizes.sg = page_size;
+		mm->page_sizes.sg = page_size;
 
 		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 		if (IS_ERR(vma)) {
@@ -555,7 +566,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 
 		i915_vma_close(vma);
 
-		i915_gem_object_unpin_pages(obj);
+		i915_mm_pages_unpin(mm);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
@@ -605,6 +616,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 		u64 size = page_num << PAGE_SHIFT;
 		struct i915_vma *vma;
 		unsigned int expected_gtt = 0;
+		struct i915_mm_pages *mm;
 		int i;
 
 		obj = fake_huge_pages_object(i915, size, single);
@@ -621,8 +633,9 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
 			break;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err) {
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
 			i915_gem_object_put(obj);
 			break;
 		}
@@ -793,19 +805,23 @@ static int igt_mock_ppgtt_64K(void *arg)
 		unsigned int flags = PIN_USER;
 
 		for (single = 0; single <= 1; single++) {
+			struct i915_mm_pages *mm;
+
 			obj = fake_huge_pages_object(i915, size, !!single);
 			if (IS_ERR(obj))
 				return PTR_ERR(obj);
 
-			err = i915_gem_object_pin_pages(obj);
-			if (err)
+			mm = i915_gem_object_pin_pages(obj);
+			if (IS_ERR(mm)) {
+				err = PTR_ERR(mm);
 				goto out_object_put;
+			}
 
 			/*
 			 * Disable 2M pages -- We only want to use 64K/4K pages
 			 * for this test.
 			 */
-			obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+			mm->page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
 
 			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
 			if (IS_ERR(vma)) {
@@ -853,7 +869,7 @@ static int igt_mock_ppgtt_64K(void *arg)
 			i915_vma_unpin(vma);
 			i915_vma_close(vma);
 
-			i915_gem_object_unpin_pages(obj);
+			i915_mm_pages_unpin(mm);
 			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 			i915_gem_object_put(obj);
 		}
@@ -1089,6 +1105,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
 	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
+	struct i915_mm_pages *mm = obj->mm;
 	I915_RND_STATE(prng);
 	IGT_TIMEOUT(end_time);
 	unsigned int max_page_size;
@@ -1103,10 +1120,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
 
 	size = obj->base.size;
-	if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+	if (mm->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
 		size = round_up(size, I915_GTT_PAGE_SIZE_2M);
 
-	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+	max_page_size = rounddown_pow_of_two(mm->page_sizes.sg);
 	max = div_u64((vm->total - size), max_page_size);
 
 	n = 0;
@@ -1151,7 +1168,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		 * boundary, however to improve coverage we opt for testing both
 		 * aligned and unaligned offsets.
 		 */
-		if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+		if (mm->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
 			offset_low = round_down(offset_low,
 						I915_GTT_PAGE_SIZE_2M);
 
@@ -1215,6 +1232,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 		 */
 		for (page_mask = 2; page_mask <= size_mask; page_mask++) {
 			unsigned int page_sizes = 0;
+			struct i915_mm_pages *mm;
 
 			for (i = 0; i < n; i++) {
 				if (page_mask & BIT(i))
@@ -1234,8 +1252,9 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 				goto out_device;
 			}
 
-			err = i915_gem_object_pin_pages(obj);
-			if (err) {
+			mm = i915_gem_object_pin_pages(obj);
+			if (IS_ERR(mm)) {
+				err = PTR_ERR(mm);
 				i915_gem_object_put(obj);
 
 				if (err == -ENOMEM) {
@@ -1252,7 +1270,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 			}
 
 			/* Force the page-size for the gtt insertion */
-			obj->mm.page_sizes.sg = page_sizes;
+			mm->page_sizes.sg = page_sizes;
 
 			err = igt_write_huge(ctx, obj);
 			if (err) {
@@ -1261,7 +1279,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 				goto out_unpin;
 			}
 
-			i915_gem_object_unpin_pages(obj);
+			i915_mm_pages_unpin(mm);
 			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 			i915_gem_object_put(obj);
 		}
@@ -1301,16 +1319,19 @@ static int igt_ppgtt_internal_huge(void *arg)
 
 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
 		unsigned int size = sizes[i];
+		struct i915_mm_pages *mm;
 
 		obj = i915_gem_object_create_internal(i915, size);
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
 			goto out_put;
+		}
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
+		if (mm->page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
 			pr_info("internal unable to allocate huge-page(s) with size=%u\n",
 				size);
 			goto out_unpin;
@@ -1323,7 +1344,7 @@ static int igt_ppgtt_internal_huge(void *arg)
 			goto out_unpin;
 		}
 
-		i915_gem_object_unpin_pages(obj);
+		i915_mm_pages_unpin(mm);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
@@ -1370,16 +1391,19 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 
 	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
 		unsigned int size = sizes[i];
+		struct i915_mm_pages *mm;
 
 		obj = i915_gem_object_create_shmem(i915, size);
 		if (IS_ERR(obj))
 			return PTR_ERR(obj);
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err)
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
 			goto out_put;
+		}
 
-		if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+		if (mm->page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
 			pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n",
 				size);
 			goto out_unpin;
@@ -1392,7 +1416,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 			goto out_unpin;
 		}
 
-		i915_gem_object_unpin_pages(obj);
+		i915_mm_pages_unpin(mm);
 		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
@@ -1636,7 +1660,7 @@ static int igt_shrink_thp(void *arg)
 	if (err)
 		goto out_close;
 
-	if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+	if (obj->mm->page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
 		pr_info("failed to allocate THP, finishing test early\n");
 		goto out_unpin;
 	}
@@ -1668,7 +1692,7 @@ static int igt_shrink_thp(void *arg)
 		goto out_close;
 	}
 
-	if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+	if (obj->mm->page_sizes.sg || obj->mm->page_sizes.phys) {
 		pr_err("residual page-size bits left\n");
 		err = -EINVAL;
 		goto out_close;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 275c28926067..79ab58276449 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -59,8 +59,8 @@ static int igt_client_fill(void *arg)
 		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
 			obj->cache_dirty = true;
 
-		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
-						       &obj->mm.page_sizes,
+		err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm->pages,
+						       &obj->mm->page_sizes,
 						       val);
 		if (err)
 			goto err_unpin;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index d85d1ce273ca..b571546c3a4f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -163,6 +163,7 @@ static int igt_dmabuf_import_ownership(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
 	struct dma_buf *dmabuf;
+	struct i915_mm_pages *p;
 	void *ptr;
 	int err;
 
@@ -190,14 +191,15 @@ static int igt_dmabuf_import_ownership(void *arg)
 
 	dma_buf_put(dmabuf);
 
-	err = i915_gem_object_pin_pages(obj);
+	p = i915_gem_object_pin_pages(obj);
+	err = PTR_ERR_OR_ZERO(p);
 	if (err) {
 		pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
 		goto out_obj;
 	}
 
 	err = 0;
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 out_obj:
 	i915_gem_object_put(obj);
 	return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 097181c55947..5934817be1d7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -181,6 +181,7 @@ static int igt_partial_tiling(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
 	intel_wakeref_t wakeref;
+	struct i915_mm_pages *mm;
 	int tiling;
 	int err;
 
@@ -198,8 +199,9 @@ static int igt_partial_tiling(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err) {
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm)) {
+		err = PTR_ERR(mm);
 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
 		       nreal, obj->base.size / PAGE_SIZE, err);
 		goto out;
@@ -319,7 +321,7 @@ next_tiling: ;
 out_unlock:
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	mutex_unlock(&i915->drm.struct_mutex);
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 out:
 	i915_gem_object_put(obj);
 	return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 2b6db6f799de..e80ff0b8cd14 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -36,6 +36,7 @@ static int igt_gem_huge(void *arg)
 	const unsigned int nreal = 509; /* just to be awkward */
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
+	struct i915_mm_pages *mm;
 	unsigned int n;
 	int err;
 
@@ -47,8 +48,9 @@ static int igt_gem_huge(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err) {
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm)) {
+		err = PTR_ERR(mm);
 		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
 		       nreal, obj->base.size / PAGE_SIZE, err);
 		goto out;
@@ -65,7 +67,7 @@ static int igt_gem_huge(void *arg)
 	}
 
 out_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 out:
 	i915_gem_object_put(obj);
 	return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 94a15e3f6db8..bb5ab4b469b9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -39,7 +39,7 @@ static int mock_phys_object(void *arg)
 		goto out_obj;
 	}
 
-	if (!atomic_read(&obj->mm.pages_pin_count)) {
+	if (!atomic_read(&obj->mm->pin_count)) {
 		pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
 		err = -EINVAL;
 		goto out_obj;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index b0a6bc89cef2..9f6fdaa73297 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -119,7 +119,7 @@ static int __context_pin_state(struct i915_vma *vma)
 	 * it cannot reclaim the object until we release it.
 	 */
 	vma->obj->pin_global++;
-	vma->obj->mm.dirty = true;
+	vma->obj->mm->dirty = true;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 32688ca379ef..828af36e1876 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -50,7 +50,7 @@ static int pool_active(struct i915_active *ref)
 		reservation_object_unlock(resv);
 	}
 
-	return i915_gem_object_pin_pages(node->obj);
+	return PTR_ERR_OR_ZERO(i915_gem_object_pin_pages(node->obj));
 }
 
 static void pool_retire(struct i915_active *ref)
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 5afa99fa2319..01fa10b2d8ad 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -511,7 +511,7 @@ static struct page *status_page(struct intel_engine_cs *engine)
 	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	return sg_page(obj->mm.pages->sgl);
+	return sg_page(obj->mm->pages->sgl);
 }
 
 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index d54113697745..12993eb1de99 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -21,7 +21,7 @@ static struct page *hwsp_page(struct intel_timeline *tl)
 	struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-	return sg_page(obj->mm.pages->sgl);
+	return sg_page(obj->mm->pages->sgl);
 }
 
 static unsigned long hwsp_cacheline(struct intel_timeline *tl)
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a28bcd2d7c09..f838ce96d1d7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1284,7 +1284,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 
 		if (*cmd == MI_BATCH_BUFFER_END) {
 			if (needs_clflush_after) {
-				void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping);
+				void *ptr = page_mask_bits(shadow_batch_obj->mm->mapping);
 				drm_clflush_virt_range(ptr,
 						       (void *)(cmd + 1) - ptr);
 			}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d753db157ef1..ed49b9121bb9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -97,7 +97,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)
 
 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->mm.mapping ? 'M' : ' ';
+	return obj->mm->mapping ? 'M' : ' ';
 }
 
 static const char *
@@ -148,8 +148,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->read_domains,
 		   obj->write_domain,
 		   i915_cache_level_str(dev_priv, obj->cache_level),
-		   obj->mm.dirty ? " dirty" : "",
-		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
+		   obj->mm->dirty ? " dirty" : "",
+		   obj->mm->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9c359d437bf5..5f466191c0ed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -448,6 +448,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_pread *args = data;
 	struct drm_i915_gem_object *obj;
+	struct i915_mm_pages *mm;
 	int ret;
 
 	if (args->size == 0)
@@ -475,15 +476,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm)) {
+		ret = PTR_ERR(mm);
 		goto out;
+	}
 
 	ret = i915_gem_shmem_pread(obj, args);
 	if (ret == -EFAULT || ret == -ENODEV)
 		ret = i915_gem_gtt_pread(obj, args);
 
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 out:
 	i915_gem_object_put(obj);
 	return ret;
@@ -752,6 +755,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_i915_gem_object *obj;
+	struct i915_mm_pages *mm;
 	int ret;
 
 	if (args->size == 0)
@@ -791,9 +795,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm)) {
+		ret = PTR_ERR(mm);
 		goto err;
+	}
 
 	ret = -EFAULT;
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -817,7 +823,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 			ret = i915_gem_shmem_pwrite(obj, args);
 	}
 
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 err:
 	i915_gem_object_put(obj);
 	return ret;
@@ -1069,6 +1075,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_private *i915 = to_i915(dev);
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_i915_gem_object *obj;
+	struct i915_mm_pages *mm;
 	int err;
 
 	switch (args->madv) {
@@ -1083,29 +1090,30 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (!obj)
 		return -ENOENT;
 
-	err = mutex_lock_interruptible(&obj->mm.lock);
+	mm = obj->mm;
+	err = mutex_lock_interruptible(&mm->lock);
 	if (err)
 		goto out;
 
-	if (i915_gem_object_has_pages(obj) &&
+	if (mm->pages &&
 	    i915_gem_object_is_tiled(obj) &&
 	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-		if (obj->mm.madv == I915_MADV_WILLNEED) {
-			GEM_BUG_ON(!obj->mm.quirked);
-			__i915_gem_object_unpin_pages(obj);
-			obj->mm.quirked = false;
+		if (mm->madv == I915_MADV_WILLNEED) {
+			GEM_BUG_ON(!mm->quirked);
+			i915_mm_pages_unpin(mm);
+			mm->quirked = false;
 		}
 		if (args->madv == I915_MADV_WILLNEED) {
-			GEM_BUG_ON(obj->mm.quirked);
-			__i915_gem_object_pin_pages(obj);
-			obj->mm.quirked = true;
+			GEM_BUG_ON(mm->quirked);
+			i915_mm_pages_pin(mm);
+			mm->quirked = true;
 		}
 	}
 
-	if (obj->mm.madv != __I915_MADV_PURGED)
-		obj->mm.madv = args->madv;
+	if (mm->madv != __I915_MADV_PURGED)
+		mm->madv = args->madv;
 
-	if (i915_gem_object_has_pages(obj)) {
+	if (mm->pages) {
 		struct list_head *list;
 
 		if (i915_gem_object_is_shrinkable(obj)) {
@@ -1113,23 +1120,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 
 			spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-			if (obj->mm.madv != I915_MADV_WILLNEED)
+			if (mm->madv != I915_MADV_WILLNEED)
 				list = &i915->mm.purge_list;
 			else
 				list = &i915->mm.shrink_list;
-			list_move_tail(&obj->mm.link, list);
+			list_move_tail(&obj->mm_link, list);
 
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 		}
 	}
 
 	/* if the object is no longer attached, discard its backing storage */
-	if (obj->mm.madv == I915_MADV_DONTNEED &&
-	    !i915_gem_object_has_pages(obj))
+	if (mm->madv == I915_MADV_DONTNEED && !mm->pages)
 		i915_gem_object_truncate(obj);
 
-	args->retained = obj->mm.madv != __I915_MADV_PURGED;
-	mutex_unlock(&obj->mm.lock);
+	args->retained = mm->madv != __I915_MADV_PURGED;
+	mutex_unlock(&mm->lock);
 
 out:
 	i915_gem_object_put(obj);
@@ -1726,7 +1732,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
 	i915_gem_shrink(i915, -1UL, NULL, ~0);
 	i915_gem_drain_freed_objects(i915);
 
-	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+	list_for_each_entry(obj, &i915->mm.shrink_list, mm_link) {
 		i915_gem_object_lock(obj);
 		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
 		i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 6a33a0bb97a9..04c691bfc069 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -768,17 +768,18 @@ void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 				  struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	int i;
 
-	if (obj->bit_17 == NULL)
+	if (!mm->bit_17)
 		return;
 
 	i = 0;
 	for_each_sgt_page(page, sgt_iter, pages) {
 		char new_bit_17 = page_to_phys(page) >> 17;
-		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
+		if ((new_bit_17 & 0x1) != (test_bit(i, mm->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
@@ -800,13 +801,14 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 				    struct sg_table *pages)
 {
 	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
+	struct i915_mm_pages *mm = obj->mm;
 	struct sgt_iter sgt_iter;
 	struct page *page;
 	int i;
 
-	if (obj->bit_17 == NULL) {
-		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
-		if (obj->bit_17 == NULL) {
+	if (!mm->bit_17) {
+		mm->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
+		if (!mm->bit_17) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
 			return;
@@ -814,12 +816,11 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 	}
 
 	i = 0;
-
 	for_each_sgt_page(page, sgt_iter, pages) {
-		if (page_to_phys(page) & (1 << 17))
-			__set_bit(i, obj->bit_17);
+		if (page_to_phys(page) & BIT(17))
+			__set_bit(i, mm->bit_17);
 		else
-			__clear_bit(i, obj->bit_17);
+			__clear_bit(i, mm->bit_17);
 		i++;
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 697d47fbe6e3..207645d07428 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -175,9 +175,9 @@ static int ppgtt_set_pages(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->pages);
 
-	vma->pages = vma->obj->mm.pages;
+	vma->pages = vma->obj->mm->pages;
 
-	vma->page_sizes = vma->obj->mm.page_sizes;
+	vma->page_sizes = vma->obj->mm->page_sizes;
 
 	return 0;
 }
@@ -186,7 +186,7 @@ static void clear_pages(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!vma->pages);
 
-	if (vma->pages != vma->obj->mm.pages) {
+	if (vma->pages != vma->obj->mm->pages) {
 		sg_free_table(vma->pages);
 		kfree(vma->pages);
 	}
@@ -2103,7 +2103,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 		 * try again - if there are no more pages to remove from
 		 * the DMA remapper, i915_gem_shrink will return 0.
 		 */
-		GEM_BUG_ON(obj->mm.pages == pages);
+		GEM_BUG_ON(obj->mm->pages == pages);
 	} while (i915_gem_shrink(to_i915(obj->base.dev),
 				 obj->base.size >> PAGE_SHIFT, NULL,
 				 I915_SHRINK_BOUND |
@@ -2497,7 +2497,7 @@ static int ggtt_set_pages(struct i915_vma *vma)
 	if (ret)
 		return ret;
 
-	vma->page_sizes = vma->obj->mm.page_sizes;
+	vma->page_sizes = vma->obj->mm->page_sizes;
 
 	return 0;
 }
@@ -3508,7 +3508,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 		GEM_BUG_ON(vma->ggtt_view.type);
 		/* fall through */
 	case I915_GGTT_VIEW_NORMAL:
-		vma->pages = vma->obj->mm.pages;
+		vma->pages = vma->obj->mm->pages;
 		return 0;
 
 	case I915_GGTT_VIEW_ROTATED:
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5489cd879315..e6a9d4c3b367 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1059,8 +1059,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->write_domain = obj->write_domain;
 	err->fence_reg = vma->fence ? vma->fence->id : -1;
 	err->tiling = i915_gem_object_get_tiling(obj);
-	err->dirty = obj->mm.dirty;
-	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
+	err->dirty = obj->mm->dirty;
+	err->purgeable = obj->mm->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->cache_level = obj->cache_level;
 }
@@ -1392,7 +1392,7 @@ capture_object(struct drm_i915_private *dev_priv,
 		struct i915_vma fake = {
 			.node = { .start = U64_MAX, .size = obj->base.size },
 			.size = obj->base.size,
-			.pages = obj->mm.pages,
+			.pages = obj->mm->pages,
 			.obj = obj,
 		};
 
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 29c1d09a5e9e..6a51a15627bd 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -517,7 +517,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
 	 * assume that no else is pinning the pages, but as a rough assertion
 	 * that we will not run into problems later, this will do!)
 	 */
-	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
+	GEM_BUG_ON(atomic_read(&obj->mm->pin_count) < atomic_read(&obj->bind_count));
 }
 
 /**
@@ -539,6 +539,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_private *dev_priv = vma->vm->i915;
 	unsigned int cache_level;
+	struct i915_mm_pages *mm = NULL;
 	u64 start, end;
 	int ret;
 
@@ -580,9 +581,9 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	}
 
 	if (vma->obj) {
-		ret = i915_gem_object_pin_pages(vma->obj);
-		if (ret)
-			return ret;
+		mm = i915_gem_object_pin_pages(vma->obj);
+		if (IS_ERR(mm))
+			return PTR_ERR(mm);
 
 		cache_level = vma->obj->cache_level;
 	} else {
@@ -668,8 +669,8 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 err_clear:
 	vma->ops->clear_pages(vma);
 err_unpin:
-	if (vma->obj)
-		i915_gem_object_unpin_pages(vma->obj);
+	if (mm)
+		i915_mm_pages_unpin(mm);
 	return ret;
 }
 
@@ -927,7 +928,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
 		obj->read_domains = 0;
 	}
 	obj->read_domains |= I915_GEM_GPU_DOMAINS;
-	obj->mm.dirty = true;
+	obj->mm->dirty = true;
 
 	export_fence(vma, rq, flags);
 
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index db1e0daca7db..e2709b50001e 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -155,7 +155,7 @@ static void guc_xfer_rsa(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_uc_fw *fw = &guc->fw;
-	struct sg_table *pages = fw->obj->mm.pages;
+	struct sg_table *pages = fw->obj->mm->pages;
 	u32 rsa[UOS_RSA_SCRATCH_COUNT];
 	int i;
 
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 05cbf8338f53..12d1c484c739 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -120,7 +120,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
 static void huc_xfer_rsa(struct intel_huc *huc)
 {
 	struct intel_uc_fw *fw = &huc->fw;
-	struct sg_table *pages = fw->obj->mm.pages;
+	struct sg_table *pages = fw->obj->mm->pages;
 
 	/*
 	 * HuC firmware image is outside GuC accessible range.
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index f342ddd47df8..3ffdab38c318 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -199,7 +199,7 @@ static void intel_uc_fw_ggtt_bind(struct intel_uc_fw *uc_fw)
 	struct i915_vma dummy = {
 		.node.start = intel_uc_fw_ggtt_offset(uc_fw),
 		.node.size = obj->base.size,
-		.pages = obj->mm.pages,
+		.pages = obj->mm->pages,
 		.vm = &ggtt->vm,
 	};
 
@@ -284,7 +284,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
 	if (uc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
 		return -ENOEXEC;
 
-	err = i915_gem_object_pin_pages(uc_fw->obj);
+	err = PTR_ERR_OR_ZERO(i915_gem_object_pin_pages(uc_fw->obj));
 	if (err)
 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
 				 intel_uc_fw_type_repr(uc_fw->type), err);
diff --git a/drivers/gpu/drm/i915/mm/Makefile b/drivers/gpu/drm/i915/mm/Makefile
new file mode 100644
index 000000000000..eec6961015a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/Makefile
@@ -0,0 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+include $(src)/Makefile.header-test
diff --git a/drivers/gpu/drm/i915/mm/Makefile.header-test b/drivers/gpu/drm/i915/mm/Makefile.header-test
new file mode 100644
index 000000000000..61e06cbb4b32
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/Makefile.header-test
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := $(notdir $(wildcard $(src)/*.h))
+
+quiet_cmd_header_test = HDRTEST $@
+      cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+	$(call cmd,header_test)
+
+extra-$(CONFIG_DRM_I915_WERROR) += \
+	$(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages.c b/drivers/gpu/drm/i915/mm/i915_mm_pages.c
new file mode 100644
index 000000000000..7da5124047c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages.c
@@ -0,0 +1,55 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_mm_pages.h"
+
+struct i915_mm_pages *i915_mm_pages_create(void)
+{
+	struct i915_mm_pages *mm;
+
+	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+	if (!mm)
+		return NULL;
+
+	kref_init(&mm->kref);
+	mutex_init(&mm->lock);
+
+	INIT_RADIX_TREE(&mm->get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+	mutex_init(&mm->get_page.lock);
+
+	mm->madv = I915_MADV_WILLNEED;
+
+	return mm;
+}
+
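+/* Drop the radix-tree cache of previously looked up scatterlist positions */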
+void __i915_mm_pages_reset_page_iter(struct i915_mm_pages *mm)
+{
+	struct radix_tree_iter iter;
+	void __rcu **slot;
+
+	rcu_read_lock();
+	radix_tree_for_each_slot(slot, &mm->get_page.radix, &iter, 0)
+		radix_tree_delete(&mm->get_page.radix, iter.index);
+	rcu_read_unlock();
+}
+
+void i915_mm_pages_free(struct kref *kref)
+{
+	struct i915_mm_pages *mm = container_of(kref, typeof(*mm), kref);
+
+	bitmap_free(mm->bit_17);
+
+	mutex_destroy(&mm->get_page.lock);
+	mutex_destroy(&mm->lock);
+
+	kfree(mm);
+}
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages.h b/drivers/gpu/drm/i915/mm/i915_mm_pages.h
new file mode 100644
index 000000000000..bb2dc057465d
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages.h
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef __I915_MM_PAGES_H__
+#define __I915_MM_PAGES_H__
+
+#include "i915_gem.h"
+#include "i915_mm_pages_types.h"
+
+struct i915_mm_pages *i915_mm_pages_create(void);
+void i915_mm_pages_free(struct kref *kref);
+
+void __i915_mm_pages_reset_page_iter(struct i915_mm_pages *mm);
+
+static inline struct i915_mm_pages *i915_mm_pages_get(struct i915_mm_pages *p)
+{
+	kref_get(&p->kref);
+	return p;
+}
+
+static inline void i915_mm_pages_put(struct i915_mm_pages *p)
+{
+	kref_put(&p->kref, i915_mm_pages_free);
+}
+
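+/*
+ * The kref (get/put) controls the lifetime of the struct itself, while
+ * pin_count (pin/unpin) counts active users of the backing store.
+ */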
+static inline void
+i915_mm_pages_pin(struct i915_mm_pages *p)
+{
+	atomic_inc(&p->pin_count);
+	GEM_BUG_ON(!atomic_read(&p->pin_count));
+}
+
+static inline bool
+i915_mm_pages_is_pinned(struct i915_mm_pages *p)
+{
+	return atomic_read(&p->pin_count);
+}
+
+static inline void
+i915_mm_pages_unpin(struct i915_mm_pages *p)
+{
+	GEM_BUG_ON(!atomic_read(&p->pin_count));
+	atomic_dec(&p->pin_count);
+}
+
+#endif /* __I915_MM_PAGES_H__ */
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h b/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h
new file mode 100644
index 000000000000..c4fdfc84e3cc
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h
@@ -0,0 +1,94 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef __I915_MM_PAGES_TYPES_H__
+#define __I915_MM_PAGES_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/scatterlist.h>
+
+#include "i915_selftest.h"
+
+struct i915_mm_pages {
+	struct kref kref;
+
+	struct mutex lock; /* protects the pages and their use */
+	atomic_t pin_count;
+
+	struct sg_table *pages;
+	void *mapping;
+
+	/* TODO: whack some of this into the error state */
+	struct i915_page_sizes {
+		/**
+		 * The sg mask of the pages sg_table, i.e. the mask of
+		 * the lengths for each sg entry.
+		 */
+		unsigned int phys;
+
+		/**
+		 * The gtt page sizes we are allowed to use given the
+		 * sg mask and the supported page sizes. This will
+		 * express the smallest unit we can use for the whole
+		 * object, as well as the larger sizes we may be able
+		 * to use opportunistically.
+		 */
+		unsigned int sg;
+
+		/**
+		 * The actual gtt page size usage. Since we can have
+		 * multiple vma associated with this object we need to
+		 * prevent any trampling of state, hence a copy of this
+		 * struct also lives in each vma, therefore the gtt
+		 * value here should only be read/write through the vma.
+		 */
+		unsigned int gtt;
+	} page_sizes;
+
+	I915_SELFTEST_DECLARE(unsigned int page_mask);
+
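+	/**
+	 * Lazily built cache of scatterlist positions, indexed by page
+	 * number, so repeated lookups need not rewalk the sg_table.
+	 */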
+	struct i915_page_iter {
+		struct scatterlist *sg_pos;
+		unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+		struct radix_tree_root radix;
+		struct mutex lock; /* protects this cache */
+	} get_page;
+
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	unsigned int madv:2;
+
+	/**
+	 * This is set if the object has been written to since the
+	 * pages were last acquired.
+	 */
+	bool dirty:1;
+
+	/**
+	 * This is set if the object has been pinned due to unknown
+	 * swizzling.
+	 */
+	bool quirked:1;
+
+	/** Record of address bit 17 of each page at last unbind. */
+	unsigned long *bit_17;
+};
+
+enum i915_mm_subclass { /* lockdep subclass for mm->lock/struct_mutex */
+	I915_MM_NORMAL = 0,
+	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
+};
+
+#endif /* __I915_MM_PAGES_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b6449d0a8c17..7ca383c160b9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
 		      struct list_head *objects)
 {
 	/* quirk is only for live tiled objects, use it to declare ownership */
-	GEM_BUG_ON(obj->mm.quirked);
-	obj->mm.quirked = true;
+	GEM_BUG_ON(obj->mm->quirked);
+	obj->mm->quirked = true;
 	list_add(&obj->st_link, objects);
 }
 
@@ -72,7 +72,7 @@ static int populate_ggtt(struct drm_i915_private *i915,
 	bound = 0;
 	unbound = 0;
 	list_for_each_entry(obj, objects, st_link) {
-		GEM_BUG_ON(!obj->mm.quirked);
+		GEM_BUG_ON(!obj->mm->quirked);
 
 		if (atomic_read(&obj->bind_count))
 			bound++;
@@ -108,7 +108,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
 
 	mutex_lock(&ggtt->vm.mutex);
 	list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link)
-		if (vma->obj->mm.quirked)
+		if (vma->obj->mm->quirked)
 			i915_vma_unpin(vma);
 	mutex_unlock(&ggtt->vm.mutex);
 }
@@ -119,8 +119,8 @@ static void cleanup_objects(struct drm_i915_private *i915,
 	struct drm_i915_gem_object *obj, *on;
 
 	list_for_each_entry_safe(obj, on, list, st_link) {
-		GEM_BUG_ON(!obj->mm.quirked);
-		obj->mm.quirked = false;
+		GEM_BUG_ON(!obj->mm->quirked);
+		obj->mm->quirked = false;
 		i915_gem_object_put(obj);
 	}
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 4ff9dc615c80..bf185e91c278 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -58,6 +58,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 {
 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
 #define PFN_BIAS 0x1000
+	struct i915_mm_pages *mm = obj->mm;
 	struct sg_table *pages;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -88,7 +89,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 	}
 	GEM_BUG_ON(rem);
 
-	obj->mm.madv = I915_MADV_DONTNEED;
+	mm->madv = I915_MADV_DONTNEED;
 
 	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
@@ -99,9 +100,11 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 static void fake_put_pages(struct drm_i915_gem_object *obj,
 			   struct sg_table *pages)
 {
+	struct i915_mm_pages *mm = obj->mm;
+
 	fake_free_pages(obj, pages);
-	obj->mm.dirty = false;
-	obj->mm.madv = I915_MADV_WILLNEED;
+	mm->dirty = false;
+	mm->madv = I915_MADV_WILLNEED;
 }
 
 static const struct drm_i915_gem_object_ops fake_ops = {
@@ -114,6 +117,7 @@ static struct drm_i915_gem_object *
 fake_dma_object(struct drm_i915_private *i915, u64 size)
 {
 	struct drm_i915_gem_object *obj;
+	struct i915_mm_pages *mm;
 
 	GEM_BUG_ON(!size);
 	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -132,10 +136,11 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	obj->cache_level = I915_CACHE_NONE;
 
 	/* Preallocate the "backing storage" */
-	if (i915_gem_object_pin_pages(obj))
+	mm = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(mm))
 		goto err_obj;
 
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(mm);
 	return obj;
 
 err_obj:
@@ -227,6 +232,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 		I915_RND_SUBSTATE(prng, seed_prng);
 		struct drm_i915_gem_object *obj;
 		unsigned int *order, count, n;
+		struct i915_mm_pages *mm;
 		u64 hole_size;
 
 		hole_size = (hole_end - hole_start) >> size;
@@ -265,7 +271,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 
 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
 
-		if (i915_gem_object_pin_pages(obj)) {
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
 			i915_gem_object_put(obj);
 			kfree(order);
 			break;
@@ -288,7 +295,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))
 				break;
 
-			mock_vma.pages = obj->mm.pages;
+			mock_vma.pages = obj->mm->pages;
 			mock_vma.node.size = BIT_ULL(size);
 			mock_vma.node.start = addr;
 
@@ -306,7 +313,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
 			vm->clear_range(vm, addr, BIT_ULL(size));
 		}
 
-		i915_gem_object_unpin_pages(obj);
+		i915_mm_pages_unpin(mm);
 		i915_gem_object_put(obj);
 
 		kfree(order);
@@ -1145,6 +1152,7 @@ static int igt_ggtt_page(void *arg)
 	intel_wakeref_t wakeref;
 	struct drm_mm_node tmp;
 	unsigned int *order, n;
+	struct i915_mm_pages *p;
 	int err;
 
 	mutex_lock(&i915->drm.struct_mutex);
@@ -1155,9 +1163,11 @@ static int igt_ggtt_page(void *arg)
 		goto out_unlock;
 	}
 
-	err = i915_gem_object_pin_pages(obj);
-	if (err)
+	p = i915_gem_object_pin_pages(obj);
+	if (IS_ERR(p)) {
+		err = PTR_ERR(p);
 		goto out_free;
+	}
 
 	memset(&tmp, 0, sizeof(tmp));
 	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
@@ -1218,7 +1228,7 @@ static int igt_ggtt_page(void *arg)
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 	drm_mm_remove_node(&tmp);
 out_unpin:
-	i915_gem_object_unpin_pages(obj);
+	i915_mm_pages_unpin(p);
 out_free:
 	i915_gem_object_put(obj);
 out_unlock:
@@ -1233,7 +1243,7 @@ static void track_vma_bind(struct i915_vma *vma)
 	atomic_inc(&obj->bind_count); /* track for eviction later */
 	__i915_gem_object_pin_pages(obj);
 
-	vma->pages = obj->mm.pages;
+	vma->pages = obj->mm->pages;
 
 	mutex_lock(&vma->vm->mutex);
 	list_add_tail(&vma->vm_link, &vma->vm->bound_list);
@@ -1306,6 +1316,7 @@ static int igt_gtt_reserve(void *arg)
 	for (total = 0;
 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
 	     total += 2 * I915_GTT_PAGE_SIZE) {
+		struct i915_mm_pages *p;
 		struct i915_vma *vma;
 
 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1315,8 +1326,9 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err) {
+		p = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
 			i915_gem_object_put(obj);
 			goto out;
 		}
@@ -1356,6 +1368,7 @@ static int igt_gtt_reserve(void *arg)
 	for (total = I915_GTT_PAGE_SIZE;
 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
 	     total += 2 * I915_GTT_PAGE_SIZE) {
+		struct i915_mm_pages *p;
 		struct i915_vma *vma;
 
 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1365,8 +1378,9 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err) {
+		p = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
 			i915_gem_object_put(obj);
 			goto out;
 		}
@@ -1513,6 +1527,7 @@ static int igt_gtt_insert(void *arg)
 	for (total = 0;
 	     total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
 	     total += I915_GTT_PAGE_SIZE) {
+		struct i915_mm_pages *p;
 		struct i915_vma *vma;
 
 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1522,8 +1537,9 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err) {
+		p = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
 			i915_gem_object_put(obj);
 			goto out;
 		}
@@ -1618,6 +1634,7 @@ static int igt_gtt_insert(void *arg)
 	for (total = 0;
 	     total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
 	     total += 2 * I915_GTT_PAGE_SIZE) {
+		struct i915_mm_pages *mm;
 		struct i915_vma *vma;
 
 		obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1627,8 +1644,9 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
-		if (err) {
+		mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
 			i915_gem_object_put(obj);
 			goto out;
 		}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index fbc79b14823a..65c37c92b253 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -567,7 +567,7 @@ static int igt_vma_rotate_remap(void *arg)
 						goto out_object;
 					}
 
-					if (vma->pages == obj->mm.pages) {
+					if (vma->pages == obj->mm->pages) {
 						pr_err("VMA using unrotated object pages!\n");
 						err = -EINVAL;
 						goto out_object;
@@ -665,7 +665,7 @@ static bool assert_pin(struct i915_vma *vma,
 			ok = false;
 		}
 
-		if (vma->pages == vma->obj->mm.pages) {
+		if (vma->pages == vma->obj->mm->pages) {
 			pr_err("(%s) VMA using original object pages!\n",
 			       name);
 			ok = false;
@@ -677,7 +677,7 @@ static bool assert_pin(struct i915_vma *vma,
 			ok = false;
 		}
 
-		if (vma->pages != vma->obj->mm.pages) {
+		if (vma->pages != vma->obj->mm->pages) {
 			pr_err("VMA not using object pages!\n");
 			ok = false;
 		}
-- 
2.20.1


