[Intel-gfx] [PATCH 16/42] drm/i915: Refactor object page API

John Harrison <John.C.Harrison at Intel.com>
Mon Oct 10 10:54:39 UTC 2016


On 07/10/2016 10:46, Chris Wilson wrote:
> The plan is to make obtaining the backing storage for the object avoid
> struct_mutex (i.e. use its own locking). The first step is to update the
> API so that normal users only call pin/unpin whilst working on the
> backing storage.
>
> Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
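
The call-site conversions below are pleasingly mechanical: every get + pin pair collapses into a single pin, with a matching unpin once the caller is done touching the backing store. Roughly (a sketch of the converted pattern, not a quote from the patch):

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	/* obj->mm.pages is populated and cannot be reaped while pinned */

	i915_gem_object_unpin_pages(obj);
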
> ---
>   drivers/gpu/drm/i915/i915_cmd_parser.c       |   2 +-
>   drivers/gpu/drm/i915/i915_debugfs.c          |  17 +--
>   drivers/gpu/drm/i915/i915_drv.h              |  89 ++++++++----
>   drivers/gpu/drm/i915/i915_gem.c              | 207 +++++++++++++--------------
>   drivers/gpu/drm/i915/i915_gem_batch_pool.c   |   3 +-
>   drivers/gpu/drm/i915/i915_gem_dmabuf.c       |  14 +-
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c   |   2 +-
>   drivers/gpu/drm/i915/i915_gem_fence.c        |   4 +-
>   drivers/gpu/drm/i915/i915_gem_gtt.c          |  10 +-
>   drivers/gpu/drm/i915/i915_gem_internal.c     |  19 +--
>   drivers/gpu/drm/i915/i915_gem_render_state.c |   2 +-
>   drivers/gpu/drm/i915/i915_gem_shrinker.c     |  10 +-
>   drivers/gpu/drm/i915/i915_gem_stolen.c       |  24 ++--
>   drivers/gpu/drm/i915/i915_gem_tiling.c       |   8 +-
>   drivers/gpu/drm/i915/i915_gem_userptr.c      |  30 ++--
>   drivers/gpu/drm/i915/i915_gpu_error.c        |   4 +-
>   drivers/gpu/drm/i915/intel_lrc.c             |   6 +-
>   17 files changed, 234 insertions(+), 217 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
> index 70980f82a15b..8d20020cb9f9 100644
> --- a/drivers/gpu/drm/i915/i915_cmd_parser.c
> +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
> @@ -1290,7 +1290,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
>   	}
>   
>   	if (ret == 0 && needs_clflush_after)
> -		drm_clflush_virt_range(shadow_batch_obj->mapping, batch_len);
> +		drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
>   	i915_gem_object_unpin_map(shadow_batch_obj);
>   
>   	return ret;
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index e4b5ba771bea..b807ddf65e04 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -112,7 +112,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)
>   
>   static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
>   {
> -	return obj->mapping ? 'M' : ' ';
> +	return obj->mm.mapping ? 'M' : ' ';
>   }
>   
>   static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
> @@ -158,8 +158,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
>   		   i915_gem_active_get_seqno(&obj->last_write,
>   					     &obj->base.dev->struct_mutex),
>   		   i915_cache_level_str(dev_priv, obj->cache_level),
> -		   obj->dirty ? " dirty" : "",
> -		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
> +		   obj->mm.dirty ? " dirty" : "",
> +		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
>   	if (obj->base.name)
>   		seq_printf(m, " (name: %d)", obj->base.name);
>   	list_for_each_entry(vma, &obj->vma_list, obj_link) {
> @@ -411,12 +411,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>   		size += obj->base.size;
>   		++count;
>   
> -		if (obj->madv == I915_MADV_DONTNEED) {
> +		if (obj->mm.madv == I915_MADV_DONTNEED) {
>   			purgeable_size += obj->base.size;
>   			++purgeable_count;
>   		}
>   
> -		if (obj->mapping) {
> +		if (obj->mm.mapping) {
>   			mapped_count++;
>   			mapped_size += obj->base.size;
>   		}
> @@ -433,12 +433,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
>   			++dpy_count;
>   		}
>   
> -		if (obj->madv == I915_MADV_DONTNEED) {
> +		if (obj->mm.madv == I915_MADV_DONTNEED) {
>   			purgeable_size += obj->base.size;
>   			++purgeable_count;
>   		}
>   
> -		if (obj->mapping) {
> +		if (obj->mm.mapping) {
>   			mapped_count++;
>   			mapped_size += obj->base.size;
>   		}
> @@ -2018,7 +2018,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
>   		seq_printf(m, "\tBound in GGTT at 0x%08x\n",
>   			   i915_ggtt_offset(vma));
>   
> -	if (i915_gem_object_get_pages(vma->obj)) {
> +	if (i915_gem_object_pin_pages(vma->obj)) {
>   		seq_puts(m, "\tFailed to get pages for context object\n\n");
The error message needs updating to match the function call change; it still says "Failed to get pages".

>   		return;
>   	}
> @@ -2037,6 +2037,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
>   		kunmap_atomic(reg_state);
>   	}
>   
> +	i915_gem_object_unpin_pages(vma->obj);
>   	seq_putc(m, '\n');
>   }
>   
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index a96b446d8db4..3c22d49005fe 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2238,17 +2238,6 @@ struct drm_i915_gem_object {
>   #define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
>   
>   	/**
> -	 * This is set if the object has been written to since last bound
> -	 * to the GTT
> -	 */
> -	unsigned int dirty:1;
> -
> -	/**
> -	 * Advice: are the backing pages purgeable?
> -	 */
> -	unsigned int madv:2;
> -
> -	/**
>   	 * Whether the current gtt mapping needs to be mappable (and isn't just
>   	 * mappable by accident). Track pin and fault separate for a more
>   	 * accurate mappable working set.
> @@ -2276,16 +2265,31 @@ struct drm_i915_gem_object {
>   	unsigned int bind_count;
>   	unsigned int pin_display;
>   
> -	struct sg_table *pages;
> -	int pages_pin_count;
> -	struct i915_gem_object_page_iter {
> -		struct scatterlist *sg_pos;
> -		unsigned long sg_idx;
> +	struct {
> +		unsigned int pages_pin_count;
> +
> +		struct sg_table *pages;
> +		void *mapping;
> +
> +		struct i915_gem_object_page_iter {
> +			struct scatterlist *sg_pos;
> +			unsigned long sg_idx;
>   
> -		struct radix_tree_root radix;
> -		struct mutex lock;
> -	} get_page;
> -	void *mapping;
> +			struct radix_tree_root radix;
> +			struct mutex lock;
> +		} get_page;
> +
> +		/**
> +		 * Advice: are the backing pages purgeable?
> +		 */
> +		unsigned int madv:2;
> +
> +		/**
> +		 * This is set if the object has been written to since the
> +		 * pages were last acquired.
> +		 */
> +		unsigned int dirty:1;
> +	} mm;
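
Pulling all of the backing-store state (pages, pin count, mapping, page iterator, madv, dirty) into a single obj->mm substructure reads well; if the end goal is to guard exactly this state with its own lock instead of struct_mutex, having it all in one place should make that transition much easier to audit.
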
>   
>   	/** Breadcrumb of last rendering to the buffer.
>   	 * There can only be one writer, but we allow for multiple readers.
> @@ -3160,13 +3164,10 @@ void i915_vma_close(struct i915_vma *vma);
>   void i915_vma_destroy(struct i915_vma *vma);
>   
>   int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
> -int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
>   void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
>   void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
>   
> -int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
> -
> -static inline int __sg_page_count(struct scatterlist *sg)
> +static inline int __sg_page_count(const struct scatterlist *sg)
>   {
>   	return sg->length >> PAGE_SHIFT;
>   }
> @@ -3187,18 +3188,48 @@ dma_addr_t
>   i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
>   				unsigned long n);
>   
> -static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
> +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
> +
> +static inline int __must_check
> +i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
>   {
> -	BUG_ON(obj->pages == NULL);
> -	obj->pages_pin_count++;
> +	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	if (obj->mm.pages_pin_count++)
> +		return 0;
> +
> +	return __i915_gem_object_get_pages(obj);
> +}
> +
> +static inline void
> +__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
> +{
> +	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	GEM_BUG_ON(!obj->mm.pages);
> +	obj->mm.pages_pin_count++;
> +}
> +
> +static inline bool
> +i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
> +{
> +	return obj->mm.pages_pin_count;
> +}
> +
> +static inline void
> +__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
> +{
> +	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
> +	GEM_BUG_ON(!obj->mm.pages);
> +	obj->mm.pages_pin_count--;
>   }
>   
>   static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>   {
> -	BUG_ON(obj->pages_pin_count == 0);
> -	obj->pages_pin_count--;
> +	__i915_gem_object_unpin_pages(obj);
>   }
Am I missing something, or is the above identical to simply
'#define i915_gem_object_unpin_pages __i915_gem_object_unpin_pages'?
In which case, why bother with having two separate functions?
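
My guess is that the answer lies further down the series: once pin/unpin take their own lock (per the commit message above), the double-underscore variants presumably remain as the caller-already-holds-the-lock versions, i.e. something like (purely speculative, obj->mm.lock does not exist in this patch):

	static inline void
	i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
	{
		mutex_lock(&obj->mm.lock);	/* hypothetical per-object page lock */
		__i915_gem_object_unpin_pages(obj);
		mutex_unlock(&obj->mm.lock);
	}

If that is the plan, a comment here would save the next reader the same question.
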

>   
> +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
> +
>   enum i915_map_type {
>   	I915_MAP_WB = 0,
>   	I915_MAP_WC,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index af7d51f16658..df774ddf62ae 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -216,7 +216,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>   	sg_dma_address(sg) = obj->phys_handle->busaddr;
>   	sg_dma_len(sg) = obj->base.size;
>   
> -	obj->pages = st;
> +	obj->mm.pages = st;
>   	return 0;
>   }
>   
> @@ -225,7 +225,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
>   {
>   	int ret;
>   
> -	BUG_ON(obj->madv == __I915_MADV_PURGED);
> +	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
>   
>   	ret = i915_gem_object_set_to_cpu_domain(obj, true);
>   	if (WARN_ON(ret)) {
> @@ -235,10 +235,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
>   		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
>   	}
>   
> -	if (obj->madv == I915_MADV_DONTNEED)
> -		obj->dirty = 0;
> +	if (obj->mm.madv == I915_MADV_DONTNEED)
> +		obj->mm.dirty = false;
>   
> -	if (obj->dirty) {
> +	if (obj->mm.dirty) {
>   		struct address_space *mapping = obj->base.filp->f_mapping;
>   		char *vaddr = obj->phys_handle->vaddr;
>   		int i;
> @@ -257,22 +257,23 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
>   			kunmap_atomic(dst);
>   
>   			set_page_dirty(page);
> -			if (obj->madv == I915_MADV_WILLNEED)
> +			if (obj->mm.madv == I915_MADV_WILLNEED)
>   				mark_page_accessed(page);
>   			put_page(page);
>   			vaddr += PAGE_SIZE;
>   		}
> -		obj->dirty = 0;
> +		obj->mm.dirty = false;
>   	}
>   
> -	sg_free_table(obj->pages);
> -	kfree(obj->pages);
> +	sg_free_table(obj->mm.pages);
> +	kfree(obj->mm.pages);
>   }
>   
>   static void
>   i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
>   {
>   	drm_pci_free(obj->base.dev, obj->phys_handle);
> +	i915_gem_object_unpin_pages(obj);
>   }
>   
>   static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
> @@ -506,7 +507,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>   		return 0;
>   	}
>   
> -	if (obj->madv != I915_MADV_WILLNEED)
> +	if (obj->mm.madv != I915_MADV_WILLNEED)
>   		return -EFAULT;
>   
>   	if (obj->base.filp == NULL)
> @@ -516,7 +517,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>   	if (ret)
>   		return ret;
>   
> -	ret = i915_gem_object_put_pages(obj);
> +	ret = __i915_gem_object_put_pages(obj);
>   	if (ret)
>   		return ret;
>   
> @@ -528,7 +529,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>   	obj->phys_handle = phys;
>   	obj->ops = &i915_gem_phys_ops;
>   
> -	return i915_gem_object_get_pages(obj);
> +	return i915_gem_object_pin_pages(obj);
>   }
>   
>   static int
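
Note the pairing: attach_phys now leaves the pages pinned for the lifetime of the phys handle, and the matching unpin was added to i915_gem_object_release_phys() above. That looks correct.
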
> @@ -724,12 +725,10 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
>   	if (ret)
>   		return ret;
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ret;
>   
> -	i915_gem_object_pin_pages(obj);
> -
>   	i915_gem_object_flush_gtt_write_domain(obj);
>   
>   	/* If we're not in the cpu read domain, set ourself into the gtt
> @@ -777,12 +776,10 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
>   	if (ret)
>   		return ret;
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ret;
>   
> -	i915_gem_object_pin_pages(obj);
> -
>   	i915_gem_object_flush_gtt_write_domain(obj);
>   
>   	/* If we're not in the cpu write domain, set ourself into the
> @@ -812,7 +809,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
>   		obj->cache_dirty = true;
>   
>   	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
> -	obj->dirty = 1;
> +	obj->mm.dirty = true;
>   	/* return with the pages pinned */
>   	return 0;
>   
> @@ -949,13 +946,11 @@ i915_gem_gtt_pread(struct drm_device *dev,
>   		if (ret)
>   			goto out;
>   
> -		ret = i915_gem_object_get_pages(obj);
> +		ret = i915_gem_object_pin_pages(obj);
>   		if (ret) {
>   			remove_mappable_node(&node);
>   			goto out;
>   		}
> -
> -		i915_gem_object_pin_pages(obj);
>   	}
>   
>   	ret = i915_gem_object_set_to_gtt_domain(obj, false);
> @@ -1062,7 +1057,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
>   	offset = args->offset;
>   	remain = args->size;
>   
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
> +	for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
>   			 offset >> PAGE_SHIFT) {
>   		struct page *page = sg_page_iter_page(&sg_iter);
>   
> @@ -1254,13 +1249,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
>   		if (ret)
>   			goto out;
>   
> -		ret = i915_gem_object_get_pages(obj);
> +		ret = i915_gem_object_pin_pages(obj);
>   		if (ret) {
>   			remove_mappable_node(&node);
>   			goto out;
>   		}
> -
> -		i915_gem_object_pin_pages(obj);
>   	}
>   
>   	ret = i915_gem_object_set_to_gtt_domain(obj, true);
> @@ -1268,7 +1261,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
>   		goto out_unpin;
>   
>   	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
> -	obj->dirty = true;
> +	obj->mm.dirty = true;
>   
>   	user_data = u64_to_user_ptr(args->data_ptr);
>   	offset = args->offset;
> @@ -1439,7 +1432,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
>   	offset = args->offset;
>   	remain = args->size;
>   
> -	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
> +	for_each_sg_page(obj->mm.pages->sgl, &sg_iter, obj->mm.pages->nents,
>   			 offset >> PAGE_SHIFT) {
>   		struct page *page = sg_page_iter_page(&sg_iter);
>   		int partial_cacheline_write;
> @@ -2228,7 +2221,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
>   	 * backing pages, *now*.
>   	 */
>   	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
> -	obj->madv = __I915_MADV_PURGED;
> +	obj->mm.madv = __I915_MADV_PURGED;
>   }
>   
>   /* Try to discard unwanted pages */
> @@ -2237,7 +2230,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
>   {
>   	struct address_space *mapping;
>   
> -	switch (obj->madv) {
> +	switch (obj->mm.madv) {
>   	case I915_MADV_DONTNEED:
>   		i915_gem_object_truncate(obj);
>   	case __I915_MADV_PURGED:
> @@ -2258,7 +2251,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>   	struct page *page;
>   	int ret;
>   
> -	BUG_ON(obj->madv == __I915_MADV_PURGED);
> +	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
>   
>   	ret = i915_gem_object_set_to_cpu_domain(obj, true);
>   	if (WARN_ON(ret)) {
> @@ -2274,22 +2267,22 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
>   	if (i915_gem_object_needs_bit17_swizzle(obj))
>   		i915_gem_object_save_bit_17_swizzle(obj);
>   
> -	if (obj->madv == I915_MADV_DONTNEED)
> -		obj->dirty = 0;
> +	if (obj->mm.madv == I915_MADV_DONTNEED)
> +		obj->mm.dirty = false;
>   
> -	for_each_sgt_page(page, sgt_iter, obj->pages) {
> -		if (obj->dirty)
> +	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +		if (obj->mm.dirty)
>   			set_page_dirty(page);
>   
> -		if (obj->madv == I915_MADV_WILLNEED)
> +		if (obj->mm.madv == I915_MADV_WILLNEED)
>   			mark_page_accessed(page);
>   
>   		put_page(page);
>   	}
> -	obj->dirty = 0;
> +	obj->mm.dirty = false;
>   
> -	sg_free_table(obj->pages);
> -	kfree(obj->pages);
> +	sg_free_table(obj->mm.pages);
> +	kfree(obj->mm.pages);
>   }
>   
>   static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
> @@ -2297,21 +2290,20 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
>   	struct radix_tree_iter iter;
>   	void **slot;
>   
> -	radix_tree_for_each_slot(slot, &obj->get_page.radix, &iter, 0)
> -		radix_tree_delete(&obj->get_page.radix, iter.index);
> +	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
> +		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
>   }
>   
> -int
> -i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
> +int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   {
>   	const struct drm_i915_gem_object_ops *ops = obj->ops;
>   
>   	lockdep_assert_held(&obj->base.dev->struct_mutex);
>   
> -	if (obj->pages == NULL)
> +	if (!obj->mm.pages)
>   		return 0;
>   
> -	if (obj->pages_pin_count)
> +	if (i915_gem_object_has_pinned_pages(obj))
>   		return -EBUSY;
>   
>   	GEM_BUG_ON(obj->bind_count);
> @@ -2321,22 +2313,22 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
>   	 * lists early. */
>   	list_del(&obj->global_list);
>   
> -	if (obj->mapping) {
> +	if (obj->mm.mapping) {
>   		void *ptr;
>   
> -		ptr = ptr_mask_bits(obj->mapping);
> +		ptr = ptr_mask_bits(obj->mm.mapping);
>   		if (is_vmalloc_addr(ptr))
>   			vunmap(ptr);
>   		else
>   			kunmap(kmap_to_page(ptr));
>   
> -		obj->mapping = NULL;
> +		obj->mm.mapping = NULL;
>   	}
>   
>   	__i915_gem_object_reset_page_iter(obj);
>   
>   	ops->put_pages(obj);
> -	obj->pages = NULL;
> +	obj->mm.pages = NULL;
>   
>   	i915_gem_object_invalidate(obj);
>   
> @@ -2431,7 +2423,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   	if (!swiotlb_nr_tbl())
>   #endif
>   		sg_mark_end(sg);
> -	obj->pages = st;
> +	obj->mm.pages = st;
>   
>   	ret = i915_gem_gtt_prepare_object(obj);
>   	if (ret)
> @@ -2442,7 +2434,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
>   
>   	if (i915_gem_object_is_tiled(obj) &&
>   	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
> -		i915_gem_object_pin_pages(obj);
> +		__i915_gem_object_pin_pages(obj);
>   
>   	return 0;
>   
> @@ -2474,8 +2466,7 @@ err_pages:
>    * either as a result of memory pressure (reaping pages under the shrinker)
>    * or as the object is itself released.
>    */
> -int
> -i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
> +int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>   {
>   	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>   	const struct drm_i915_gem_object_ops *ops = obj->ops;
> @@ -2483,24 +2474,25 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>   
>   	lockdep_assert_held(&obj->base.dev->struct_mutex);
>   
> -	if (obj->pages)
> +	if (obj->mm.pages)
>   		return 0;
>   
> -	if (obj->madv != I915_MADV_WILLNEED) {
> +	if (obj->mm.madv != I915_MADV_WILLNEED) {
>   		DRM_DEBUG("Attempting to obtain a purgeable object\n");
> +		__i915_gem_object_unpin_pages(obj);
>   		return -EFAULT;
>   	}
>   
> -	BUG_ON(obj->pages_pin_count);
> -
>   	ret = ops->get_pages(obj);
> -	if (ret)
> +	if (ret) {
> +		__i915_gem_object_unpin_pages(obj);
>   		return ret;
> +	}
>   
>   	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
>   
> -	obj->get_page.sg_pos = obj->pages->sgl;
> -	obj->get_page.sg_idx = 0;
> +	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> +	obj->mm.get_page.sg_idx = 0;
>   
>   	return 0;
>   }
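
One question on the error handling here: in both failure paths above, __i915_gem_object_unpin_pages() runs while obj->mm.pages is still NULL, which looks like it will hit the GEM_BUG_ON(!obj->mm.pages) in that helper on debug builds. Should the error paths just decrement obj->mm.pages_pin_count directly?
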
> @@ -2510,7 +2502,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
>   				 enum i915_map_type type)
>   {
>   	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
> -	struct sg_table *sgt = obj->pages;
> +	struct sg_table *sgt = obj->mm.pages;
>   	struct sgt_iter sgt_iter;
>   	struct page *page;
>   	struct page *stack_pages[32];
> @@ -2564,14 +2556,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>   	lockdep_assert_held(&obj->base.dev->struct_mutex);
>   	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ERR_PTR(ret);
>   
> -	i915_gem_object_pin_pages(obj);
> -	pinned = obj->pages_pin_count > 1;
> +	pinned = obj->mm.pages_pin_count > 1;
>   
> -	ptr = ptr_unpack_bits(obj->mapping, has_type);
> +	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
>   	if (ptr && has_type != type) {
>   		if (pinned) {
>   			ret = -EBUSY;
> @@ -2583,7 +2574,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>   		else
>   			kunmap(kmap_to_page(ptr));
>   
> -		ptr = obj->mapping = NULL;
> +		ptr = obj->mm.mapping = NULL;
>   	}
>   
>   	if (!ptr) {
> @@ -2593,7 +2584,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
>   			goto err;
>   		}
>   
> -		obj->mapping = ptr_pack_bits(ptr, type);
> +		obj->mm.mapping = ptr_pack_bits(ptr, type);
>   	}
>   
>   	return ptr;
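
For reference, the external shape of the map API is unchanged, only mm.mapping underneath; users still follow the same pattern as the cmd_parser hunk at the top (sketch):

	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	/* CPU access through vaddr, with both pages and mapping pinned */

	i915_gem_object_unpin_map(obj);
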
> @@ -3030,7 +3021,7 @@ int i915_vma_unbind(struct i915_vma *vma)
>   		goto destroy;
>   
>   	GEM_BUG_ON(obj->bind_count == 0);
> -	GEM_BUG_ON(!obj->pages);
> +	GEM_BUG_ON(!obj->mm.pages);
>   
>   	if (i915_vma_is_map_and_fenceable(vma)) {
>   		/* release the fence reg _after_ flushing */
> @@ -3054,7 +3045,7 @@ int i915_vma_unbind(struct i915_vma *vma)
>   	drm_mm_remove_node(&vma->node);
>   	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
>   
> -	if (vma->pages != obj->pages) {
> +	if (vma->pages != obj->mm.pages) {
>   		GEM_BUG_ON(!vma->pages);
>   		sg_free_table(vma->pages);
>   		kfree(vma->pages);
> @@ -3186,12 +3177,10 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
>   		return -E2BIG;
>   	}
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ret;
>   
> -	i915_gem_object_pin_pages(obj);
> -
>   	if (flags & PIN_OFFSET_FIXED) {
>   		u64 offset = flags & PIN_OFFSET_MASK;
>   		if (offset & (alignment - 1) || offset > end - size) {
> @@ -3270,7 +3259,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
>   	 * to GPU, and we can ignore the cache flush because it'll happen
>   	 * again at bind time.
>   	 */
> -	if (obj->pages == NULL)
> +	if (!obj->mm.pages)
>   		return false;
>   
>   	/*
> @@ -3294,7 +3283,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
>   	}
>   
>   	trace_i915_gem_object_clflush(obj);
> -	drm_clflush_sg(obj->pages);
> +	drm_clflush_sg(obj->mm.pages);
>   	obj->cache_dirty = false;
>   
>   	return true;
> @@ -3408,7 +3397,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   	 * continue to assume that the obj remained out of the CPU cached
>   	 * domain.
>   	 */
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ret;
>   
> @@ -3432,7 +3421,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   	if (write) {
>   		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
>   		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
> -		obj->dirty = 1;
> +		obj->mm.dirty = true;
>   	}
>   
>   	trace_i915_gem_object_change_domain(obj,
> @@ -3441,6 +3430,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
>   
>   	/* And bump the LRU for this access */
>   	i915_gem_object_bump_inactive_ggtt(obj);
> +	i915_gem_object_unpin_pages(obj);
>   
>   	return 0;
>   }
> @@ -4210,23 +4200,23 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
>   		goto unlock;
>   	}
>   
> -	if (obj->pages &&
> +	if (obj->mm.pages &&
>   	    i915_gem_object_is_tiled(obj) &&
>   	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
> -		if (obj->madv == I915_MADV_WILLNEED)
> -			i915_gem_object_unpin_pages(obj);
> +		if (obj->mm.madv == I915_MADV_WILLNEED)
> +			__i915_gem_object_unpin_pages(obj);
>   		if (args->madv == I915_MADV_WILLNEED)
> -			i915_gem_object_pin_pages(obj);
> +			__i915_gem_object_pin_pages(obj);
>   	}
>   
> -	if (obj->madv != __I915_MADV_PURGED)
> -		obj->madv = args->madv;
> +	if (obj->mm.madv != __I915_MADV_PURGED)
> +		obj->mm.madv = args->madv;
>   
>   	/* if the object is no longer attached, discard its backing storage */
> -	if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
> +	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
>   		i915_gem_object_truncate(obj);
>   
> -	args->retained = obj->madv != __I915_MADV_PURGED;
> +	args->retained = obj->mm.madv != __I915_MADV_PURGED;
>   
>   	i915_gem_object_put(obj);
>   unlock:
> @@ -4252,9 +4242,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
>   	obj->ops = ops;
>   
>   	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
> -	obj->madv = I915_MADV_WILLNEED;
> -	INIT_RADIX_TREE(&obj->get_page.radix, GFP_KERNEL);
> -	mutex_init(&obj->get_page.lock);
> +
> +	obj->mm.madv = I915_MADV_WILLNEED;
> +	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL);
> +	mutex_init(&obj->mm.get_page.lock);
>   
>   	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
>   }
> @@ -4331,7 +4322,7 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj)
>   	 * back the contents from the GPU.
>   	 */
>   
> -	if (obj->madv != I915_MADV_WILLNEED)
> +	if (obj->mm.madv != I915_MADV_WILLNEED)
>   		return false;
>   
>   	if (obj->base.filp == NULL)
> @@ -4373,32 +4364,27 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>   	}
>   	GEM_BUG_ON(obj->bind_count);
>   
> -	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
> -	 * before progressing. */
> -	if (obj->stolen)
> -		i915_gem_object_unpin_pages(obj);
> -
>   	WARN_ON(atomic_read(&obj->frontbuffer_bits));
>   
> -	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
> +	if (obj->mm.pages && obj->mm.madv == I915_MADV_WILLNEED &&
>   	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
>   	    i915_gem_object_is_tiled(obj))
> -		i915_gem_object_unpin_pages(obj);
> +		__i915_gem_object_unpin_pages(obj);
>   
> -	if (WARN_ON(obj->pages_pin_count))
> -		obj->pages_pin_count = 0;
> +	if (obj->ops->release)
> +		obj->ops->release(obj);
> +
> +	if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
> +		obj->mm.pages_pin_count = 0;
>   	if (discard_backing_storage(obj))
> -		obj->madv = I915_MADV_DONTNEED;
> -	i915_gem_object_put_pages(obj);
> +		obj->mm.madv = I915_MADV_DONTNEED;
> +	__i915_gem_object_put_pages(obj);
>   
> -	BUG_ON(obj->pages);
> +	GEM_BUG_ON(obj->mm.pages);
>   
>   	if (obj->base.import_attach)
>   		drm_prime_gem_destroy(&obj->base, NULL);
>   
> -	if (obj->ops->release)
> -		obj->ops->release(obj);
> -
>   	drm_gem_object_release(&obj->base);
>   	i915_gem_info_remove_obj(dev_priv, obj->base.size);
>   
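
Moving the ops->release() call ahead of the final pin-count check and __i915_gem_object_put_pages() looks deliberate: it lets the stolen and phys backends drop the extra pin they now take at creation time (see i915_gem_object_release_stolen() and i915_gem_object_release_phys() in this patch), replacing the old "Stolen objects don't hold a ref" special case deleted above. A sentence about the reordering in the commit message would not hurt.
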
> @@ -4935,14 +4921,13 @@ i915_gem_object_create_from_data(struct drm_device *dev,
>   	if (ret)
>   		goto fail;
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		goto fail;
>   
> -	i915_gem_object_pin_pages(obj);
> -	sg = obj->pages;
> +	sg = obj->mm.pages;
>   	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
> -	obj->dirty = 1;		/* Backing store is now out of date */
> +	obj->mm.dirty = true; /* Backing store is now out of date */
>   	i915_gem_object_unpin_pages(obj);
>   
>   	if (WARN_ON(bytes != size)) {
> @@ -4962,7 +4947,7 @@ static struct scatterlist *
>   __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
>   			 unsigned long n, unsigned int *offset)
>   {
> -	struct scatterlist *sg = obj->pages->sgl;
> +	struct scatterlist *sg = obj->mm.pages->sgl;
>   	int idx = 0;
>   
>   	while (idx + __sg_page_count(sg) <= n) {
> @@ -4980,11 +4965,11 @@ i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
>   		       unsigned long n,
>   		       unsigned int *offset)
>   {
> -	struct i915_gem_object_page_iter *iter = &obj->get_page;
> +	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
>   	struct scatterlist *sg;
>   
>   	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
> -	GEM_BUG_ON(obj->pages_pin_count == 0);
> +	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
>   
>   	if (n < READ_ONCE(iter->sg_idx))
>   		goto lookup;
> @@ -5058,7 +5043,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
>   	struct page *page;
>   
>   	page = i915_gem_object_get_page(obj, n);
> -	if (!obj->dirty)
> +	if (!obj->mm.dirty)
>   		set_page_dirty(page);
>   
>   	return page;
> diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
> index 3934c9103cf2..6b656822bb3a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
> +++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
> @@ -131,11 +131,10 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
>   			return obj;
>   	}
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		return ERR_PTR(ret);
>   
>   	list_move_tail(&obj->batch_pool_link, list);
> -	i915_gem_object_pin_pages(obj);
>   	return obj;
>   }
> diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> index 97c9d68b45df..10441dc72e73 100644
> --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
> @@ -48,12 +48,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
>   	if (ret)
>   		goto err;
>   
> -	ret = i915_gem_object_get_pages(obj);
> +	ret = i915_gem_object_pin_pages(obj);
>   	if (ret)
>   		goto err_unlock;
>   
> -	i915_gem_object_pin_pages(obj);
> -
>   	/* Copy sg so that we make an independent mapping */
>   	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
>   	if (st == NULL) {
> @@ -61,13 +59,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
>   		goto err_unpin;
>   	}
>   
> -	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
> +	ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
>   	if (ret)
>   		goto err_free;
>   
> -	src = obj->pages->sgl;
> +	src = obj->mm.pages->sgl;
>   	dst = st->sgl;
> -	for (i = 0; i < obj->pages->nents; i++) {
> +	for (i = 0; i < obj->mm.pages->nents; i++) {
>   		sg_set_page(dst, sg_page(src), src->length, 0);
>   		dst = sg_next(dst);
>   		src = sg_next(src);
> @@ -299,14 +297,14 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
>   	if (IS_ERR(sg))
>   		return PTR_ERR(sg);
>   
> -	obj->pages = sg;
> +	obj->mm.pages = sg;
>   	return 0;
>   }
>   
>   static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
>   {
>   	dma_buf_unmap_attachment(obj->base.import_attach,
> -				 obj->pages, DMA_BIDIRECTIONAL);
> +				 obj->mm.pages, DMA_BIDIRECTIONAL);
>   }
>   
>   static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 0deecd4e3b6c..062e098b0909 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1290,7 +1290,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
>   
>   	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
>   
> -	obj->dirty = 1; /* be paranoid  */
> +	obj->mm.dirty = true; /* be paranoid  */
>   
>   	/* Add a reference if we're newly entering the active list.
>   	 * The order in which we add operations to the retirement queue is
> diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
> index 8df1fa7234e8..398856160656 100644
> --- a/drivers/gpu/drm/i915/i915_gem_fence.c
> +++ b/drivers/gpu/drm/i915/i915_gem_fence.c
> @@ -648,7 +648,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
>   		return;
>   
>   	i = 0;
> -	for_each_sgt_page(page, sgt_iter, obj->pages) {
> +	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
>   		char new_bit_17 = page_to_phys(page) >> 17;
>   		if ((new_bit_17 & 0x1) !=
>   		    (test_bit(i, obj->bit_17) != 0)) {
> @@ -687,7 +687,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
>   
>   	i = 0;
>   
> -	for_each_sgt_page(page, sgt_iter, obj->pages) {
> +	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
>   		if (page_to_phys(page) & (1 << 17))
>   			__set_bit(i, obj->bit_17);
>   		else
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 7e0c98576e72..3c711e0b8a3f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -175,7 +175,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
>   {
>   	u32 pte_flags = 0;
>   
> -	vma->pages = vma->obj->pages;
> +	vma->pages = vma->obj->mm.pages;
>   
>   	/* Currently applicable only to VLV */
>   	if (vma->obj->gt_ro)
> @@ -2295,7 +2295,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
>   int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
>   {
>   	if (!dma_map_sg(&obj->base.dev->pdev->dev,
> -			obj->pages->sgl, obj->pages->nents,
> +			obj->mm.pages->sgl, obj->mm.pages->nents,
>   			PCI_DMA_BIDIRECTIONAL))
>   		return -ENOSPC;
>   
> @@ -2685,7 +2685,7 @@ void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
>   		}
>   	}
>   
> -	dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
> +	dma_unmap_sg(kdev, obj->mm.pages->sgl, obj->mm.pages->nents,
>   		     PCI_DMA_BIDIRECTIONAL);
>   }
>   
> @@ -3526,7 +3526,7 @@ intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
>   
>   	/* Populate source page list from the object. */
>   	i = 0;
> -	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
> +	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
>   		page_addr_list[i++] = dma_addr;
>   
>   	GEM_BUG_ON(i != n_pages);
> @@ -3617,7 +3617,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
>   		return 0;
>   
>   	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
> -		vma->pages = vma->obj->pages;
> +		vma->pages = vma->obj->mm.pages;
>   	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
>   		vma->pages =
>   			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
> diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
> index 534a61c1aba2..24eb347f9e0a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_internal.c
> +++ b/drivers/gpu/drm/i915/i915_gem_internal.c
> @@ -28,10 +28,11 @@
>   
>   static void internal_free_pages(struct sg_table *st)
>   {
> -	struct sg_page_iter sg_iter;
> +	struct sgt_iter iter;
> +	struct page *page;
>   
> -	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
> -		put_page(sg_page_iter_page(&sg_iter));
> +	for_each_sgt_page(page, iter, st)
> +		put_page(page);
>   
>   	sg_free_table(st);
>   	kfree(st);
> @@ -94,10 +95,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   	if (!swiotlb_nr_tbl())
>   #endif
>   		sg_mark_end(sg);
> -	obj->pages = st;
> +	obj->mm.pages = st;
>   
>   	if (i915_gem_gtt_prepare_object(obj)) {
> -		obj->pages = NULL;
> +		obj->mm.pages = NULL;
>   		goto err;
>   	}
>   
> @@ -106,7 +107,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
>   	 * and the caller is expected to repopulate - the contents of this
>   	 * object are only valid whilst active and pinned.
>   	 */
> -	obj->madv = I915_MADV_DONTNEED;
> +	obj->mm.madv = I915_MADV_DONTNEED;
>   	return 0;
>   
>   err:
> @@ -117,10 +118,10 @@ err:
>   
>   static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj)
>   {
> -	internal_free_pages(obj->pages);
> +	internal_free_pages(obj->mm.pages);
>   
> -	obj->dirty = 0;
> -	obj->madv = I915_MADV_WILLNEED;
> +	obj->mm.dirty = false;
> +	obj->mm.madv = I915_MADV_WILLNEED;
>   }
>   
>   static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index adb00c5125ad..64d9712ec789 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -230,7 +230,7 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
>   		return 0;
>   
>   	/* Recreate the page after shrinking */
> -	if (!so->vma->obj->pages)
> +	if (!so->vma->obj->mm.pages)
>   		so->batch_offset = -1;
>   
>   	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
> diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> index 1c237d02f30b..e0e6d68fe470 100644
> --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
> +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
> @@ -78,7 +78,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
>   	 * to the GPU, simply unbinding from the GPU is not going to succeed
>   	 * in releasing our pin count on the pages themselves.
>   	 */
> -	if (obj->pages_pin_count > obj->bind_count)
> +	if (obj->mm.pages_pin_count > obj->bind_count)
>   		return false;
>   
>   	if (any_vma_pinned(obj))
> @@ -88,7 +88,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
>   	 * discard the contents (because the user has marked them as being
>   	 * purgeable) or if we can move their contents out to swap.
>   	 */
> -	return swap_available() || obj->madv == I915_MADV_DONTNEED;
> +	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
>   }
>   
>   /**
> @@ -175,11 +175,11 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>   			list_move_tail(&obj->global_list, &still_in_list);
>   
>   			if (flags & I915_SHRINK_PURGEABLE &&
> -			    obj->madv != I915_MADV_DONTNEED)
> +			    obj->mm.madv != I915_MADV_DONTNEED)
>   				continue;
>   
>   			if (flags & I915_SHRINK_VMAPS &&
> -			    !is_vmalloc_addr(obj->mapping))
> +			    !is_vmalloc_addr(obj->mm.mapping))
>   				continue;
>   
>   			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
> @@ -193,7 +193,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
>   
>   			/* For the unbound phase, this should be a no-op! */
>   			i915_gem_object_unbind(obj);
> -			if (i915_gem_object_put_pages(obj) == 0)
> +			if (__i915_gem_object_put_pages(obj) == 0)
>   				count += obj->base.size >> PAGE_SHIFT;
>   
>   			i915_gem_object_put(obj);
> diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
> index 24bad4e60ef0..6d139d6b4609 100644
> --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
> +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
> @@ -554,16 +554,17 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
>   static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
>   {
>   	/* Should only be called during free */
> -	sg_free_table(obj->pages);
> -	kfree(obj->pages);
> +	sg_free_table(obj->mm.pages);
> +	kfree(obj->mm.pages);
>   }
>   
> -
>   static void
>   i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
>   {
>   	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>   
> +	__i915_gem_object_unpin_pages(obj);
> +
>   	if (obj->stolen) {
>   		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
>   		kfree(obj->stolen);
> @@ -589,15 +590,16 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
>   	drm_gem_private_object_init(dev, &obj->base, stolen->size);
>   	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
>   
> -	obj->pages = i915_pages_create_for_stolen(dev,
> -						  stolen->start, stolen->size);
> -	if (obj->pages == NULL)
> +	obj->mm.pages = i915_pages_create_for_stolen(dev,
> +						     stolen->start,
> +						     stolen->size);
> +	if (!obj->mm.pages)
>   		goto cleanup;
>   
> -	obj->get_page.sg_pos = obj->pages->sgl;
> -	obj->get_page.sg_idx = 0;
> +	obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> +	obj->mm.get_page.sg_idx = 0;
>   
> -	i915_gem_object_pin_pages(obj);
> +	__i915_gem_object_pin_pages(obj);
>   	obj->stolen = stolen;
>   
>   	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
> @@ -717,14 +719,14 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
>   		goto err;
>   	}
>   
> -	vma->pages = obj->pages;
> +	vma->pages = obj->mm.pages;
>   	vma->flags |= I915_VMA_GLOBAL_BIND;
>   	__i915_vma_set_map_and_fenceable(vma);
>   	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
>   	obj->bind_count++;
>   
>   	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -	i915_gem_object_pin_pages(obj);
> +	__i915_gem_object_pin_pages(obj);
>   
>   	return obj;
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index a14b1e3d4c78..7286de7bd25e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -260,13 +260,13 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
>   		if (!err) {
>   			struct i915_vma *vma;
>   
> -			if (obj->pages &&
> -			    obj->madv == I915_MADV_WILLNEED &&
> +			if (obj->mm.pages &&
> +			    obj->mm.madv == I915_MADV_WILLNEED &&
>   			    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
>   				if (args->tiling_mode == I915_TILING_NONE)
> -					i915_gem_object_unpin_pages(obj);
> +					__i915_gem_object_unpin_pages(obj);
>   				if (!i915_gem_object_is_tiled(obj))
> -					i915_gem_object_pin_pages(obj);
> +					__i915_gem_object_pin_pages(obj);
>   			}
>   
>   			list_for_each_entry(vma, &obj->vma_list, obj_link) {
> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
> index cb95789da76e..0fdd1d6723d1 100644
> --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
> +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
> @@ -73,10 +73,10 @@ static void cancel_userptr(struct work_struct *work)
>   	/* Cancel any active worker and force us to re-evaluate gup */
>   	obj->userptr.work = NULL;
>   
> -	if (obj->pages != NULL) {
> +	if (obj->mm.pages) {
>   		/* We are inside a kthread context and can't be interrupted */
>   		WARN_ON(i915_gem_object_unbind(obj));
> -		WARN_ON(i915_gem_object_put_pages(obj));
> +		WARN_ON(__i915_gem_object_put_pages(obj));
>   	}
>   
>   	i915_gem_object_put(obj);
> @@ -432,15 +432,15 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
>   {
>   	int ret;
>   
> -	ret = st_set_pages(&obj->pages, pvec, num_pages);
> +	ret = st_set_pages(&obj->mm.pages, pvec, num_pages);
>   	if (ret)
>   		return ret;
>   
>   	ret = i915_gem_gtt_prepare_object(obj);
>   	if (ret) {
> -		sg_free_table(obj->pages);
> -		kfree(obj->pages);
> -		obj->pages = NULL;
> +		sg_free_table(obj->mm.pages);
> +		kfree(obj->mm.pages);
> +		obj->mm.pages = NULL;
>   	}
>   
>   	return ret;
> @@ -526,8 +526,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
>   			if (ret == 0) {
>   				list_add_tail(&obj->global_list,
>   					      &to_i915(dev)->mm.unbound_list);
> -				obj->get_page.sg_pos = obj->pages->sgl;
> -				obj->get_page.sg_idx = 0;
> +				obj->mm.get_page.sg_pos = obj->mm.pages->sgl;
> +				obj->mm.get_page.sg_idx = 0;
>   				pinned = 0;
>   			}
>   		}
> @@ -668,22 +668,22 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
>   	BUG_ON(obj->userptr.work != NULL);
>   	__i915_gem_userptr_set_active(obj, false);
>   
> -	if (obj->madv != I915_MADV_WILLNEED)
> -		obj->dirty = 0;
> +	if (obj->mm.madv != I915_MADV_WILLNEED)
> +		obj->mm.dirty = false;
>   
>   	i915_gem_gtt_finish_object(obj);
>   
> -	for_each_sgt_page(page, sgt_iter, obj->pages) {
> -		if (obj->dirty)
> +	for_each_sgt_page(page, sgt_iter, obj->mm.pages) {
> +		if (obj->mm.dirty)
>   			set_page_dirty(page);
>   
>   		mark_page_accessed(page);
>   		put_page(page);
>   	}
> -	obj->dirty = 0;
> +	obj->mm.dirty = false;
>   
> -	sg_free_table(obj->pages);
> -	kfree(obj->pages);
> +	sg_free_table(obj->mm.pages);
> +	kfree(obj->mm.pages);
>   }
>   
>   static void
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 6c22be28ba01..628d5cdf9200 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -881,8 +881,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
>   	err->write_domain = obj->base.write_domain;
>   	err->fence_reg = vma->fence ? vma->fence->id : -1;
>   	err->tiling = i915_gem_object_get_tiling(obj);
> -	err->dirty = obj->dirty;
> -	err->purgeable = obj->madv != I915_MADV_WILLNEED;
> +	err->dirty = obj->mm.dirty;
> +	err->purgeable = obj->mm.madv != I915_MADV_WILLNEED;
>   	err->userptr = obj->userptr.mm != NULL;
>   	err->cache_level = obj->cache_level;
>   }
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 02c48d390148..6acecfc41f5e 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -744,7 +744,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>   	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
>   		i915_ggtt_offset(ce->ring->vma);
>   
> -	ce->state->obj->dirty = true;
> +	ce->state->obj->mm.dirty = true;
>   
>   	/* Invalidate GuC TLB. */
>   	if (i915.enable_guc_submission) {
> @@ -2042,7 +2042,7 @@ populate_lr_context(struct i915_gem_context *ctx,
>   		DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
>   		return ret;
>   	}
> -	ctx_obj->dirty = true;
> +	ctx_obj->mm.dirty = true;
>   
>   	/* The second page of the context object contains some fields which must
>   	 * be set up prior to the first execution. */
> @@ -2179,7 +2179,7 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
>   			reg[CTX_RING_HEAD+1] = 0;
>   			reg[CTX_RING_TAIL+1] = 0;
>   
> -			ce->state->obj->dirty = true;
> +			ce->state->obj->mm.dirty = true;
>   			i915_gem_object_unpin_map(ce->state->obj);
>   
>   			ce->ring->head = ce->ring->tail = 0;