[PATCH 3/3] drm/i915/gem: Split out backing pages from the GEM object
Chris Wilson
chris at chris-wilson.co.uk
Tue Dec 17 10:37:17 UTC 2019
The end goal is to decouple the object from the vma so that we can
treat i915_vma as a first-class, independently referenced object.
Doing that directly introduces a reference cycle between the GEM
object and the i915_vma, so as a first step separate out the backing
page store (struct i915_mm_pages) that can then be shared between the
i915_vma and the object.
Note this doesn't create the proper backing store classes yet; here
we just allocate a pointer to hold the pages.
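The net effect on callers is that pinning now returns the page store
(or an ERR_PTR) instead of an errno, and unpinning acts on the
i915_mm_pages rather than the object. A minimal sketch of the new
calling convention, using only helpers introduced by this patch:

	struct i915_mm_pages *mm;

	/* was: err = i915_gem_object_pin_pages(obj); */
	mm = i915_gem_object_pin_pages(obj);
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	/* operate on mm->pages; record writes via
	 * i915_mm_pages_set_dirty(mm) instead of obj->mm.dirty = true
	 */

	/* was: i915_gem_object_unpin_pages(obj); */
	i915_mm_pages_unpin(mm);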
Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
---
drivers/gpu/drm/i915/Makefile | 9 +
drivers/gpu/drm/i915/display/intel_display.c | 9 +-
drivers/gpu/drm/i915/display/intel_overlay.c | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_clflush.c | 15 +-
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 41 +--
drivers/gpu/drm/i915/gem/i915_gem_domain.c | 58 ++--
drivers/gpu/drm/i915/gem/i915_gem_internal.c | 13 +-
drivers/gpu/drm/i915/gem/i915_gem_lmem.c | 52 +---
drivers/gpu/drm/i915/gem/i915_gem_lmem.h | 8 -
drivers/gpu/drm/i915/gem/i915_gem_mman.c | 23 +-
drivers/gpu/drm/i915/gem/i915_gem_object.c | 47 +--
drivers/gpu/drm/i915/gem/i915_gem_object.h | 106 +++----
.../gpu/drm/i915/gem/i915_gem_object_types.h | 105 +------
drivers/gpu/drm/i915/gem/i915_gem_pages.c | 283 ++++--------------
drivers/gpu/drm/i915/gem/i915_gem_phys.c | 24 +-
drivers/gpu/drm/i915/gem/i915_gem_pm.c | 4 +-
drivers/gpu/drm/i915/gem/i915_gem_region.c | 44 +--
drivers/gpu/drm/i915/gem/i915_gem_region.h | 5 -
drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 35 ++-
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 36 +--
drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 17 +-
drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 32 +-
drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 16 +-
.../drm/i915/gem/selftests/huge_gem_object.c | 7 +-
.../gpu/drm/i915/gem/selftests/huge_pages.c | 135 +++++----
.../i915/gem/selftests/i915_gem_client_blt.c | 4 +-
.../drm/i915/gem/selftests/i915_gem_dmabuf.c | 8 +-
.../drm/i915/gem/selftests/i915_gem_mman.c | 18 +-
.../drm/i915/gem/selftests/i915_gem_object.c | 10 +-
.../drm/i915/gem/selftests/i915_gem_phys.c | 2 +-
drivers/gpu/drm/i915/gt/intel_context.c | 2 +-
drivers/gpu/drm/i915/gt/intel_engine_pool.c | 8 +-
.../gpu/drm/i915/gt/intel_ring_submission.c | 2 +-
drivers/gpu/drm/i915/gt/selftest_timeline.c | 2 +-
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 6 +-
drivers/gpu/drm/i915/gvt/dmabuf.c | 7 +-
drivers/gpu/drm/i915/i915_cmd_parser.c | 14 +-
drivers/gpu/drm/i915/i915_debugfs.c | 6 +-
drivers/gpu/drm/i915/i915_gem.c | 58 ++--
drivers/gpu/drm/i915/i915_gem_fence_reg.c | 19 +-
drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +-
drivers/gpu/drm/i915/i915_gpu_error.c | 4 +-
drivers/gpu/drm/i915/i915_vma.c | 15 +-
drivers/gpu/drm/i915/mm/Makefile | 5 +
drivers/gpu/drm/i915/mm/i915_mm_iomap.c | 43 +++
drivers/gpu/drm/i915/mm/i915_mm_iomap.h | 15 +
drivers/gpu/drm/i915/mm/i915_mm_pages.c | 53 ++++
drivers/gpu/drm/i915/mm/i915_mm_pages.h | 103 +++++++
drivers/gpu/drm/i915/mm/i915_mm_pages_types.h | 107 +++++++
drivers/gpu/drm/i915/mm/i915_mm_region.c | 32 ++
drivers/gpu/drm/i915/mm/i915_mm_region.h | 9 +
drivers/gpu/drm/i915/mm/i915_mm_sg.c | 161 ++++++++++
drivers/gpu/drm/i915/mm/i915_mm_sg.h | 22 ++
.../gpu/drm/i915/selftests/i915_gem_evict.c | 12 +-
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 58 ++--
drivers/gpu/drm/i915/selftests/i915_vma.c | 6 +-
.../drm/i915/selftests/intel_memory_region.c | 63 ++--
drivers/gpu/drm/i915/selftests/mock_region.c | 12 +-
58 files changed, 1181 insertions(+), 843 deletions(-)
create mode 100644 drivers/gpu/drm/i915/mm/Makefile
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_iomap.c
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_iomap.h
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages.c
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages.h
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_pages_types.h
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_region.c
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_region.h
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_sg.c
create mode 100644 drivers/gpu/drm/i915/mm/i915_mm_sg.h
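For reviewers, a rough sketch of the new page store, reconstructed
from the fields this patch moves out of struct drm_i915_gem_object;
the authoritative layout lives in mm/i915_mm_pages_types.h (a new
file, not quoted in full here), so field order, the refcount and the
exact representation of the dirty/quirked/volatile state below are
illustrative only:

	struct i915_mm_pages {
		struct kref kref;	/* implied by i915_mm_pages_create()/put() */
		struct mutex lock;	/* protects the pages and their use */
		atomic_t pin_count;	/* was obj->mm.pages_pin_count */
		atomic_t shrink_pin;

		unsigned long flags;	/* I915_MM_PAGES_CONTIGUOUS, ... */

		struct intel_memory_region *region;	/* backing region */
		struct list_head blocks;		/* region blocks allocated */
		struct list_head region_link;

		struct sg_table *pages;
		void *mapping;
		struct i915_page_sizes page_sizes;
		struct i915_gem_object_page_iter get_page;

		unsigned int madv:2;	/* purgeable advice */
		/* dirty/quirked/volatile bits sit behind the
		 * i915_mm_pages_{is,set,clear}_* helpers
		 */
	};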
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e0fd10c0cfb8..dd9771d316c8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -107,6 +107,15 @@ gt-y += \
gt/gen9_renderstate.o
i915-y += $(gt-y)
+# Memory management/integration code
+obj-y += mm/
+mm-y += \
+ mm/i915_mm_iomap.o \
+ mm/i915_mm_pages.o \
+ mm/i915_mm_region.o \
+ mm/i915_mm_sg.o
+i915-y += $(mm-y)
+
# GEM (Graphics Execution Management) code
obj-y += gem/
gem-y += \
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 60f3899a59f8..09f47600b5c7 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -15162,6 +15162,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = new_plane_state->hw.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct i915_mm_pages *mm;
int ret;
if (old_obj) {
@@ -15202,13 +15203,13 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!obj)
return 0;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
ret = intel_plane_pin_fb(new_plane_state);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 2a44b3be2600..d28f201bb83f 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -1302,7 +1302,7 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys)
}
if (use_phys)
- overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ overlay->flip_addr = sg_dma_address(vma->pages->sgl);
else
overlay->flip_addr = i915_ggtt_offset(vma);
overlay->regs = i915_vma_pin_iomap(vma);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b9f504ba3b32..35ce292b9cb9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -19,7 +19,7 @@ struct clflush {
static void __do_clflush(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- drm_clflush_sg(obj->mm.pages);
+ drm_clflush_sg(obj->mm->pages);
intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}
@@ -27,14 +27,17 @@ static int clflush_work(struct dma_fence_work *base)
{
struct clflush *clflush = container_of(base, typeof(*clflush), base);
struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
- int err;
+ struct i915_mm_pages *mm;
+ int err = 0;
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto put;
+ }
__do_clflush(obj);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
put:
i915_gem_object_put(obj);
@@ -114,7 +117,7 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
I915_FENCE_GFP);
dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
dma_fence_work_commit(&clflush->base);
- } else if (obj->mm.pages) {
+ } else if (obj->mm->pages) {
__do_clflush(obj);
} else {
GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 372b57ca0efc..0a0f624fef67 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -21,13 +21,16 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
enum dma_data_direction dir)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- struct sg_table *st;
struct scatterlist *src, *dst;
+ struct i915_mm_pages *mm;
+ struct sg_table *st;
int ret, i;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto err;
+ }
/* Copy sg so that we make an independent mapping */
st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
@@ -36,13 +39,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_unpin_pages;
}
- ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
+ ret = sg_alloc_table(st, mm->pages->nents, GFP_KERNEL);
if (ret)
goto err_free;
- src = obj->mm.pages->sgl;
+ src = mm->pages->sgl;
dst = st->sgl;
- for (i = 0; i < obj->mm.pages->nents; i++) {
+ for (i = 0; i < mm->pages->nents; i++) {
sg_set_page(dst, sg_page(src), src->length, 0);
dst = sg_next(dst);
src = sg_next(src);
@@ -53,6 +56,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
goto err_free_sg;
}
+ /* return with pages still pinned */
return st;
err_free_sg:
@@ -60,7 +64,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
err_free:
kfree(st);
err_unpin_pages:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
err:
return ERR_PTR(ret);
}
@@ -118,11 +122,12 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
+ struct i915_mm_pages *mm;
int err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
err = i915_gem_object_lock_interruptible(obj);
if (err)
@@ -132,18 +137,19 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_dire
i915_gem_object_unlock(obj);
out:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return err;
}
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ struct i915_mm_pages *mm;
int err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
err = i915_gem_object_lock_interruptible(obj);
if (err)
@@ -153,7 +159,7 @@ static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direct
i915_gem_object_unlock(obj);
out:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return err;
}
@@ -245,14 +251,13 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc();
- if (obj == NULL) {
+ obj = i915_gem_object_alloc(&i915_gem_object_dmabuf_ops, &lock_class);
+ if (!obj) {
ret = -ENOMEM;
goto fail_detach;
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index 65f1851e2863..6a987f3b0454 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -48,6 +48,7 @@ void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
+ struct i915_mm_pages *mm;
int ret;
assert_object_held(obj);
@@ -70,9 +71,9 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
@@ -91,10 +92,10 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
if (write) {
obj->read_domains = I915_GEM_DOMAIN_WC;
obj->write_domain = I915_GEM_DOMAIN_WC;
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(mm);
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return 0;
}
@@ -109,6 +110,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
+ struct i915_mm_pages *mm;
int ret;
assert_object_held(obj);
@@ -131,9 +133,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- return ret;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
@@ -154,16 +156,17 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj->mm.dirty = true;
spin_lock(&obj->vma.lock);
for_each_ggtt_vma(vma, obj)
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
i915_vma_set_ggtt_write(vma);
spin_unlock(&obj->vma.lock);
+
+ i915_mm_pages_set_dirty(mm);
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return 0;
}
@@ -389,9 +392,9 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv == I915_MADV_WILLNEED &&
- !atomic_read(&obj->mm.shrink_pin))
- list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
+ if (obj->mm->madv == I915_MADV_WILLNEED &&
+ !atomic_read(&obj->mm->shrink_pin))
+ list_move_tail(&obj->mm_link, &i915->mm.shrink_list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
@@ -469,6 +472,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj;
u32 read_domains = args->read_domains;
u32 write_domain = args->write_domain;
+ struct i915_mm_pages *mm;
int err;
/* Only handle setting domains to types used by the CPU. */
@@ -537,9 +541,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
* continue to assume that the obj remained out of the CPU cached
* domain.
*/
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out;
+ }
err = i915_gem_object_lock_interruptible(obj);
if (err)
@@ -561,7 +567,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
out_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out:
i915_gem_object_put(obj);
return err;
@@ -575,6 +581,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush)
{
+ struct i915_mm_pages *mm;
int ret;
*needs_clflush = 0;
@@ -591,9 +598,11 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
if (ret)
goto err_unlock;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto err_unlock;
+ }
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -620,7 +629,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
return 0;
err_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
err_unlock:
i915_gem_object_unlock(obj);
return ret;
@@ -629,6 +638,7 @@ int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush)
{
+ struct i915_mm_pages *mm;
int ret;
*needs_clflush = 0;
@@ -646,9 +656,11 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
if (ret)
goto err_unlock;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto err_unlock;
+ }
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
!static_cpu_has(X86_FEATURE_CLFLUSH)) {
@@ -679,12 +691,12 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
out:
intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(mm);
/* return with the pages pinned */
return 0;
err_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
err_unlock:
i915_gem_object_unlock(obj);
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..5c13e992163b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -35,9 +35,9 @@ static void internal_free_pages(struct sg_table *st)
static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
unsigned int sg_page_sizes;
+ struct scatterlist *sg;
+ struct sg_table *st;
unsigned int npages;
int max_order;
gfp_t gfp;
@@ -132,10 +132,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
i915_gem_gtt_finish_pages(obj, pages);
internal_free_pages(pages);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
@@ -174,12 +176,11 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&i915_gem_object_internal_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class);
/*
* Mark the object as volatile, such that the pages are marked as
@@ -188,7 +189,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
* expected to repopulate - the contents of this object are only valid
* whilst active and pinned.
*/
- i915_gem_object_set_volatile(obj);
+ i915_mm_pages_set_volatile(obj->mm);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 520cc9cac471..3f74bbc312e4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -6,8 +6,15 @@
#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
+#include "mm/i915_mm_region.h"
#include "i915_drv.h"
+static void
+i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ i915_mm_pages_release_memory_region(obj->mm);
+}
+
const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.flags = I915_GEM_OBJECT_HAS_IOMEM,
@@ -16,46 +23,6 @@ const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
.release = i915_gem_object_release_memory_region,
};
-/* XXX: Time to vfunc your life up? */
-void __iomem *
-i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- resource_size_t offset;
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
-}
-
-void __iomem *
-i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned long size)
-{
- resource_size_t offset;
-
- GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));
-
- offset = i915_gem_object_get_dma_address(obj, n);
- offset -= obj->mm.region->region.start;
-
- return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
-}
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
return obj->ops == &i915_gem_lmem_obj_ops;
@@ -79,18 +46,17 @@ __i915_gem_lmem_object_create(struct intel_memory_region *mem,
struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&i915_gem_lmem_obj_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- i915_gem_object_init_memory_region(obj, mem, flags);
+ i915_mm_pages_init_memory_region(obj->mm, mem, flags);
return obj;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 7c176b8b7d2f..fc3f15580fe3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -14,14 +14,6 @@ struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
-void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
- unsigned long n, unsigned long size);
-void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
- unsigned long n);
-void __iomem *
-i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
- unsigned long n);
-
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 879fff8adc48..530e7aa326a3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -239,7 +239,7 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
unsigned long i, size = area->vm_end - area->vm_start;
bool write = area->vm_flags & VM_WRITE;
vm_fault_t ret = VM_FAULT_SIGBUS;
- int err;
+ struct i915_mm_pages *mm;
if (!i915_gem_object_has_struct_page(obj))
return ret;
@@ -248,9 +248,9 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
if (i915_gem_object_is_readonly(obj) && write)
return ret;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return i915_error_to_vmf_fault(err);
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return i915_error_to_vmf_fault(PTR_ERR(mm));
/* PTEs are revoked in obj->ops->put_pages() */
for (i = 0; i < size >> PAGE_SHIFT; i++) {
@@ -266,10 +266,10 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
if (write) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
obj->cache_dirty = true; /* XXX flush after PAT update? */
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(mm);
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return ret;
}
@@ -285,6 +285,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct intel_runtime_pm *rpm = &i915->runtime_pm;
struct i915_ggtt *ggtt = &i915->ggtt;
bool write = area->vm_flags & VM_WRITE;
+ struct i915_mm_pages *mm;
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
@@ -300,9 +301,11 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto err;
+ }
wakeref = intel_runtime_pm_get(rpm);
@@ -382,7 +385,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
if (write) {
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
i915_vma_set_ggtt_write(vma);
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(mm);
}
err_fence:
@@ -393,7 +396,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
err_rpm:
intel_runtime_pm_put(rpm, wakeref);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
err:
return i915_error_to_vmf_fault(ret);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 16d611db9ca6..f128836d8d90 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -26,6 +26,8 @@
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
+#include "mm/i915_mm_pages.h"
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -39,26 +41,19 @@ static struct i915_global_object {
struct kmem_cache *slab_objects;
} global;
-struct drm_i915_gem_object *i915_gem_object_alloc(void)
-{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
-}
-
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
return kmem_cache_free(global.slab_objects, obj);
}
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops,
- struct lock_class_key *key)
+static struct drm_i915_gem_object *
+i915_gem_object_init(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_object_ops *ops)
{
- __mutex_init(&obj->mm.lock, "obj->mm.lock", key);
-
spin_lock_init(&obj->vma.lock);
INIT_LIST_HEAD(&obj->vma.list);
- INIT_LIST_HEAD(&obj->mm.link);
+ INIT_LIST_HEAD(&obj->mm_link);
INIT_LIST_HEAD(&obj->lut_list);
@@ -69,9 +64,26 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
obj->ops = ops;
- obj->mm.madv = I915_MADV_WILLNEED;
- INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
- mutex_init(&obj->mm.get_page.lock);
+ return obj;
+}
+
+struct drm_i915_gem_object *
+i915_gem_object_alloc(const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->mm = i915_mm_pages_create(key);
+ if (!obj->mm) {
+ i915_gem_object_free(obj);
+ return NULL;
+ }
+
+ return i915_gem_object_init(obj, ops);
}
/**
@@ -201,8 +213,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
spin_unlock(&obj->vma.lock);
}
- i915_gem_object_release_mmap(obj);
-
list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
@@ -214,10 +224,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
GEM_BUG_ON(obj->userfault_count);
GEM_BUG_ON(!list_empty(&obj->lut_list));
- atomic_set(&obj->mm.pages_pin_count, 0);
+ atomic_set(&obj->mm->pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
- bitmap_free(obj->bit_17);
if (obj->base.import_attach)
drm_prime_gem_destroy(&obj->base, NULL);
@@ -227,6 +236,8 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (obj->ops->release)
obj->ops->release(obj);
+ i915_mm_pages_put(obj->mm);
+
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index a1eb7c0b23ac..9e81bd153dfa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -13,18 +13,20 @@
#include <drm/i915_drm.h>
+#include "mm/i915_mm_pages.h"
+#include "mm/i915_mm_sg.h"
+
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
void i915_gem_init__objects(struct drm_i915_private *i915);
-struct drm_i915_gem_object *i915_gem_object_alloc(void);
+struct drm_i915_gem_object *
+i915_gem_object_alloc(const struct drm_i915_gem_object_ops *ops,
+ struct lock_class_key *key);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
-void i915_gem_object_init(struct drm_i915_gem_object *obj,
- const struct drm_i915_gem_object_ops *ops,
- struct lock_class_key *key);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
resource_size_t size);
@@ -132,31 +134,13 @@ void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
- obj->flags |= I915_BO_READONLY;
+ obj->readonly = true;
}
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
- return obj->flags & I915_BO_READONLY;
-}
-
-static inline bool
-i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
-{
- return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
-}
-
-static inline bool
-i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
-{
- return obj->flags & I915_BO_ALLOC_VOLATILE;
-}
-
-static inline void
-i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
-{
- obj->flags |= I915_BO_ALLOC_VOLATILE;
+ return obj->readonly;
}
static inline bool
@@ -243,58 +227,57 @@ i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride);
-struct scatterlist *
+static inline struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n, unsigned int *offset);
+ unsigned long n, unsigned int *offset)
+{
+ return i915_mm_pages_get_sg(obj->mm, n, offset);
+}
-struct page *
+static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+ unsigned long n)
+{
+ return i915_mm_pages_get_page(obj->mm, n);
+}
-struct page *
+static inline struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+ unsigned long n)
+{
+ return i915_mm_pages_get_dirty_page(obj->mm, n);
+}
-dma_addr_t
+static inline dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
- unsigned int *len);
+ unsigned int *len)
+{
+ return i915_mm_pages_get_dma_address_len(obj->mm, n, len);
+}
-dma_addr_t
+static inline dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
+ unsigned long n)
+{
+ return i915_mm_pages_get_dma_address(obj->mm, n);
+}
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes);
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
- I915_MM_NORMAL = 0,
- /*
- * Only used by struct_mutex, when called "recursively" from
- * direct-reclaim-esque. Safe because there is only every one
- * struct_mutex in the entire system.
- */
- I915_MM_SHRINKER = 1,
- /*
- * Used for obj->mm.lock when allocating pages. Safe because the object
- * isn't yet on any LRU, and therefore the shrinker can't deadlock on
- * it. As soon as the object has pages, obj->mm.lock nests within
- * fs_reclaim.
- */
- I915_MM_GET_PAGES = 1,
-};
+struct i915_mm_pages *
+__i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
-static inline int __must_check
+static inline struct i915_mm_pages *
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
- might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ might_lock_nested(&obj->mm->lock, I915_MM_GET_PAGES);
- if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
- return 0;
+ if (atomic_inc_not_zero(&obj->mm->pin_count))
+ return obj->mm;
return __i915_gem_object_get_pages(obj);
}
@@ -302,30 +285,27 @@ i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
- return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
+ return !IS_ERR_OR_NULL(READ_ONCE(obj->mm->pages));
}
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-
- atomic_inc(&obj->mm.pages_pin_count);
+ i915_mm_pages_pin(obj->mm);
}
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
- return atomic_read(&obj->mm.pages_pin_count);
+ return i915_mm_pages_is_pinned(obj->mm);
}
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- atomic_dec(&obj->mm.pages_pin_count);
+ i915_mm_pages_unpin(obj->mm);
}
static inline void
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2d404e6f63df..1ad37c333950 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,6 +10,8 @@
#include <drm/drm_gem.h>
#include <uapi/drm/i915_drm.h>
+#include "mm/i915_mm_pages.h"
+
#include "i915_active.h"
#include "i915_selftest.h"
@@ -85,6 +87,8 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
+ struct i915_mm_pages *mm;
+
struct {
/**
* @vma.lock: protect the list/tree of vmas
@@ -142,12 +146,6 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
- unsigned long flags;
-#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_VOLATILE BIT(1)
-#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE)
-#define I915_BO_READONLY BIT(2)
-
/*
* Is the object to be mapped as read-only to the GPU
* Only honoured if hardware has relevant pte bit
@@ -157,6 +155,7 @@ struct drm_i915_gem_object {
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
unsigned int cache_dirty:1;
+ unsigned int readonly:1;
/**
* @read_domains: Read memory domains.
@@ -184,95 +183,11 @@ struct drm_i915_gem_object {
/** Count of VMA actually bound by this object */
atomic_t bind_count;
- struct {
- /*
- * Protects the pages and their use. Do not use directly, but
- * instead go through the pin/unpin interfaces.
- */
- struct mutex lock;
- atomic_t pages_pin_count;
- atomic_t shrink_pin;
-
- /**
- * Memory region for this object.
- */
- struct intel_memory_region *region;
- /**
- * List of memory region blocks allocated for this object.
- */
- struct list_head blocks;
- /**
- * Element within memory_region->objects or region->purgeable
- * if the object is marked as DONTNEED. Access is protected by
- * region->obj_lock.
- */
- struct list_head region_link;
-
- struct sg_table *pages;
- void *mapping;
-
- struct i915_page_sizes {
- /**
- * The sg mask of the pages sg_table. i.e the mask of
- * of the lengths for each sg entry.
- */
- unsigned int phys;
-
- /**
- * The gtt page sizes we are allowed to use given the
- * sg mask and the supported page sizes. This will
- * express the smallest unit we can use for the whole
- * object, as well as the larger sizes we may be able
- * to use opportunistically.
- */
- unsigned int sg;
-
- /**
- * The actual gtt page size usage. Since we can have
- * multiple vma associated with this object we need to
- * prevent any trampling of state, hence a copy of this
- * struct also lives in each vma, therefore the gtt
- * value here should only be read/write through the vma.
- */
- unsigned int gtt;
- } page_sizes;
-
- I915_SELFTEST_DECLARE(unsigned int page_mask);
-
- struct i915_gem_object_page_iter {
- struct scatterlist *sg_pos;
- unsigned int sg_idx; /* in pages, but 32bit eek! */
-
- struct radix_tree_root radix;
- struct mutex lock; /* protects this cache */
- } get_page;
-
- /**
- * Element within i915->mm.unbound_list or i915->mm.bound_list,
- * locked by i915->mm.obj_lock.
- */
- struct list_head link;
-
- /**
- * Advice: are the backing pages purgeable?
- */
- unsigned int madv:2;
-
- /**
- * This is set if the object has been written to since the
- * pages were last acquired.
- */
- bool dirty:1;
-
- /**
- * This is set if the object has been pinned due to unknown
- * swizzling.
- */
- bool quirked:1;
- } mm;
-
- /** Record of address bit 17 of each page at last unbind. */
- unsigned long *bit_17;
+ /**
+ * Element within i915->mm.unbound_list or i915->mm.bound_list,
+ * locked by i915->mm.obj_lock.
+ */
+ struct list_head mm_link;
union {
struct i915_gem_userptr {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 75197ca696a8..5119f1d41e18 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -9,6 +9,7 @@
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "mm/i915_mm_iomap.h"
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
@@ -16,12 +17,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
unsigned long supported = INTEL_INFO(i915)->page_sizes;
+ struct i915_mm_pages *mm = obj->mm;
int i;
- lockdep_assert_held(&obj->mm.lock);
+ lockdep_assert_held(&mm->lock);
- if (i915_gem_object_is_volatile(obj))
- obj->mm.madv = I915_MADV_DONTNEED;
+ if (i915_mm_pages_is_volatile(mm))
+ mm->madv = I915_MADV_DONTNEED;
/* Make the pages coherent with the GPU (flushing any swapin). */
if (obj->cache_dirty) {
@@ -31,20 +33,20 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
obj->cache_dirty = false;
}
- obj->mm.get_page.sg_pos = pages->sgl;
- obj->mm.get_page.sg_idx = 0;
+ mm->get_page.sg_pos = pages->sgl;
+ mm->get_page.sg_idx = 0;
- obj->mm.pages = pages;
+ mm->pages = pages;
if (i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_mm_pages_is_quirked(mm));
+ i915_mm_pages_pin(mm);
+ i915_mm_pages_set_quirk(mm);
}
GEM_BUG_ON(!sg_page_sizes);
- obj->mm.page_sizes.phys = sg_page_sizes;
+ mm->page_sizes.phys = sg_page_sizes;
/*
* Calculate the supported page-sizes which fit into the given
@@ -54,12 +56,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
* 64K or 4K pages, although in practice this will depend on a number of
* other factors.
*/
- obj->mm.page_sizes.sg = 0;
+ mm->page_sizes.sg = 0;
for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
- if (obj->mm.page_sizes.phys & ~0u << i)
- obj->mm.page_sizes.sg |= BIT(i);
+ if (mm->page_sizes.phys & ~0u << i)
+ mm->page_sizes.sg |= BIT(i);
}
- GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+ GEM_BUG_ON(!HAS_PAGE_SIZES(i915, mm->page_sizes.sg));
if (i915_gem_object_is_shrinkable(obj)) {
struct list_head *list;
@@ -70,13 +72,13 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
- if (obj->mm.madv != I915_MADV_WILLNEED)
+ if (mm->madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
else
list = &i915->mm.shrink_list;
- list_add_tail(&obj->mm.link, list);
+ list_add_tail(&obj->mm_link, list);
- atomic_set(&obj->mm.shrink_pin, 0);
+ atomic_set(&mm->shrink_pin, 0);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
@@ -85,7 +87,7 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
int err;
- if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
+ if (unlikely(obj->mm->madv != I915_MADV_WILLNEED)) {
DRM_DEBUG("Attempting to obtain a purgeable object\n");
return -EFAULT;
}
@@ -103,16 +105,18 @@ int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
* either as a result of memory pressure (reaping pages under the shrinker)
* or as the object is itself released.
*/
-int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
+struct i915_mm_pages *
+__i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
int err;
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ err = mutex_lock_interruptible_nested(&mm->lock, I915_MM_GET_PAGES);
if (err)
- return err;
+ return ERR_PTR(err);
- if (unlikely(!i915_gem_object_has_pages(obj))) {
- GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+ if (unlikely(!mm->pages)) {
+ GEM_BUG_ON(atomic_read(&mm->pin_count));
err = ____i915_gem_object_get_pages(obj);
if (err)
@@ -120,11 +124,11 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
smp_mb__before_atomic();
}
- atomic_inc(&obj->mm.pages_pin_count);
+ atomic_inc(&mm->pin_count);
unlock:
- mutex_unlock(&obj->mm.lock);
- return err;
+ mutex_unlock(&mm->lock);
+ return err ? ERR_PTR(err) : mm;
}
/* Immediately discard the backing storage */
@@ -138,24 +142,13 @@ void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
- lockdep_assert_held(&obj->mm.lock);
+ lockdep_assert_held(&obj->mm->lock);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
if (obj->ops->writeback)
obj->ops->writeback(obj);
}
-static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
-{
- struct radix_tree_iter iter;
- void __rcu **slot;
-
- rcu_read_lock();
- radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
- radix_tree_delete(&obj->mm.get_page.radix, iter.index);
- rcu_read_unlock();
-}
-
static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
if (i915_gem_object_is_lmem(obj))
@@ -169,30 +162,33 @@ static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sg_table *pages;
- pages = fetch_and_zero(&obj->mm.pages);
+ pages = fetch_and_zero(&mm->pages);
if (IS_ERR_OR_NULL(pages))
return pages;
- if (i915_gem_object_is_volatile(obj))
- obj->mm.madv = I915_MADV_WILLNEED;
+ if (i915_mm_pages_is_volatile(mm))
+ mm->madv = I915_MADV_WILLNEED;
i915_gem_object_make_unshrinkable(obj);
- if (obj->mm.mapping) {
- unmap_object(obj, page_mask_bits(obj->mm.mapping));
- obj->mm.mapping = NULL;
+ if (mm->mapping) {
+ unmap_object(obj, page_mask_bits(mm->mapping));
+ mm->mapping = NULL;
}
- __i915_gem_object_reset_page_iter(obj);
- obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ __i915_mm_pages_reset_page_iter(mm);
+ mm->page_sizes.phys = 0;
+ mm->page_sizes.sg = 0;
return pages;
}
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sg_table *pages;
int err;
@@ -202,8 +198,8 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
GEM_BUG_ON(atomic_read(&obj->bind_count));
/* May be called by shrinker from within get_pages() (on another bo) */
- mutex_lock(&obj->mm.lock);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+ mutex_lock(&mm->lock);
+ if (unlikely(atomic_read(&mm->pin_count))) {
err = -EBUSY;
goto unlock;
}
@@ -231,7 +227,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
err = 0;
unlock:
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&mm->lock);
return err;
}
@@ -241,7 +237,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
- struct sg_table *sgt = obj->mm.pages;
+ struct sg_table *sgt = obj->mm->pages;
struct sgt_iter sgt_iter;
struct page *page;
struct page *stack_pages[32];
@@ -256,7 +252,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
if (type != I915_MAP_WC)
return NULL;
- io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+ io = i915_mm_pages_io_map(obj->mm, 0, obj->base.size);
return (void __force *)io;
}
@@ -300,6 +296,7 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type)
{
+ struct i915_mm_pages *mm = obj->mm;
enum i915_map_type has_type;
unsigned int flags;
bool pinned;
@@ -310,14 +307,14 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
if (!i915_gem_object_type_has(obj, flags))
return ERR_PTR(-ENXIO);
- err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ err = mutex_lock_interruptible_nested(&mm->lock, I915_MM_GET_PAGES);
if (err)
return ERR_PTR(err);
pinned = !(type & I915_MAP_OVERRIDE);
type &= ~I915_MAP_OVERRIDE;
- if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
+ if (!atomic_inc_not_zero(&mm->pin_count)) {
if (unlikely(!i915_gem_object_has_pages(obj))) {
GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
@@ -327,12 +324,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
smp_mb__before_atomic();
}
- atomic_inc(&obj->mm.pages_pin_count);
+ atomic_inc(&mm->pin_count);
pinned = false;
}
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ ptr = page_unpack_bits(mm->mapping, &has_type);
if (ptr && has_type != type) {
if (pinned) {
err = -EBUSY;
@@ -341,7 +338,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
unmap_object(obj, ptr);
- ptr = obj->mm.mapping = NULL;
+ ptr = mm->mapping = NULL;
}
if (!ptr) {
@@ -351,15 +348,15 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
goto err_unpin;
}
- obj->mm.mapping = page_pack_bits(ptr, type);
+ mm->mapping = page_pack_bits(ptr, type);
}
out_unlock:
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&mm->lock);
return ptr;
err_unpin:
- atomic_dec(&obj->mm.pages_pin_count);
+ atomic_dec(&mm->pin_count);
err_unlock:
ptr = ERR_PTR(err);
goto out_unlock;
@@ -369,19 +366,20 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
unsigned long offset,
unsigned long size)
{
+ struct i915_mm_pages *mm = obj->mm;
enum i915_map_type has_type;
void *ptr;
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(!i915_mm_pages_is_pinned(mm));
GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
offset, size, obj->base.size));
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(mm);
if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
return;
- ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ ptr = page_unpack_bits(mm->mapping, &has_type);
if (has_type == I915_MAP_WC)
return;
@@ -392,164 +390,3 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
}
}
-struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
-{
- struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
- struct scatterlist *sg;
- unsigned int idx, count;
-
- might_sleep();
- GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
-
- /* As we iterate forward through the sg, we record each entry in a
- * radixtree for quick repeated (backwards) lookups. If we have seen
- * this index previously, we will have an entry for it.
- *
- * Initial lookup is O(N), but this is amortized to O(1) for
- * sequential page access (where each new request is consecutive
- * to the previous one). Repeated lookups are O(lg(obj->base.size)),
- * i.e. O(1) with a large constant!
- */
- if (n < READ_ONCE(iter->sg_idx))
- goto lookup;
-
- mutex_lock(&iter->lock);
-
- /* We prefer to reuse the last sg so that repeated lookup of this
- * (or the subsequent) sg are fast - comparing against the last
- * sg is faster than going through the radixtree.
- */
-
- sg = iter->sg_pos;
- idx = iter->sg_idx;
- count = __sg_page_count(sg);
-
- while (idx + count <= n) {
- void *entry;
- unsigned long i;
- int ret;
-
- /* If we cannot allocate and insert this entry, or the
- * individual pages from this range, cancel updating the
- * sg_idx so that on this lookup we are forced to linearly
- * scan onwards, but on future lookups we will try the
- * insertion again (in which case we need to be careful of
- * the error return reporting that we have already inserted
- * this index).
- */
- ret = radix_tree_insert(&iter->radix, idx, sg);
- if (ret && ret != -EEXIST)
- goto scan;
-
- entry = xa_mk_value(idx);
- for (i = 1; i < count; i++) {
- ret = radix_tree_insert(&iter->radix, idx + i, entry);
- if (ret && ret != -EEXIST)
- goto scan;
- }
-
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
-scan:
- iter->sg_pos = sg;
- iter->sg_idx = idx;
-
- mutex_unlock(&iter->lock);
-
- if (unlikely(n < idx)) /* insertion completed by another thread */
- goto lookup;
-
- /* In case we failed to insert the entry into the radixtree, we need
- * to look beyond the current sg.
- */
- while (idx + count <= n) {
- idx += count;
- sg = ____sg_next(sg);
- count = __sg_page_count(sg);
- }
-
- *offset = n - idx;
- return sg;
-
-lookup:
- rcu_read_lock();
-
- sg = radix_tree_lookup(&iter->radix, n);
- GEM_BUG_ON(!sg);
-
- /* If this index is in the middle of multi-page sg entry,
- * the radix tree will contain a value entry that points
- * to the start of that range. We will return the pointer to
- * the base page and the offset of this page within the
- * sg entry's range.
- */
- *offset = 0;
- if (unlikely(xa_is_value(sg))) {
- unsigned long base = xa_to_value(sg);
-
- sg = radix_tree_lookup(&iter->radix, base);
- GEM_BUG_ON(!sg);
-
- *offset = n - base;
- }
-
- rcu_read_unlock();
-
- return sg;
-}
-
-struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
-}
-
-/* Like i915_gem_object_get_page(), but mark the returned page dirty */
-struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
-{
- struct page *page;
-
- page = i915_gem_object_get_page(obj, n);
- if (!obj->mm.dirty)
- set_page_dirty(page);
-
- return page;
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len)
-{
- struct scatterlist *sg;
- unsigned int offset;
-
- sg = i915_gem_object_get_sg(obj, n, &offset);
-
- if (len)
- *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
-
- return sg_dma_address(sg) + (offset << PAGE_SHIFT);
-}
-
-dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
-{
- return i915_gem_object_get_dma_address_len(obj, n, NULL);
-}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b1b7c1b3038a..3f2e2395e3a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -18,6 +18,7 @@
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"
+#include "mm/i915_mm_region.h"
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
@@ -99,9 +100,11 @@ static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
__i915_gem_object_release_shmem(obj, pages, false);
- if (obj->mm.dirty) {
+ if (i915_mm_pages_is_dirty(mm)) {
struct address_space *mapping = obj->base.filp->f_mapping;
char *vaddr = obj->phys_handle->vaddr;
int i;
@@ -120,12 +123,12 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
kunmap_atomic(dst);
set_page_dirty(page);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (mm->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
vaddr += PAGE_SIZE;
}
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
sg_free_table(pages);
@@ -148,6 +151,7 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sg_table *pages;
int err;
@@ -164,19 +168,19 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (err)
return err;
- mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ mutex_lock_nested(&mm->lock, I915_MM_GET_PAGES);
- if (obj->mm.madv != I915_MADV_WILLNEED) {
+ if (mm->madv != I915_MADV_WILLNEED) {
err = -EFAULT;
goto err_unlock;
}
- if (obj->mm.quirked) {
+ if (i915_mm_pages_is_quirked(mm)) {
err = -EFAULT;
goto err_unlock;
}
- if (obj->mm.mapping) {
+ if (mm->mapping) {
err = -EBUSY;
goto err_unlock;
}
@@ -194,9 +198,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
if (!IS_ERR_OR_NULL(pages)) {
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
+ i915_mm_pages_release_memory_region(obj->mm);
}
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&mm->lock);
return 0;
err_xfer:
@@ -207,7 +211,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
}
err_unlock:
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&mm->lock);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 3671a4e7e1cb..683a0e8579ee 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -36,7 +36,7 @@ static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
return list_first_entry_or_null(list,
struct drm_i915_gem_object,
- mm.link);
+ mm_link);
}
void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -76,7 +76,7 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
LIST_HEAD(keep);
while ((obj = first_mm_object(*phase))) {
- list_move_tail(&obj->mm.link, &keep);
+ list_move_tail(&obj->mm_link, &keep);
/* Beware the background _i915_gem_free_objects */
if (!kref_get_unless_zero(&obj->base.refcount))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index d50adac12249..d9a665175794 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -12,9 +12,11 @@ void
i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
- __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks);
+ struct i915_mm_pages *mm = obj->mm;
+
+ __intel_memory_region_put_pages_buddy(mm->region, &mm->blocks);
+ i915_mm_pages_clear_dirty(mm);
- obj->mm.dirty = false;
sg_free_table(pages);
kfree(pages);
}
@@ -22,8 +24,9 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
int
i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
{
- struct intel_memory_region *mem = obj->mm.region;
- struct list_head *blocks = &obj->mm.blocks;
+ struct i915_mm_pages *mm = obj->mm;
+ struct intel_memory_region *mem = mm->region;
+ struct list_head *blocks = &mm->blocks;
resource_size_t size = obj->base.size;
resource_size_t prev_end;
struct i915_buddy_block *block;
@@ -43,7 +46,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
}
flags = I915_ALLOC_MIN_PAGE_SIZE;
- if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+ if (mm->flags & I915_MM_PAGES_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS;
ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
@@ -101,35 +104,6 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
return ret;
}
-void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mem,
- unsigned long flags)
-{
- INIT_LIST_HEAD(&obj->mm.blocks);
- obj->mm.region = intel_memory_region_get(mem);
- obj->flags |= flags;
-
- mutex_lock(&mem->objects.lock);
-
- if (obj->flags & I915_BO_ALLOC_VOLATILE)
- list_add(&obj->mm.region_link, &mem->objects.purgeable);
- else
- list_add(&obj->mm.region_link, &mem->objects.list);
-
- mutex_unlock(&mem->objects.lock);
-}
-
-void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
-{
- struct intel_memory_region *mem = obj->mm.region;
-
- mutex_lock(&mem->objects.lock);
- list_del(&obj->mm.region_link);
- mutex_unlock(&mem->objects.lock);
-
- intel_memory_region_put(mem);
-}
-
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
@@ -143,8 +117,6 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
* future.
*/
- GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
-
if (!mem)
return ERR_PTR(-ENODEV);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index f2ff6f8bff74..1af20f703cbe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -16,11 +16,6 @@ int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj);
void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
struct sg_table *pages);
-void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
- struct intel_memory_region *mem,
- unsigned long flags);
-void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
-
struct drm_i915_gem_object *
i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 4d69c3fc3439..f1d065c8afea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -13,6 +13,7 @@
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "mm/i915_mm_region.h"
/*
* Move pages to appropriate lru and release the pagevec, decrementing the
@@ -28,7 +29,7 @@ static void check_release_pagevec(struct pagevec *pvec)
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct intel_memory_region *mem = obj->mm.region;
+ struct intel_memory_region *mem = obj->mm->region;
const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned long i;
struct address_space *mapping;
@@ -219,6 +220,8 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
+
/*
* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
@@ -226,8 +229,8 @@ shmem_truncate(struct drm_i915_gem_object *obj)
* backing pages, *now*.
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
- obj->mm.madv = __I915_MADV_PURGED;
- obj->mm.pages = ERR_PTR(-EFAULT);
+ mm->madv = __I915_MADV_PURGED;
+ mm->pages = ERR_PTR(-EFAULT);
}
static void
@@ -280,10 +283,12 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
{
- GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
+ struct i915_mm_pages *mm = obj->mm;
+
+ GEM_BUG_ON(mm->madv == __I915_MADV_PURGED);
- if (obj->mm.madv == I915_MADV_DONTNEED)
- obj->mm.dirty = false;
+ if (mm->madv == I915_MADV_DONTNEED)
+ i915_mm_pages_clear_dirty(mm);
if (needs_clflush &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
@@ -296,6 +301,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sgt_iter sgt_iter;
struct pagevec pvec;
struct page *page;
@@ -311,10 +317,10 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
pagevec_init(&pvec);
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty)
+ if (i915_mm_pages_is_dirty(mm))
set_page_dirty(page);
- if (obj->mm.madv == I915_MADV_WILLNEED)
+ if (mm->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
if (!pagevec_add(&pvec, page))
@@ -322,7 +328,7 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
}
if (pagevec_count(&pvec))
check_release_pagevec(&pvec);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
sg_free_table(pages);
kfree(pages);
@@ -352,7 +358,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (i915_gem_object_has_pages(obj))
return -ENODEV;
- if (obj->mm.madv != I915_MADV_WILLNEED)
+ if (obj->mm->madv != I915_MADV_WILLNEED)
return -EFAULT;
/*
@@ -420,8 +426,7 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
static void shmem_release(struct drm_i915_gem_object *obj)
{
- i915_gem_object_release_memory_region(obj);
-
+ i915_mm_pages_release_memory_region(obj->mm);
fput(obj->base.filp);
}
@@ -473,7 +478,7 @@ create_shmem(struct intel_memory_region *mem,
gfp_t mask;
int ret;
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&i915_gem_shmem_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -492,8 +497,6 @@ create_shmem(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class);
-
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -516,7 +519,7 @@ create_shmem(struct intel_memory_region *mem,
i915_gem_object_set_cache_coherency(obj, cache_level);
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_mm_pages_init_memory_region(obj->mm, mem, 0);
return obj;
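
Note the allocation change running through every backend here: the ops and
lock class are now passed to i915_gem_object_alloc() so the embedded
i915_mm_pages (and the lockdep class of its mm->lock) can be set up at
allocation time, replacing the old alloc + i915_gem_object_init() pair. The
assumed new signature, as the converted call sites suggest:

	struct drm_i915_gem_object *
	i915_gem_object_alloc(const struct drm_i915_gem_object_ops *ops,
			      struct lock_class_key *key);
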
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f7e4b39c734f..4610ea02231e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -23,6 +23,8 @@ static bool swap_available(void)
static bool can_release_pages(struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
+
 	/* Consider only shrinkable objects. */
if (!i915_gem_object_is_shrinkable(obj))
return false;
@@ -36,7 +38,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* to the GPU, simply unbinding from the GPU is not going to succeed
* in releasing our pin count on the pages themselves.
*/
- if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
+ if (atomic_read(&mm->pin_count) > atomic_read(&obj->bind_count))
return false;
/*
@@ -44,7 +46,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
* discard the contents (because the user has marked them as being
* purgeable) or if we can move their contents out to swap.
*/
- return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
+ return swap_available() || mm->madv == I915_MADV_DONTNEED;
}
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
@@ -65,7 +67,7 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
static void try_to_writeback(struct drm_i915_gem_object *obj,
unsigned int flags)
{
- switch (obj->mm.madv) {
+ switch (obj->mm->madv) {
case I915_MADV_DONTNEED:
i915_gem_object_truncate(obj);
case __I915_MADV_PURGED:
@@ -184,11 +186,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
while (count < target &&
(obj = list_first_entry_or_null(phase->list,
typeof(*obj),
- mm.link))) {
- list_move_tail(&obj->mm.link, &still_in_list);
+ mm_link))) {
+ list_move_tail(&obj->mm_link, &still_in_list);
if (shrink & I915_SHRINK_VMAPS &&
- !is_vmalloc_addr(obj->mm.mapping))
+ !is_vmalloc_addr(obj->mm->mapping))
continue;
if (!(shrink & I915_SHRINK_ACTIVE) &&
@@ -209,12 +211,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
if (unsafe_drop_pages(obj, shrink)) {
/* May arrive from get_pages on another bo */
- mutex_lock(&obj->mm.lock);
+ mutex_lock(&obj->mm->lock);
if (!i915_gem_object_has_pages(obj)) {
try_to_writeback(obj, shrink);
count += obj->base.size >> PAGE_SHIFT;
}
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&obj->mm->lock);
}
scanned += obj->base.size >> PAGE_SHIFT;
@@ -347,7 +349,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
*/
available = unevictable = 0;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm_link) {
if (!can_release_pages(obj))
unevictable += obj->base.size >> PAGE_SHIFT;
else
@@ -457,13 +459,13 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
* from a single caller under controlled conditions; and on release
* only one caller may release us. Neither the two may cross.
*/
- if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+ if (atomic_add_unless(&obj->mm->shrink_pin, 1, 0))
return;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
- !list_empty(&obj->mm.link)) {
- list_del_init(&obj->mm.link);
+ if (!atomic_fetch_inc(&obj->mm->shrink_pin) &&
+ !list_empty(&obj->mm_link)) {
+ list_del_init(&obj->mm_link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;
}
@@ -480,15 +482,15 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
if (!i915_gem_object_is_shrinkable(obj))
return;
- if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+ if (atomic_add_unless(&obj->mm->shrink_pin, -1, 1))
return;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(!kref_read(&obj->base.refcount));
- if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
- GEM_BUG_ON(!list_empty(&obj->mm.link));
+ if (atomic_dec_and_test(&obj->mm->shrink_pin)) {
+ GEM_BUG_ON(!list_empty(&obj->mm_link));
- list_add_tail(&obj->mm.link, head);
+ list_add_tail(&obj->mm_link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;
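
The shrink_pin accounting above keeps its old semantics, just relocated to
the pages struct: the first make_unshrinkable takes the object off the
shrink lists, the final make_shrinkable puts it back, and the two must stay
balanced. A sketch of the expected pairing (assuming the existing
i915_gem_object_make_shrinkable() counterpart):

	i915_gem_object_make_unshrinkable(obj);
	/* ... pages ignored by the shrinker while we use them ... */
	i915_gem_object_make_shrinkable(obj);
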
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index d582b3677368..fc80c31d0d9e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "mm/i915_mm_region.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -549,7 +550,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!stolen);
- i915_gem_object_release_memory_region(obj);
+ i915_mm_pages_release_memory_region(obj->mm);
i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
@@ -568,25 +569,27 @@ __i915_gem_object_create_stolen(struct intel_memory_region *mem,
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
unsigned int cache_level;
+ struct i915_mm_pages *mm;
int err = -ENOMEM;
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&i915_gem_object_stolen_ops, &lock_class);
if (!obj)
goto err;
drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
- i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);
obj->stolen = stolen;
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
i915_gem_object_set_cache_coherency(obj, cache_level);
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto cleanup;
+ }
- i915_gem_object_init_memory_region(obj, mem, 0);
+ i915_mm_pages_init_memory_region(obj->mm, mem, 0);
return obj;
@@ -640,7 +643,7 @@ i915_gem_object_create_stolen(struct drm_i915_private *i915,
resource_size_t size)
{
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
- size, I915_BO_ALLOC_CONTIGUOUS);
+ size, I915_MM_PAGES_CONTIGUOUS);
}
static int init_stolen(struct intel_memory_region *mem)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..19b5dc717a65 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -208,6 +208,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_mm_pages *mm = obj->mm;
struct i915_vma *vma;
int err;
@@ -252,22 +253,22 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
*/
- mutex_lock(&obj->mm.lock);
- if (i915_gem_object_has_pages(obj) &&
- obj->mm.madv == I915_MADV_WILLNEED &&
+ mutex_lock(&mm->lock);
+ if (mm->pages &&
+ mm->madv == I915_MADV_WILLNEED &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (tiling == I915_TILING_NONE) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_mm_pages_is_quirked(mm));
+ i915_mm_pages_unpin(mm);
+ i915_mm_pages_clear_quirk(mm);
}
if (!i915_gem_object_is_tiled(obj)) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_mm_pages_is_quirked(mm));
+ i915_mm_pages_pin(mm);
+ i915_mm_pages_set_quirk(mm);
}
}
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&mm->lock);
for_each_ggtt_vma(vma, obj) {
vma->fence_size =
@@ -288,13 +289,14 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
- if (!obj->bit_17) {
- obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
- GFP_KERNEL);
+ if (!obj->mm->bit_17) {
+ obj->mm->bit_17 =
+ bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+ GFP_KERNEL);
}
} else {
- bitmap_free(obj->bit_17);
- obj->bit_17 = NULL;
+ bitmap_free(obj->mm->bit_17);
+ obj->mm->bit_17 = NULL;
}
return 0;
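
For reference, the quirk handling in set_tiling and madvise keeps a single
invariant: on QUIRK_PIN_SWIZZLED_PAGES platforms a tiled, WILLNEED object
holds one extra page pin, recorded in the QUIRK flag. A sketch of that
invariant with the new helpers (wants_quirk is a placeholder condition):

	if (wants_quirk && !i915_mm_pages_is_quirked(mm)) {
		i915_mm_pages_pin(mm);
		i915_mm_pages_set_quirk(mm);
	} else if (!wants_quirk && i915_mm_pages_is_quirked(mm)) {
		i915_mm_pages_unpin(mm);
		i915_mm_pages_clear_quirk(mm);
	}
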
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index f7f66c62cf0e..632470727f18 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -489,7 +489,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
}
- mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+ mutex_lock_nested(&obj->mm->lock, I915_MM_GET_PAGES);
if (obj->userptr.work == &work->work) {
struct sg_table *pages = ERR_PTR(ret);
@@ -506,7 +506,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (IS_ERR(pages))
__i915_gem_userptr_set_active(obj, false);
}
- mutex_unlock(&obj->mm.lock);
+ mutex_unlock(&obj->mm->lock);
release_pages(pvec, pinned);
kvfree(pvec);
@@ -631,6 +631,7 @@ static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sgt_iter sgt_iter;
struct page *page;
@@ -649,10 +650,10 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
* that the object will never have been written to.
*/
if (i915_gem_object_is_readonly(obj))
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
for_each_sgt_page(page, sgt_iter, pages) {
- if (obj->mm.dirty && trylock_page(page)) {
+ if (i915_mm_pages_is_dirty(mm) && trylock_page(page)) {
/*
* As this may not be anonymous memory (e.g. shmem)
* but exist on a real mapping, we have to lock
@@ -678,7 +679,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
mark_page_accessed(page);
put_page(page);
}
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
sg_free_table(pages);
kfree(pages);
@@ -791,12 +792,11 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
- obj = i915_gem_object_alloc();
- if (obj == NULL)
+ obj = i915_gem_object_alloc(&i915_gem_userptr_ops, &lock_class);
+ if (!obj)
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
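
The mutex_lock_nested(&obj->mm->lock, I915_MM_GET_PAGES) annotation above
carries over unchanged from obj->mm.lock: while populating pages the lock
must be taken with the I915_MM_GET_PAGES subclass (see the enum in
i915_mm_pages_types.h below) so lockdep can distinguish it from holders
that may recurse into the shrinker. Sketch:

	mutex_lock_nested(&obj->mm->lock, I915_MM_GET_PAGES);
	/* ... install the freshly acquired pages ... */
	mutex_unlock(&obj->mm->lock);
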
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index fa16f2c3f3ac..30d6652f1edc 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -81,10 +81,12 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
static void huge_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
i915_gem_gtt_finish_pages(obj, pages);
huge_free_pages(obj, pages);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
static const struct drm_i915_gem_object_ops huge_ops = {
@@ -110,12 +112,11 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&huge_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
- i915_gem_object_init(obj, &huge_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 497c367a79ca..67f100b4ee35 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
+#include "mm/i915_mm_iomap.h"
#include "igt_gem_utils.h"
#include "mock_context.h"
@@ -59,7 +60,8 @@ static void huge_pages_free_pages(struct sg_table *st)
static int get_huge_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
- unsigned int page_mask = obj->mm.page_mask;
+ struct i915_mm_pages *mm = obj->mm;
+ unsigned int page_mask = mm->page_mask;
struct sg_table *st;
struct scatterlist *sg;
unsigned int sg_page_sizes;
@@ -116,7 +118,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
if (i915_gem_gtt_prepare_pages(obj, st))
goto err;
- GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+ GEM_BUG_ON(sg_page_sizes != mm->page_mask);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -132,10 +134,12 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
static void put_huge_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
i915_gem_gtt_finish_pages(obj, pages);
huge_pages_free_pages(pages);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -162,20 +166,19 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&huge_page_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &huge_page_ops, &lock_class);
- i915_gem_object_set_volatile(obj);
+ i915_mm_pages_set_volatile(obj->mm);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
- obj->mm.page_mask = page_mask;
+ obj->mm->page_mask = page_mask;
return obj;
}
@@ -184,9 +187,9 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
const u64 max_len = rounddown_pow_of_two(UINT_MAX);
- struct sg_table *st;
- struct scatterlist *sg;
unsigned int sg_page_sizes;
+ struct scatterlist *sg;
+ struct sg_table *st;
u64 rem;
st = kmalloc(sizeof(*st), GFP);
@@ -238,9 +241,9 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
unsigned int page_size;
+ struct scatterlist *sg;
+ struct sg_table *st;
st = kmalloc(sizeof(*st), GFP);
if (!st)
@@ -278,8 +281,10 @@ static void fake_free_huge_pages(struct drm_i915_gem_object *obj,
static void fake_put_huge_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
fake_free_huge_pages(obj, pages);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -309,18 +314,14 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(single ? &fake_ops_single : &fake_ops,
+ &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- if (single)
- i915_gem_object_init(obj, &fake_ops_single, &lock_class);
- else
- i915_gem_object_init(obj, &fake_ops, &lock_class);
-
- i915_gem_object_set_volatile(obj);
+ i915_mm_pages_set_volatile(obj->mm);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
@@ -334,6 +335,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj;
+ struct i915_mm_pages *mm = obj->mm;
int err;
/* We have to wait for the async bind to complete before our asserts */
@@ -353,21 +355,21 @@ static int igt_check_page_sizes(struct i915_vma *vma)
err = -EINVAL;
}
- if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
+ if (vma->page_sizes.phys != mm->page_sizes.phys) {
pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n",
- vma->page_sizes.phys, obj->mm.page_sizes.phys);
+ vma->page_sizes.phys, mm->page_sizes.phys);
err = -EINVAL;
}
- if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
+ if (vma->page_sizes.sg != mm->page_sizes.sg) {
pr_err("vma->page_sizes.sg(%u) != obj->mm.page_sizes.sg(%u)\n",
- vma->page_sizes.sg, obj->mm.page_sizes.sg);
+ vma->page_sizes.sg, mm->page_sizes.sg);
err = -EINVAL;
}
- if (obj->mm.page_sizes.gtt) {
+ if (mm->page_sizes.gtt) {
pr_err("obj->page_sizes.gtt(%u) should never be set\n",
- obj->mm.page_sizes.gtt);
+ mm->page_sizes.gtt);
err = -EINVAL;
}
@@ -455,7 +457,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
static int igt_mock_memory_region_huge_pages(void *arg)
{
- const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
+ const unsigned int flags[] = { 0, I915_MM_PAGES_CONTIGUOUS };
struct i915_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -561,6 +563,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
unsigned int size =
round_up(page_size, I915_GTT_PAGE_SIZE_2M) << 1;
struct i915_vma *vma;
+ struct i915_mm_pages *mm;
obj = fake_huge_pages_object(i915, size, true);
if (IS_ERR(obj))
@@ -573,12 +576,14 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
goto out_put;
}
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out_put;
+ }
/* Force the page size for this object */
- obj->mm.page_sizes.sg = page_size;
+ mm->page_sizes.sg = page_size;
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -649,7 +654,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
i915_vma_close(vma);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -699,6 +704,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
u64 size = page_num << PAGE_SHIFT;
struct i915_vma *vma;
unsigned int expected_gtt = 0;
+ struct i915_mm_pages *mm;
int i;
obj = fake_huge_pages_object(i915, size, single);
@@ -715,8 +721,8 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
break;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+		if (IS_ERR(mm)) {
+			err = PTR_ERR(mm);
i915_gem_object_put(obj);
break;
}
@@ -887,19 +893,23 @@ static int igt_mock_ppgtt_64K(void *arg)
unsigned int flags = PIN_USER;
for (single = 0; single <= 1; single++) {
+ struct i915_mm_pages *mm;
+
obj = fake_huge_pages_object(i915, size, !!single);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out_object_put;
+ }
/*
* Disable 2M pages -- We only want to use 64K/4K pages
* for this test.
*/
- obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
+ mm->page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -947,7 +957,7 @@ static int igt_mock_ppgtt_64K(void *arg)
i915_vma_unpin(vma);
i915_vma_close(vma);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -1019,6 +1029,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
+ struct i915_mm_pages *mm;
unsigned long n;
int err;
@@ -1028,15 +1039,15 @@ static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
u32 __iomem *base;
u32 read_val;
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+ base = i915_mm_pages_io_map_page_atomic(obj->mm, n);
read_val = ioread32(base + dword);
io_mapping_unmap_atomic(base);
@@ -1048,7 +1059,7 @@ static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
}
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return err;
}
@@ -1118,6 +1129,7 @@ static int __igt_write_huge(struct intel_context *ce,
static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
+ struct i915_mm_pages *mm = obj->mm;
struct i915_gem_engines *engines;
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -1135,7 +1147,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
size = obj->base.size;
- if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ if (mm->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
size = round_up(size, I915_GTT_PAGE_SIZE_2M);
n = 0;
@@ -1162,7 +1174,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
if (!order)
return -ENOMEM;
- max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
+ max_page_size = rounddown_pow_of_two(obj->mm->page_sizes.sg);
max = div_u64(max - size, max_page_size);
/*
@@ -1189,7 +1201,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
* boundary, however to improve coverage we opt for testing both
* aligned and unaligned offsets.
*/
- if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
+ if (mm->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
offset_low = round_down(offset_low,
I915_GTT_PAGE_SIZE_2M);
@@ -1254,6 +1266,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
*/
for (page_mask = 2; page_mask <= size_mask; page_mask++) {
unsigned int page_sizes = 0;
+ struct i915_mm_pages *mm;
for (i = 0; i < n; i++) {
if (page_mask & BIT(i))
@@ -1273,8 +1286,8 @@ static int igt_ppgtt_exhaust_huge(void *arg)
goto out_device;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+			if (IS_ERR(mm)) {
+				err = PTR_ERR(mm);
i915_gem_object_put(obj);
if (err == -ENOMEM) {
@@ -1291,7 +1304,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
}
/* Force the page-size for the gtt insertion */
- obj->mm.page_sizes.sg = page_sizes;
+ mm->page_sizes.sg = page_sizes;
err = igt_write_huge(ctx, obj);
if (err) {
@@ -1300,7 +1313,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
goto out_unpin;
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
}
@@ -1400,6 +1413,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
for (i = 0; i < ARRAY_SIZE(backends); ++i) {
u32 min = backends[i].min;
u32 max = backends[i].max;
+ struct i915_mm_pages *mm;
u32 size = max;
try_again:
size = igt_random_size(&prng, min, rounddown_pow_of_two(size));
@@ -1418,8 +1432,9 @@ static int igt_ppgtt_smoke_huge(void *arg)
return err;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
if (err == -ENXIO || err == -E2BIG) {
i915_gem_object_put(obj);
size >>= 1;
@@ -1428,7 +1443,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
goto out_put;
}
- if (obj->mm.page_sizes.phys < min) {
+ if (mm->page_sizes.phys < min) {
pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n",
__func__, size, i);
err = -ENOMEM;
@@ -1441,7 +1456,7 @@ static int igt_ppgtt_smoke_huge(void *arg)
__func__, size, i);
}
out_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
__i915_gem_object_put_pages(obj);
out_put:
i915_gem_object_put(obj);
@@ -1468,7 +1483,7 @@ static int igt_ppgtt_sanity_check(void *arg)
unsigned int flags;
} backends[] = {
{ igt_create_system, 0, },
- { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, },
+ { igt_create_local, I915_MM_PAGES_CONTIGUOUS, },
};
struct {
u32 size;
@@ -1502,6 +1517,7 @@ static int igt_ppgtt_sanity_check(void *arg)
struct drm_i915_gem_object *obj;
u32 size = combos[j].size;
u32 pages = combos[j].pages;
+ struct i915_mm_pages *mm;
obj = backends[i].fn(i915, size, backends[i].flags);
if (IS_ERR(obj)) {
@@ -1515,8 +1531,9 @@ static int igt_ppgtt_sanity_check(void *arg)
return err;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
goto out;
}
@@ -1525,11 +1542,11 @@ static int igt_ppgtt_sanity_check(void *arg)
pages = pages & supported;
if (pages)
- obj->mm.page_sizes.sg = pages;
+ mm->page_sizes.sg = pages;
err = igt_write_huge(ctx, obj);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
__i915_gem_object_put_pages(obj);
i915_gem_object_put(obj);
@@ -1789,7 +1806,7 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_close;
- if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
+ if (obj->mm->page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
pr_info("failed to allocate THP, finishing test early\n");
goto out_unpin;
}
@@ -1824,7 +1841,7 @@ static int igt_shrink_thp(void *arg)
goto out_close;
}
- if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
+ if (obj->mm->page_sizes.sg || obj->mm->page_sizes.phys) {
pr_err("residual page-size bits left\n");
err = -EINVAL;
goto out_close;
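
All the selftest conversions above follow the same shape now that
i915_gem_object_pin_pages() returns the pinned i915_mm_pages (or an
ERR_PTR) instead of an int; a sketch of the pattern:

	struct i915_mm_pages *mm;

	mm = i915_gem_object_pin_pages(obj);
	if (IS_ERR(mm)) {
		err = PTR_ERR(mm);
		goto out_put;
	}
	/* ... use the pages ... */
	i915_mm_pages_unpin(mm);
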
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index b972be165e85..4718546e0342 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -66,8 +66,8 @@ static int __igt_client_fill(struct intel_engine_cs *engine)
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
- err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
- &obj->mm.page_sizes,
+ err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm->pages,
+ &obj->mm->page_sizes,
val);
if (err)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 2a52b92586b9..28ef63332bf3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -163,6 +163,7 @@ static int igt_dmabuf_import_ownership(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct i915_mm_pages *mm;
void *ptr;
int err;
@@ -190,14 +191,15 @@ static int igt_dmabuf_import_ownership(void *arg)
dma_buf_put(dmabuf);
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
goto out_obj;
}
err = 0;
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out_obj:
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 591435c5f368..cf207852ada0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -300,6 +300,7 @@ static int igt_partial_tiling(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
+ struct i915_mm_pages *mm;
int tiling;
int err;
@@ -320,8 +321,9 @@ static int igt_partial_tiling(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
@@ -419,7 +421,7 @@ next_tiling: ;
out_unlock:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out:
i915_gem_object_put(obj);
return err;
@@ -430,11 +432,12 @@ static int igt_smoke_tiling(void *arg)
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
intel_wakeref_t wakeref;
I915_RND_STATE(prng);
unsigned long count;
IGT_TIMEOUT(end);
- int err;
+ int err = 0;
if (!i915_ggtt_has_aperture(&i915->ggtt))
return 0;
@@ -457,8 +460,9 @@ static int igt_smoke_tiling(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
@@ -513,7 +517,7 @@ static int igt_smoke_tiling(void *arg)
pr_info("%s: Completed %lu trials\n", __func__, count);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out:
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index 2b6db6f799de..329e6506a727 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -36,8 +36,9 @@ static int igt_gem_huge(void *arg)
const unsigned int nreal = 509; /* just to be awkward */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
unsigned int n;
- int err;
+ int err = 0;
/* Basic sanitycheck of our huge fake object allocation */
@@ -47,8 +48,9 @@ static int igt_gem_huge(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
@@ -65,7 +67,7 @@ static int igt_gem_huge(void *arg)
}
out_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out:
i915_gem_object_put(obj);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
index 34932871b3a5..b3e573ca77f9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
@@ -37,7 +37,7 @@ static int mock_phys_object(void *arg)
goto out_obj;
}
- if (!atomic_read(&obj->mm.pages_pin_count)) {
+ if (!atomic_read(&obj->mm->pin_count)) {
pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
err = -EINVAL;
goto out_obj;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index b1e346d2d35f..55a226ed13dc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -123,7 +123,7 @@ static int __context_pin_state(struct i915_vma *vma)
* it cannot reclaim the object until we release it.
*/
i915_vma_make_unshrinkable(vma);
- vma->obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(vma->obj->mm);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
index 397186818305..5c1af6a3e0f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -44,16 +44,16 @@ static int pool_active(struct i915_active *ref)
struct intel_engine_pool_node *node =
container_of(ref, typeof(*node), active);
struct dma_resv *resv = node->obj->base.resv;
- int err;
+ struct i915_mm_pages *mm;
if (dma_resv_trylock(resv)) {
dma_resv_add_excl_fence(resv, NULL);
dma_resv_unlock(resv);
}
- err = i915_gem_object_pin_pages(node->obj);
- if (err)
- return err;
+ mm = i915_gem_object_pin_pages(node->obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
/* Hide this pinned object from the shrinker until retired */
i915_gem_object_make_unshrinkable(node->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index b14d69d60e00..80738a01480b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -510,7 +510,7 @@ static struct page *status_page(struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- return sg_page(obj->mm.pages->sgl);
+ return sg_page(obj->mm->pages->sgl);
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc22fb4..5ca656a1b079 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -23,7 +23,7 @@ static struct page *hwsp_page(struct intel_timeline *tl)
struct drm_i915_gem_object *obj = tl->hwsp_ggtt->obj;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- return sg_page(obj->mm.pages->sgl);
+ return sg_page(obj->mm->pages->sgl);
}
static unsigned long hwsp_cacheline(struct intel_timeline *tl)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index b6aedee46f9e..5d9aa29a0f39 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -408,7 +408,7 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
struct i915_vma dummy = {
.node.start = uc_fw_ggtt_offset(uc_fw),
.node.size = obj->base.size,
- .pages = obj->mm.pages,
+ .pages = obj->mm->pages,
.vm = &ggtt->vm,
};
@@ -532,7 +532,7 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
if (!intel_uc_fw_is_available(uc_fw))
return -ENOEXEC;
- err = i915_gem_object_pin_pages(uc_fw->obj);
+ err = PTR_ERR_OR_ZERO(i915_gem_object_pin_pages(uc_fw->obj));
if (err) {
DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
intel_uc_fw_type_repr(uc_fw->type), err);
@@ -577,7 +577,7 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
*/
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
- struct sg_table *pages = uc_fw->obj->mm.pages;
+ struct sg_table *pages = uc_fw->obj->mm->pages;
u32 size = min_t(u32, uc_fw->rsa_size, max_len);
u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index e451298d11c3..64978991a1a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -156,13 +156,12 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc();
- if (obj == NULL)
+ obj = i915_gem_object_alloc(&intel_vgpu_gem_ops, &lock_class);
+ if (!obj)
return NULL;
drm_gem_private_object_init(dev, &obj->base,
- roundup(info->size, PAGE_SIZE));
- i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+ round_up(info->size, PAGE_SIZE));
obj->read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = 0;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a0e437aa65b7..38ba7a3d4656 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1129,18 +1129,18 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
struct drm_i915_gem_object *src_obj,
u32 offset, u32 length)
{
+ struct i915_mm_pages *mm;
bool needs_clflush;
void *dst, *src;
- int ret;
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
if (IS_ERR(dst))
return dst;
- ret = i915_gem_object_pin_pages(src_obj);
- if (ret) {
+ mm = i915_gem_object_pin_pages(src_obj);
+ if (IS_ERR(mm)) {
i915_gem_object_unpin_map(dst_obj);
- return ERR_PTR(ret);
+ return ERR_CAST(mm);
}
needs_clflush =
@@ -1189,7 +1189,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
- i915_gem_object_unpin_pages(src_obj);
+ i915_mm_pages_unpin(mm);
/* dst_obj is returned with vmap pinned */
return dst;
@@ -1505,7 +1505,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
if (ret) {
/* Batch unsafe to execute with privileges, cancel! */
- cmd = page_mask_bits(shadow->obj->mm.mapping);
+ cmd = page_mask_bits(shadow->obj->mm->mapping);
*cmd = MI_BATCH_BUFFER_END;
/* If batch is unsafe but valid, jump to the original */
@@ -1530,7 +1530,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
+ void *ptr = page_mask_bits(shadow->obj->mm->mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d28468eaed57..c3bb5f2695e8 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -94,7 +94,7 @@ static char get_global_flag(struct drm_i915_gem_object *obj)
static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
- return obj->mm.mapping ? 'M' : ' ';
+ return obj->mm->mapping ? 'M' : ' ';
}
static const char *
@@ -144,8 +144,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->read_domains,
obj->write_domain,
i915_cache_level_str(dev_priv, obj->cache_level),
- obj->mm.dirty ? " dirty" : "",
- obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
+ i915_mm_pages_is_dirty(obj->mm) ? " dirty" : "",
+ obj->mm->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5eeef1ef7448..d59ef01547f7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -490,6 +490,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
int ret;
if (args->size == 0)
@@ -517,15 +518,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto out;
+ }
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out:
i915_gem_object_put(obj);
return ret;
@@ -781,6 +784,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
int ret;
if (args->size == 0)
@@ -820,9 +824,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
goto err;
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ ret = PTR_ERR(mm);
goto err;
+ }
ret = -EFAULT;
/* We can only do the GTT pwrite on untiled buffers, as otherwise
@@ -846,7 +852,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_shmem_pwrite(obj, args);
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
err:
i915_gem_object_put(obj);
return ret;
@@ -1015,6 +1021,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
int err;
switch (args->madv) {
@@ -1029,29 +1036,31 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (!obj)
return -ENOENT;
- err = mutex_lock_interruptible(&obj->mm.lock);
+ mm = obj->mm;
+
+ err = mutex_lock_interruptible(&mm->lock);
if (err)
goto out;
- if (i915_gem_object_has_pages(obj) &&
+ if (mm->pages &&
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
- if (obj->mm.madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(!obj->mm.quirked);
- __i915_gem_object_unpin_pages(obj);
- obj->mm.quirked = false;
+ if (mm->madv == I915_MADV_WILLNEED) {
+ GEM_BUG_ON(!i915_mm_pages_is_quirked(mm));
+ i915_mm_pages_unpin(mm);
+ i915_mm_pages_clear_quirk(mm);
}
if (args->madv == I915_MADV_WILLNEED) {
- GEM_BUG_ON(obj->mm.quirked);
- __i915_gem_object_pin_pages(obj);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_mm_pages_is_quirked(mm));
+ i915_mm_pages_pin(mm);
+ i915_mm_pages_set_quirk(mm);
}
}
- if (obj->mm.madv != __I915_MADV_PURGED)
- obj->mm.madv = args->madv;
+ if (mm->madv != __I915_MADV_PURGED)
+ mm->madv = args->madv;
- if (i915_gem_object_has_pages(obj)) {
+ if (mm->pages) {
struct list_head *list;
if (i915_gem_object_is_shrinkable(obj)) {
@@ -1059,23 +1068,22 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
spin_lock_irqsave(&i915->mm.obj_lock, flags);
- if (obj->mm.madv != I915_MADV_WILLNEED)
+			if (mm->madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
else
list = &i915->mm.shrink_list;
- list_move_tail(&obj->mm.link, list);
+ list_move_tail(&obj->mm_link, list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}
/* if the object is no longer attached, discard its backing storage */
- if (obj->mm.madv == I915_MADV_DONTNEED &&
- !i915_gem_object_has_pages(obj))
+ if (mm->madv == I915_MADV_DONTNEED && !mm->pages)
i915_gem_object_truncate(obj);
- args->retained = obj->mm.madv != __I915_MADV_PURGED;
- mutex_unlock(&obj->mm.lock);
+ args->retained = mm->madv != __I915_MADV_PURGED;
+ mutex_unlock(&mm->lock);
out:
i915_gem_object_put(obj);
@@ -1507,7 +1515,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
- list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
+ list_for_each_entry(obj, &i915->mm.shrink_list, mm_link) {
i915_gem_object_lock(obj);
WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index 71efccfde122..981c30f7410f 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -777,17 +777,18 @@ void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
struct sgt_iter sgt_iter;
struct page *page;
int i;
- if (obj->bit_17 == NULL)
+ if (!mm->bit_17)
return;
i = 0;
for_each_sgt_page(page, sgt_iter, pages) {
char new_bit_17 = page_to_phys(page) >> 17;
- if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
+ if ((new_bit_17 & 0x1) != (test_bit(i, mm->bit_17) != 0)) {
i915_gem_swizzle_page(page);
set_page_dirty(page);
}
@@ -809,13 +810,14 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
+ struct i915_mm_pages *mm = obj->mm;
struct sgt_iter sgt_iter;
struct page *page;
int i;
- if (obj->bit_17 == NULL) {
- obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
- if (obj->bit_17 == NULL) {
+ if (!mm->bit_17) {
+ mm->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
+ if (!mm->bit_17) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -823,12 +825,11 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
}
i = 0;
-
for_each_sgt_page(page, sgt_iter, pages) {
- if (page_to_phys(page) & (1 << 17))
- __set_bit(i, obj->bit_17);
+ if (page_to_phys(page) & BIT(17))
+ __set_bit(i, mm->bit_17);
else
- __clear_bit(i, obj->bit_17);
+ __clear_bit(i, mm->bit_17);
i++;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d9a2f58a620a..3cab8710b620 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -199,9 +199,9 @@ static int ppgtt_set_pages(struct i915_vma *vma)
{
GEM_BUG_ON(vma->pages);
- vma->pages = vma->obj->mm.pages;
+ vma->pages = vma->obj->mm->pages;
- vma->page_sizes = vma->obj->mm.page_sizes;
+ vma->page_sizes = vma->obj->mm->page_sizes;
return 0;
}
@@ -210,7 +210,7 @@ static void clear_pages(struct i915_vma *vma)
{
GEM_BUG_ON(!vma->pages);
- if (vma->pages != vma->obj->mm.pages) {
+ if (vma->pages != vma->obj->mm->pages) {
sg_free_table(vma->pages);
kfree(vma->pages);
}
@@ -2175,7 +2175,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
* try again - if there are no more pages to remove from
* the DMA remapper, i915_gem_shrink will return 0.
*/
- GEM_BUG_ON(obj->mm.pages == pages);
+ GEM_BUG_ON(obj->mm->pages == pages);
} while (i915_gem_shrink(to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
@@ -2576,7 +2576,7 @@ static int ggtt_set_pages(struct i915_vma *vma)
if (ret)
return ret;
- vma->page_sizes = vma->obj->mm.page_sizes;
+ vma->page_sizes = vma->obj->mm->page_sizes;
return 0;
}
@@ -3602,7 +3602,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
GEM_BUG_ON(vma->ggtt_view.type);
/* fall through */
case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
+ vma->pages = vma->obj->mm->pages;
return 0;
case I915_GGTT_VIEW_ROTATED:
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8374d50c0770..c2daa7195d33 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1024,7 +1024,7 @@ i915_error_object_create(struct drm_i915_private *i915,
break;
}
} else if (i915_gem_object_is_lmem(vma->obj)) {
- struct intel_memory_region *mem = vma->obj->mm.region;
+ struct intel_memory_region *mem = vma->obj->mm->region;
dma_addr_t dma;
for_each_sgt_daddr(dma, iter, vma->pages) {
@@ -1397,7 +1397,7 @@ capture_object(struct drm_i915_private *dev_priv,
struct i915_vma fake = {
.node = { .start = U64_MAX, .size = obj->base.size },
.size = obj->base.size,
- .pages = obj->mm.pages,
+ .pages = obj->mm->pages,
.obj = obj,
};
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 6794c742fbbf..570a62c76d70 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -580,7 +580,7 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj)
* assume that no else is pinning the pages, but as a rough assertion
* that we will not run into problems later, this will do!)
*/
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
+ GEM_BUG_ON(atomic_read(&obj->mm->pin_count) < atomic_read(&obj->bind_count));
}
/**
@@ -778,6 +778,7 @@ static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
static int vma_get_pages(struct i915_vma *vma)
{
+ struct i915_mm_pages *mm = NULL;
int err = 0;
if (atomic_add_unless(&vma->pages_count, 1, 0))
@@ -789,15 +790,17 @@ static int vma_get_pages(struct i915_vma *vma)
if (!atomic_read(&vma->pages_count)) {
if (vma->obj) {
- err = i915_gem_object_pin_pages(vma->obj);
- if (err)
+ mm = i915_gem_object_pin_pages(vma->obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto unlock;
+ }
}
err = vma->ops->set_pages(vma);
if (err) {
- if (vma->obj)
- i915_gem_object_unpin_pages(vma->obj);
+ if (mm)
+ i915_mm_pages_unpin(mm);
goto unlock;
}
}
@@ -1154,7 +1157,7 @@ int i915_vma_move_to_active(struct i915_vma *vma,
obj->write_domain = 0;
}
obj->read_domains |= I915_GEM_GPU_DOMAINS;
- obj->mm.dirty = true;
+ i915_mm_pages_set_dirty(obj->mm);
GEM_BUG_ON(!i915_vma_is_active(vma));
return 0;
diff --git a/drivers/gpu/drm/i915/mm/Makefile b/drivers/gpu/drm/i915/mm/Makefile
new file mode 100644
index 000000000000..7e73aa587967
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/Makefile
@@ -0,0 +1,5 @@
+# For building individual subdir files on the command line
+subdir-ccflags-y += -I$(srctree)/$(src)/..
+
+# Extra header tests
+header-test-pattern-$(CONFIG_DRM_I915_WERROR) := *.h
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_iomap.c b/drivers/gpu/drm/i915/mm/i915_mm_iomap.c
new file mode 100644
index 000000000000..e7f2c70b4066
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_iomap.c
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_mm_iomap.h"
+#include "i915_mm_pages.h"
+#include "i915_mm_sg.h"
+#include "intel_memory_region.h"
+
+/* XXX: Time to vfunc your life up? */
+void __iomem *
+i915_mm_pages_io_map_page(struct i915_mm_pages *mm, unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_mm_pages_get_dma_address(mm, n);
+ offset -= mm->region->region.start;
+
+ return io_mapping_map_wc(&mm->region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *
+i915_mm_pages_io_map_page_atomic(struct i915_mm_pages *mm,
+ unsigned long n)
+{
+ resource_size_t offset;
+
+ offset = i915_mm_pages_get_dma_address(mm, n);
+ offset -= mm->region->region.start;
+
+ return io_mapping_map_atomic_wc(&mm->region->iomap, offset);
+}
+
+void __iomem *
+i915_mm_pages_io_map(struct i915_mm_pages *mm,
+ unsigned long n,
+ unsigned long size)
+{
+ resource_size_t offset;
+
+ GEM_BUG_ON(!i915_mm_pages_is_contiguous(mm));
+
+ offset = i915_mm_pages_get_dma_address(mm, n);
+ offset -= mm->region->region.start;
+
+ return io_mapping_map_wc(&mm->region->iomap, offset, size);
+}
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_iomap.h b/drivers/gpu/drm/i915/mm/i915_mm_iomap.h
new file mode 100644
index 000000000000..07af72625451
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_iomap.h
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_MM_IOMAP_H__
+#define __I915_MM_IOMAP_H__
+
+#include <linux/types.h>
+
+struct i915_mm_pages;
+
+void __iomem *
+i915_mm_pages_io_map_page(struct i915_mm_pages *mm, unsigned long n);
+
+void __iomem *
+i915_mm_pages_io_map_page_atomic(struct i915_mm_pages *mm,
+ unsigned long n);
+
+void __iomem *
+i915_mm_pages_io_map(struct i915_mm_pages *mm,
+ unsigned long n,
+		     unsigned long size);
+
+#endif /* __I915_MM_IOMAP_H__ */
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages.c b/drivers/gpu/drm/i915/mm/i915_mm_pages.c
new file mode 100644
index 000000000000..123365d6afda
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages.c
@@ -0,0 +1,53 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#include <linux/bitmap.h>
+#include <linux/slab.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_mm_pages.h"
+
+struct i915_mm_pages *i915_mm_pages_create(struct lock_class_key *key)
+{
+ struct i915_mm_pages *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (!mm)
+ return NULL;
+
+ kref_init(&mm->kref);
+	__mutex_init(&mm->lock, "mm.lock", key);
+
+ INIT_RADIX_TREE(&mm->get_page.radix, GFP_KERNEL | __GFP_NOWARN);
+ mutex_init(&mm->get_page.lock);
+
+ return mm;
+}
+
+void __i915_mm_pages_reset_page_iter(struct i915_mm_pages *mm)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ rcu_read_lock();
+ radix_tree_for_each_slot(slot, &mm->get_page.radix, &iter, 0)
+ radix_tree_delete(&mm->get_page.radix, iter.index);
+ rcu_read_unlock();
+}
+
+void i915_mm_pages_free(struct kref *kref)
+{
+ struct i915_mm_pages *mm = container_of(kref, typeof(*mm), kref);
+
+ bitmap_free(mm->bit_17);
+
+ mutex_destroy(&mm->get_page.lock);
+ mutex_destroy(&mm->lock);
+
+ kfree(mm);
+}
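
For reference, the intended lifecycle of the new struct, as a sketch (in
this patch the object takes the initial reference in i915_gem_object_alloc()
and pinning is driven through the GEM helpers):

	static struct lock_class_key key;
	struct i915_mm_pages *mm;

	mm = i915_mm_pages_create(&key);
	if (!mm)
		return -ENOMEM;

	i915_mm_pages_pin(mm);	/* while the backing store is in use */
	/* ... */
	i915_mm_pages_unpin(mm);

	i915_mm_pages_put(mm);	/* final put ends in i915_mm_pages_free() */
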
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages.h b/drivers/gpu/drm/i915/mm/i915_mm_pages.h
new file mode 100644
index 000000000000..9d70613966e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages.h
@@ -0,0 +1,103 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef __I915_MM_PAGES_H__
+#define __I915_MM_PAGES_H__
+
+#include "i915_gem.h"
+#include "i915_mm_pages_types.h"
+
+struct i915_mm_pages *i915_mm_pages_create(struct lock_class_key *key);
+void i915_mm_pages_free(struct kref *kref);
+
+void __i915_mm_pages_reset_page_iter(struct i915_mm_pages *mm);
+
+static inline struct i915_mm_pages *i915_mm_pages_get(struct i915_mm_pages *p)
+{
+ kref_get(&p->kref);
+ return p;
+}
+
+static inline void i915_mm_pages_put(struct i915_mm_pages *p)
+{
+ kref_put(&p->kref, i915_mm_pages_free);
+}
+
+static inline void
+i915_mm_pages_pin(struct i915_mm_pages *p)
+{
+ atomic_inc(&p->pin_count);
+ GEM_BUG_ON(!atomic_read(&p->pin_count));
+}
+
+static inline bool
+i915_mm_pages_is_pinned(struct i915_mm_pages *p)
+{
+ return atomic_read(&p->pin_count);
+}
+
+static inline void
+i915_mm_pages_unpin(struct i915_mm_pages *p)
+{
+ GEM_BUG_ON(!atomic_read(&p->pin_count));
+ atomic_dec(&p->pin_count);
+}
+
+static inline bool
+i915_mm_pages_is_contiguous(const struct i915_mm_pages *mm)
+{
+ return mm->flags & I915_MM_PAGES_CONTIGUOUS;
+}
+
+static inline bool
+i915_mm_pages_is_volatile(const struct i915_mm_pages *mm)
+{
+ return mm->flags & I915_MM_PAGES_VOLATILE;
+}
+
+static inline void
+i915_mm_pages_set_volatile(struct i915_mm_pages *mm)
+{
+ mm->flags |= I915_MM_PAGES_VOLATILE;
+}
+
+static inline bool
+i915_mm_pages_is_dirty(const struct i915_mm_pages *mm)
+{
+ return mm->flags & BIT(I915_MM_PAGES_DIRTY);
+}
+
+static inline void
+i915_mm_pages_set_dirty(struct i915_mm_pages *mm)
+{
+ set_bit(I915_MM_PAGES_DIRTY, &mm->flags);
+}
+
+static inline void
+i915_mm_pages_clear_dirty(struct i915_mm_pages *mm)
+{
+	clear_bit(I915_MM_PAGES_DIRTY, &mm->flags);
+}
+
+static inline bool
+i915_mm_pages_is_quirked(const struct i915_mm_pages *mm)
+{
+ return mm->flags & BIT(I915_MM_PAGES_QUIRK);
+}
+
+static inline void
+i915_mm_pages_set_quirk(struct i915_mm_pages *mm)
+{
+ set_bit(I915_MM_PAGES_QUIRK, &mm->flags);
+}
+
+static inline void
+i915_mm_pages_clear_quirk(struct i915_mm_pages *mm)
+{
+	clear_bit(I915_MM_PAGES_QUIRK, &mm->flags);
+}
+
+#endif /* __I915_MM_PAGES_H__ */
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h b/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h
new file mode 100644
index 000000000000..c79613d6fa4b
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_pages_types.h
@@ -0,0 +1,112 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016-2019 Intel Corporation
+ */
+
+#ifndef __I915_MM_PAGES_TYPES_H__
+#define __I915_MM_PAGES_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/scatterlist.h>
+
+#include "i915_selftest.h"
+
+struct i915_mm_pages {
+ struct kref kref;
+
+ struct mutex lock; /* protects the pages and their use */
+ atomic_t pin_count;
+ atomic_t shrink_pin;
+
+ /**
+ * Memory region backing this page store.
+ */
+ struct intel_memory_region *region;
+ /**
+ * List of memory region blocks allocated for these pages.
+ */
+ struct list_head blocks;
+ /**
+ * Element within region->objects.list, or region->objects.purgeable
+ * if the pages are marked as volatile. Access is protected by
+ * region->objects.lock.
+ */
+ struct list_head region_link;
+
+ struct sg_table *pages;
+ void *mapping;
+
+ /* TODO: whack some of this into the error state */
+ struct i915_page_sizes {
+ /**
+ * The sg mask of the pages sg_table, i.e. the mask
+ * of the lengths for each sg entry.
+ */
+ unsigned int phys;
+
+ /**
+ * The gtt page sizes we are allowed to use given the
+ * sg mask and the supported page sizes. This will
+ * express the smallest unit we can use for the whole
+ * object, as well as the larger sizes we may be able
+ * to use opportunistically.
+ */
+ unsigned int sg;
+
+ /**
+ * The actual gtt page size usage. Since multiple vma may
+ * be associated with these pages, we need to prevent any
+ * trampling of state; a copy of this struct therefore also
+ * lives in each vma, and the gtt value here should only be
+ * read/written through the vma.
+ */
+ unsigned int gtt;
+ } page_sizes;
+
+ I915_SELFTEST_DECLARE(unsigned int page_mask);
+
+ struct i915_page_iter {
+ struct scatterlist *sg_pos;
+ unsigned int sg_idx; /* in pages, but 32bit eek! */
+
+ struct radix_tree_root radix;
+ struct mutex lock; /* protects this cache */
+ } get_page;
+
+ unsigned long flags;
+#define I915_MM_PAGES_CONTIGUOUS BIT(0)
+#define I915_MM_PAGES_VOLATILE BIT(1)
+#define I915_MM_PAGES_ALLOC (I915_MM_PAGES_CONTIGUOUS | I915_MM_PAGES_VOLATILE)
+#define I915_MM_PAGES_DIRTY 2
+#define I915_MM_PAGES_QUIRK 3
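+/*
+ * Note the mixed convention above: CONTIGUOUS and VOLATILE are mask
+ * values tested directly against flags, while DIRTY and QUIRK are bit
+ * numbers for use with the atomic set_bit()/clear_bit()/test_bit().
+ */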
+
+ /**
+ * Advice: are the backing pages purgeable?
+ */
+ unsigned int madv:2;
+
+ /** Record of address bit 17 of each page at last unbind. */
+ unsigned long *bit_17;
+};
+
+enum i915_mm_subclass { /* lockdep subclass for i915_mm_pages.lock */
+ I915_MM_NORMAL = 0,
+ /*
+ * Used for mm->lock when allocating pages. Safe because the store
+ * isn't yet on any LRU, and therefore the shrinker can't deadlock on
+ * it. As soon as the store has pages, mm->lock nests within
+ * fs_reclaim.
+ */
+ I915_MM_GET_PAGES = 1,
+};
+
+#endif /* __I915_MM_PAGES_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_region.c b/drivers/gpu/drm/i915/mm/i915_mm_region.c
new file mode 100644
index 000000000000..63ccea982eaa
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_region.c
@@ -0,0 +1,38 @@
+#include "i915_mm_pages.h"
+#include "i915_mm_region.h"
+#include "intel_memory_region.h"
+
+void i915_mm_pages_init_memory_region(struct i915_mm_pages *mm,
+ struct intel_memory_region *mem,
+ unsigned long flags)
+{
+ INIT_LIST_HEAD(&mm->blocks);
+ mm->region = intel_memory_region_get(mem);
+ mm->flags |= flags;
+
+ mutex_lock(&mem->objects.lock);
+
+ if (i915_mm_pages_is_volatile(mm))
+ list_add(&mm->region_link, &mem->objects.purgeable);
+ else
+ list_add(&mm->region_link, &mem->objects.list);
+
+ mutex_unlock(&mem->objects.lock);
+}
+
+void i915_mm_pages_release_memory_region(struct i915_mm_pages *mm)
+{
+ struct intel_memory_region *mem = mm->region;
+
+ mutex_lock(&mem->objects.lock);
+ list_del(&mm->region_link);
+ mutex_unlock(&mem->objects.lock);
+
+ intel_memory_region_put(mem);
+}
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_region.h b/drivers/gpu/drm/i915/mm/i915_mm_region.h
new file mode 100644
index 000000000000..ab5ea2b1550e
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_region.h
@@ -0,0 +1,14 @@
+#ifndef __I915_MM_REGION_H__
+#define __I915_MM_REGION_H__
+
+#include <linux/types.h>
+
+struct i915_mm_pages;
+struct intel_memory_region;
+
+void i915_mm_pages_init_memory_region(struct i915_mm_pages *mm,
+ struct intel_memory_region *mem,
+ unsigned long flags);
+void i915_mm_pages_release_memory_region(struct i915_mm_pages *mm);
+
+#endif /* __I915_MM_REGION_H__ */
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_sg.c b/drivers/gpu/drm/i915/mm/i915_mm_sg.c
new file mode 100644
index 000000000000..66d0f76f1ec8
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_sg.c
@@ -0,0 +1,180 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/scatterlist.h>
+
+#include "i915_mm_pages.h"
+#include "i915_mm_sg.h"
+#include "i915_scatterlist.h"
+
+struct scatterlist *
+i915_mm_pages_get_sg(struct i915_mm_pages *mm,
+ unsigned long n,
+ unsigned int *offset)
+{
+ struct i915_page_iter *iter = &mm->get_page;
+ struct scatterlist *sg;
+ unsigned int idx, count;
+
+ might_sleep();
+
+ /* As we iterate forward through the sg, we record each entry in a
+ * radixtree for quick repeated (backwards) lookups. If we have seen
+ * this index previously, we will have an entry for it.
+ *
+ * Initial lookup is O(N), but this is amortized to O(1) for
+ * sequential page access (where each new request is consecutive
+ * to the previous one). Repeated lookups are O(lg(size)),
+ * i.e. O(1) with a large constant!
+ */
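+ /*
+ * Worked example (illustrative): for a single 4-page sg entry
+ * covering pages [0..3], the insertion loop below caches
+ * 0 -> sg and 1..3 -> value(0); a later lookup of page 2 then
+ * finds the base entry at index 0 and reports *offset = 2.
+ */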
+ if (n < READ_ONCE(iter->sg_idx))
+ goto lookup;
+
+ mutex_lock(&iter->lock);
+
+ /* We prefer to reuse the last sg so that repeated lookups of this
+ * (or the subsequent) sg are fast - comparing against the last
+ * sg is faster than going through the radixtree.
+ */
+
+ sg = iter->sg_pos;
+ idx = iter->sg_idx;
+ count = __sg_page_count(sg);
+
+ while (idx + count <= n) {
+ void *entry;
+ unsigned long i;
+ int ret;
+
+ /* If we cannot allocate and insert this entry, or the
+ * individual pages from this range, cancel updating the
+ * sg_idx so that on this lookup we are forced to linearly
+ * scan onwards, but on future lookups we will try the
+ * insertion again (in which case we need to be careful of
+ * the error return reporting that we have already inserted
+ * this index).
+ */
+ ret = radix_tree_insert(&iter->radix, idx, sg);
+ if (ret && ret != -EEXIST)
+ goto scan;
+
+ entry = xa_mk_value(idx);
+ for (i = 1; i < count; i++) {
+ ret = radix_tree_insert(&iter->radix, idx + i, entry);
+ if (ret && ret != -EEXIST)
+ goto scan;
+ }
+
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+scan:
+ iter->sg_pos = sg;
+ iter->sg_idx = idx;
+
+ mutex_unlock(&iter->lock);
+
+ if (unlikely(n < idx)) /* insertion completed by another thread */
+ goto lookup;
+
+ /* In case we failed to insert the entry into the radixtree, we need
+ * to look beyond the current sg.
+ */
+ while (idx + count <= n) {
+ idx += count;
+ sg = ____sg_next(sg);
+ count = __sg_page_count(sg);
+ }
+
+ *offset = n - idx;
+ return sg;
+
+lookup:
+ rcu_read_lock();
+
+ sg = radix_tree_lookup(&iter->radix, n);
+ GEM_BUG_ON(!sg);
+
+ /* If this index is in the middle of a multi-page sg entry,
+ * the radix tree will contain a value entry that points
+ * to the start of that range. We will return the pointer to
+ * the base page and the offset of this page within the
+ * sg entry's range.
+ */
+ *offset = 0;
+ if (unlikely(xa_is_value(sg))) {
+ unsigned long base = xa_to_value(sg);
+
+ sg = radix_tree_lookup(&iter->radix, base);
+ GEM_BUG_ON(!sg);
+
+ *offset = n - base;
+ }
+
+ rcu_read_unlock();
+
+ return sg;
+}
+
+struct page *
+i915_mm_pages_get_page(struct i915_mm_pages *mm, unsigned long n)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_mm_pages_get_sg(mm, n, &offset);
+ return nth_page(sg_page(sg), offset);
+}
+
+/* Like i915_mm_pages_get_page(), but mark the returned page dirty */
+struct page *
+i915_mm_pages_get_dirty_page(struct i915_mm_pages *mm, unsigned long n)
+{
+ struct page *page;
+
+ page = i915_mm_pages_get_page(mm, n);
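+
+ /*
+ * If the whole store is already marked dirty, each page is
+ * expected to be flushed as dirty when the pages are released,
+ * so marking this page individually would be redundant.
+ */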
+ if (!i915_mm_pages_is_dirty(mm))
+ set_page_dirty(page);
+
+ return page;
+}
+
+dma_addr_t
+i915_mm_pages_get_dma_address_len(struct i915_mm_pages *mm,
+ unsigned long n,
+ unsigned int *len)
+{
+ struct scatterlist *sg;
+ unsigned int offset;
+
+ sg = i915_mm_pages_get_sg(mm, n, &offset);
+
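+ /* offset counts whole pages into this sg entry; convert to bytes */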
+ if (len)
+ *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
+
+ return sg_dma_address(sg) + (offset << PAGE_SHIFT);
+}
+
+dma_addr_t
+i915_mm_pages_get_dma_address(struct i915_mm_pages *mm, unsigned long n)
+{
+ return i915_mm_pages_get_dma_address_len(mm, n, NULL);
+}
diff --git a/drivers/gpu/drm/i915/mm/i915_mm_sg.h b/drivers/gpu/drm/i915/mm/i915_mm_sg.h
new file mode 100644
index 000000000000..af48e37e7a11
--- /dev/null
+++ b/drivers/gpu/drm/i915/mm/i915_mm_sg.h
@@ -0,0 +1,27 @@
+#ifndef __I915_MM_SG_H__
+#define __I915_MM_SG_H__
+
+#include <linux/types.h>
+
+struct i915_mm_pages;
+
+struct scatterlist *
+i915_mm_pages_get_sg(struct i915_mm_pages *mm,
+ unsigned long n,
+ unsigned int *offset);
+
+struct page *
+i915_mm_pages_get_page(struct i915_mm_pages *mm, unsigned long n);
+
+struct page *
+i915_mm_pages_get_dirty_page(struct i915_mm_pages *mm, unsigned long n);
+
+dma_addr_t
+i915_mm_pages_get_dma_address_len(struct i915_mm_pages *mm,
+ unsigned long n,
+ unsigned int *len);
+
+dma_addr_t
+i915_mm_pages_get_dma_address(struct i915_mm_pages *mm, unsigned long n);
+
+#endif /* __I915_MM_SG_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 06ef88510209..2df904369d43 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -38,8 +38,8 @@ static void quirk_add(struct drm_i915_gem_object *obj,
struct list_head *objects)
{
/* quirk is only for live tiled objects, use it to declare ownership */
- GEM_BUG_ON(obj->mm.quirked);
- obj->mm.quirked = true;
+ GEM_BUG_ON(i915_mm_pages_is_quirked(obj->mm));
+ i915_mm_pages_set_quirk(obj->mm);
list_add(&obj->st_link, objects);
}
@@ -75,7 +75,7 @@ static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects)
bound = 0;
unbound = 0;
list_for_each_entry(obj, objects, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
+ GEM_BUG_ON(!i915_mm_pages_is_quirked(obj->mm));
if (atomic_read(&obj->bind_count))
bound++;
@@ -109,7 +109,7 @@ static void unpin_ggtt(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
- if (vma->obj->mm.quirked)
+ if (i915_mm_pages_is_quirked(vma->obj->mm))
i915_vma_unpin(vma);
}
@@ -118,8 +118,8 @@ static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list)
struct drm_i915_gem_object *obj, *on;
list_for_each_entry_safe(obj, on, list, st_link) {
- GEM_BUG_ON(!obj->mm.quirked);
- obj->mm.quirked = false;
+ GEM_BUG_ON(!i915_mm_pages_is_quirked(obj->mm));
+ i915_mm_pages_clear_quirk(obj->mm);
i915_gem_object_put(obj);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 80cde5bda922..19df05eeea1f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -91,8 +91,10 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
static void fake_put_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ struct i915_mm_pages *mm = obj->mm;
+
fake_free_pages(obj, pages);
- obj->mm.dirty = false;
+ i915_mm_pages_clear_dirty(mm);
}
static const struct drm_i915_gem_object_ops fake_ops = {
@@ -106,6 +108,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -113,24 +116,24 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&fake_ops, &lock_class);
if (!obj)
goto err;
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &fake_ops, &lock_class);
- i915_gem_object_set_volatile(obj);
+ i915_mm_pages_set_volatile(obj->mm);
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->cache_level = I915_CACHE_NONE;
/* Preallocate the "backing storage" */
- if (i915_gem_object_pin_pages(obj))
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
goto err_obj;
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return obj;
err_obj:
@@ -224,6 +227,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
I915_RND_SUBSTATE(prng, seed_prng);
struct drm_i915_gem_object *obj;
unsigned int *order, count, n;
+ struct i915_mm_pages *mm;
u64 hole_size;
hole_size = (hole_end - hole_start) >> size;
@@ -264,7 +268,8 @@ static int lowlevel_hole(struct drm_i915_private *i915,
GEM_BUG_ON(obj->base.size != BIT_ULL(size));
- if (i915_gem_object_pin_pages(obj)) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
i915_gem_object_put(obj);
kfree(order);
break;
@@ -287,7 +292,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->allocate_va_range(vm, addr, BIT_ULL(size)))
break;
- mock_vma->pages = obj->mm.pages;
+ mock_vma->pages = obj->mm->pages;
mock_vma->node.size = BIT_ULL(size);
mock_vma->node.start = addr;
@@ -307,7 +312,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
vm->clear_range(vm, addr, BIT_ULL(size));
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
i915_gem_object_put(obj);
kfree(order);
@@ -1152,6 +1157,7 @@ static int igt_ggtt_page(void *arg)
intel_wakeref_t wakeref;
struct drm_mm_node tmp;
unsigned int *order, n;
+ struct i915_mm_pages *mm;
int err;
if (!i915_ggtt_has_aperture(ggtt))
@@ -1161,9 +1167,11 @@ static int igt_ggtt_page(void *arg)
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out_free;
+ }
memset(&tmp, 0, sizeof(tmp));
mutex_lock(&ggtt->vm.mutex);
@@ -1228,7 +1236,7 @@ static int igt_ggtt_page(void *arg)
drm_mm_remove_node(&tmp);
mutex_unlock(&ggtt->vm.mutex);
out_unpin:
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out_free:
i915_gem_object_put(obj);
return err;
@@ -1244,7 +1252,7 @@ static void track_vma_bind(struct i915_vma *vma)
GEM_BUG_ON(vma->pages);
atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
__i915_gem_object_pin_pages(obj);
- vma->pages = obj->mm.pages;
+ vma->pages = obj->mm->pages;
mutex_lock(&vma->vm->mutex);
list_add_tail(&vma->vm_link, &vma->vm->bound_list);
@@ -1321,6 +1329,7 @@ static int igt_gtt_reserve(void *arg)
for (total = 0;
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_mm_pages *mm;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1330,8 +1339,9 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
goto out;
}
@@ -1373,6 +1383,7 @@ static int igt_gtt_reserve(void *arg)
for (total = I915_GTT_PAGE_SIZE;
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_mm_pages *mm;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1382,8 +1393,9 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
goto out;
}
@@ -1537,6 +1549,7 @@ static int igt_gtt_insert(void *arg)
for (total = 0;
total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += I915_GTT_PAGE_SIZE) {
+ struct i915_mm_pages *mm;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1546,8 +1559,9 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
goto out;
}
@@ -1646,6 +1660,7 @@ static int igt_gtt_insert(void *arg)
for (total = 0;
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
+ struct i915_mm_pages *mm;
struct i915_vma *vma;
obj = i915_gem_object_create_internal(ggtt->vm.i915,
@@ -1655,8 +1670,9 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
goto out;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index 58b5f40a07dd..8a1a12db971e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -574,7 +574,7 @@ static int igt_vma_rotate_remap(void *arg)
goto out_object;
}
- if (vma->pages == obj->mm.pages) {
+ if (vma->pages == obj->mm->pages) {
pr_err("VMA using unrotated object pages!\n");
err = -EINVAL;
goto out_object;
@@ -674,7 +674,7 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
- if (vma->pages == vma->obj->mm.pages) {
+ if (vma->pages == vma->obj->mm->pages) {
pr_err("(%s) VMA using original object pages!\n",
name);
ok = false;
@@ -686,7 +686,7 @@ static bool assert_pin(struct i915_vma *vma,
ok = false;
}
- if (vma->pages != vma->obj->mm.pages) {
+ if (vma->pages != vma->obj->mm->pages) {
pr_err("VMA not using object pages!\n");
ok = false;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 04d0aa7b349e..a6fb8efb3817 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -19,6 +19,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "mm/i915_mm_iomap.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
@@ -48,6 +49,7 @@ static int igt_mock_fill(void *arg)
resource_size_t total = resource_size(&mem->region);
resource_size_t page_size;
resource_size_t rem;
+ struct i915_mm_pages *mm;
unsigned long max_pages;
unsigned long page_num;
LIST_HEAD(objects);
@@ -67,8 +69,9 @@ static int igt_mock_fill(void *arg)
break;
}
- err = i915_gem_object_pin_pages(obj);
- if (err) {
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
i915_gem_object_put(obj);
break;
}
@@ -101,15 +104,18 @@ igt_object_create(struct intel_memory_region *mem,
unsigned int flags)
{
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
int err;
obj = i915_gem_object_create_region(mem, size, flags);
if (IS_ERR(obj))
return obj;
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto put;
+ }
list_add(&obj->st_link, objects);
return obj;
@@ -144,11 +150,11 @@ static int igt_mock_contiguous(void *arg)
/* Min size */
obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
- I915_BO_ALLOC_CONTIGUOUS);
+ I915_MM_PAGES_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
+ if (obj->mm->pages->nents != 1) {
pr_err("%s min object spans multiple sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
@@ -157,11 +163,11 @@ static int igt_mock_contiguous(void *arg)
igt_object_release(obj);
/* Max size */
- obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+ obj = igt_object_create(mem, &objects, total, I915_MM_PAGES_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
- if (obj->mm.pages->nents != 1) {
+ if (obj->mm->pages->nents != 1) {
pr_err("%s max object spans multiple sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
@@ -176,7 +182,7 @@ static int igt_mock_contiguous(void *arg)
target = max_t(u64, PAGE_SIZE, target);
obj = igt_object_create(mem, &objects, target,
- I915_BO_ALLOC_CONTIGUOUS);
+ I915_MM_PAGES_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -187,7 +193,7 @@ static int igt_mock_contiguous(void *arg)
goto err_close_objects;
}
- if (obj->mm.pages->nents != 1) {
+ if (obj->mm->pages->nents != 1) {
pr_err("%s object spans multiple sg entries\n", __func__);
err = -EINVAL;
goto err_close_objects;
@@ -212,7 +218,7 @@ static int igt_mock_contiguous(void *arg)
list = &objects;
obj = igt_object_create(mem, list, target,
- I915_BO_ALLOC_CONTIGUOUS);
+ I915_MM_PAGES_CONTIGUOUS);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_close_objects;
@@ -242,7 +248,7 @@ static int igt_mock_contiguous(void *arg)
bool should_fail = target > min;
obj = igt_object_create(mem, &objects, target,
- I915_BO_ALLOC_CONTIGUOUS);
+ I915_MM_PAGES_CONTIGUOUS);
if (should_fail != IS_ERR(obj)) {
pr_err("%s target allocation(%llx) mismatch\n",
__func__, target);
@@ -270,6 +276,7 @@ static int igt_gpu_write_dw(struct intel_context *ce,
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
+ struct i915_mm_pages *mm;
unsigned long n;
int err;
@@ -279,15 +286,15 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (err)
return err;
- err = i915_gem_object_pin_pages(obj);
- if (err)
- return err;
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm))
+ return PTR_ERR(mm);
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
u32 __iomem *base;
u32 read_val;
- base = i915_gem_object_lmem_io_map_page_atomic(obj, n);
+ base = i915_mm_pages_io_map_page_atomic(obj->mm, n);
read_val = ioread32(base + dword);
io_mapping_unmap_atomic(base);
@@ -299,7 +306,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
}
}
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
return err;
}
@@ -382,17 +389,20 @@ static int igt_lmem_create(void *arg)
{
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ struct i915_mm_pages *mm;
int err = 0;
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
if (IS_ERR(obj))
return PTR_ERR(obj);
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out_put;
+ }
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out_put:
i915_gem_object_put(obj);
@@ -404,8 +414,9 @@ static int igt_lmem_write_gpu(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
- struct file *file;
+ struct i915_mm_pages *mm;
I915_RND_STATE(prng);
+ struct file *file;
u32 sz;
int err;
@@ -427,15 +438,17 @@ static int igt_lmem_write_gpu(void *arg)
goto out_file;
}
- err = i915_gem_object_pin_pages(obj);
- if (err)
+ mm = i915_gem_object_pin_pages(obj);
+ if (IS_ERR(mm)) {
+ err = PTR_ERR(mm);
goto out_put;
+ }
err = igt_gpu_write(ctx, obj);
if (err)
pr_err("igt_gpu_write failed(%d)\n", err);
- i915_gem_object_unpin_pages(obj);
+ i915_mm_pages_unpin(mm);
out_put:
i915_gem_object_put(obj);
out_file:
@@ -495,7 +508,7 @@ static int igt_lmem_write_cpu(void *arg)
sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
sz = max_t(u32, 2 * PAGE_SIZE, sz);
- obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+ obj = i915_gem_object_create_lmem(i915, sz, I915_MM_PAGES_CONTIGUOUS);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index b2ad41c27e67..daab2082e909 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -5,9 +5,16 @@
#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
+#include "mm/i915_mm_region.h"
#include "mock_region.h"
+static void
+i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
+{
+ i915_mm_pages_release_memory_region(obj->mm);
+}
+
static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
.get_pages = i915_gem_object_get_pages_buddy,
.put_pages = i915_gem_object_put_pages_buddy,
@@ -26,18 +33,17 @@ mock_object_create(struct intel_memory_region *mem,
if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size)
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc();
+ obj = i915_gem_object_alloc(&mock_region_obj_ops, &lock_class);
if (!obj)
return ERR_PTR(-ENOMEM);
drm_gem_private_object_init(&i915->drm, &obj->base, size);
- i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- i915_gem_object_init_memory_region(obj, mem, flags);
+ i915_mm_pages_init_memory_region(obj->mm, mem, flags);
return obj;
}
--
2.24.1